author      Curtis Dunham <Curtis.Dunham@arm.com>      2016-07-21 17:19:18 +0100
committer   Curtis Dunham <Curtis.Dunham@arm.com>      2016-07-21 17:19:18 +0100
commit      84f138ba96201431513eb2ae5f847389ac731aa2 (patch)
tree        3aee721699295c85e4e0c2d3d4a6bb27595bfabd /tests/quick/se/00.hello/ref
parent      a288c94387b110112461ff5686fa727a43ddbe9c (diff)
download    gem5-84f138ba96201431513eb2ae5f847389ac731aa2.tar.xz
stats: update references
Diffstat (limited to 'tests/quick/se/00.hello/ref')
96 files changed, 2429 insertions(+), 1076 deletions(-)
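Most of the insertions below are new ClockedObject power-state parameters (default_p_state, p_state_clk_gate_min/max/bins, power_model) that now get dumped into every reference config.ini, plus the resulting small shifts in the reference stats. As a hedged illustration only, here is a minimal sketch of how such parameters might be set from a gem5 Python configuration script; the script structure, object choices, and values are assumptions based on the reference files, not part of this commit:

```python
# Minimal sketch (assumed gem5 Python API, circa 2016): the power-state
# parameters visible in the updated config.ini references live on any
# ClockedObject-derived SimObject and can be set from a config script.
from m5.objects import *

system = System()
system.clk_domain = SrcClockDomain(clock='1GHz',
                                   voltage_domain=VoltageDomain())

system.cpu = MinorCPU()                      # matches the minor-timing refs

# Parameter names and values mirror the new lines in the reference
# config.ini; overriding them explicitly here is purely illustrative.
system.cpu.default_p_state = 'UNDEFINED'     # initial power state
system.cpu.p_state_clk_gate_min = '1ns'      # 1000 ticks at the 1 THz tick rate
system.cpu.p_state_clk_gate_max = '1s'       # 1000000000000 ticks
system.cpu.p_state_clk_gate_bins = 20        # histogram bins for gating stats
# power_model is left at its default (Null), as in the reference files.
```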
diff --git a/tests/quick/se/00.hello/ref/alpha/linux/minor-timing/config.ini b/tests/quick/se/00.hello/ref/alpha/linux/minor-timing/config.ini index b45f7c576..6320b231e 100644 --- a/tests/quick/se/00.hello/ref/alpha/linux/minor-timing/config.ini +++ b/tests/quick/se/00.hello/ref/alpha/linux/minor-timing/config.ini @@ -14,6 +14,7 @@ children=clk_domain cpu cpu_clk_domain dvfs_handler membus physmem voltage_domai boot_osflags=a cache_line_size=64 clk_domain=system.clk_domain +default_p_state=UNDEFINED eventq_index=0 exit_on_work_items=false init_param=0 @@ -27,6 +28,10 @@ memories=system.physmem mmap_using_noreserve=false multi_thread=false num_work_ids=16 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null readfile= symbolfile= thermal_components= @@ -59,6 +64,7 @@ decodeCycleInput=true decodeInputBufferSize=3 decodeInputWidth=2 decodeToExecuteForwardDelay=1 +default_p_state=UNDEFINED do_checkpoint_insts=true do_quiesce=true do_statistics_insts=true @@ -101,12 +107,17 @@ max_insts_any_thread=0 max_loads_all_threads=0 max_loads_any_thread=0 numThreads=1 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null profile=0 progress_interval=0 simpoint_start_insts= socket_id=0 switched_out=false system=system +threadPolicy=RoundRobin tracer=system.cpu.tracer workload=system.cpu.workload dcache_port=system.cpu.dcache.cpu_side @@ -142,12 +153,17 @@ addr_ranges=0:18446744073709551615 assoc=2 clk_domain=system.cpu_clk_domain clusivity=mostly_incl +default_p_state=UNDEFINED demand_mshr_reserve=1 eventq_index=0 hit_latency=2 is_read_only=false max_miss_count=0 mshrs=4 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null prefetch_on_access=false prefetcher=Null response_latency=2 @@ -166,8 +182,13 @@ type=LRU assoc=2 block_size=64 clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 hit_latency=2 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sequential_access=false size=262144 @@ -566,12 +587,17 @@ addr_ranges=0:18446744073709551615 assoc=2 clk_domain=system.cpu_clk_domain clusivity=mostly_incl +default_p_state=UNDEFINED demand_mshr_reserve=1 eventq_index=0 hit_latency=2 is_read_only=true max_miss_count=0 mshrs=4 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null prefetch_on_access=false prefetcher=Null response_latency=2 @@ -590,8 +616,13 @@ type=LRU assoc=2 block_size=64 clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 hit_latency=2 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sequential_access=false size=131072 @@ -616,12 +647,17 @@ addr_ranges=0:18446744073709551615 assoc=8 clk_domain=system.cpu_clk_domain clusivity=mostly_incl +default_p_state=UNDEFINED demand_mshr_reserve=1 eventq_index=0 hit_latency=20 is_read_only=false max_miss_count=0 mshrs=20 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null prefetch_on_access=false prefetcher=Null response_latency=20 @@ -640,8 +676,13 @@ type=LRU assoc=8 block_size=64 clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 hit_latency=20 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sequential_access=false size=2097152 @@ -649,10 +690,15 @@ 
size=2097152 type=CoherentXBar children=snoop_filter clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 forward_latency=0 frontend_latency=1 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 point_of_coherency=false +power_model=Null response_latency=1 snoop_filter=system.cpu.toL2Bus.snoop_filter snoop_response_latency=1 @@ -683,7 +729,7 @@ env= errout=cerr euid=100 eventq_index=0 -executable=/home/stever/hg/m5sim.org/gem5/tests/test-progs/hello/bin/alpha/linux/hello +executable=/arm/projectscratch/randd/systems/dist/test-progs/hello/bin/alpha/linux/hello gid=100 input=cin kvmInSE=false @@ -715,10 +761,15 @@ transition_latency=100000000 [system.membus] type=CoherentXBar clk_domain=system.clk_domain +default_p_state=UNDEFINED eventq_index=0 forward_latency=4 frontend_latency=3 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 point_of_coherency=true +power_model=Null response_latency=2 snoop_filter=Null snoop_response_latency=4 @@ -762,6 +813,7 @@ burst_length=8 channels=1 clk_domain=system.clk_domain conf_table_reported=true +default_p_state=UNDEFINED device_bus_width=8 device_rowbuffer_size=1024 device_size=536870912 @@ -773,7 +825,11 @@ max_accesses_per_row=16 mem_sched_policy=frfcfs min_writes_per_switch=16 null=false +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 page_policy=open_adaptive +power_model=Null range=0:134217727 ranks_per_channel=2 read_buffer_size=32 diff --git a/tests/quick/se/00.hello/ref/alpha/linux/minor-timing/simerr b/tests/quick/se/00.hello/ref/alpha/linux/minor-timing/simerr index 341b479f7..bbcd9d751 100755 --- a/tests/quick/se/00.hello/ref/alpha/linux/minor-timing/simerr +++ b/tests/quick/se/00.hello/ref/alpha/linux/minor-timing/simerr @@ -1,2 +1,3 @@ warn: DRAM device capacity (8192 Mbytes) does not match the address range assigned (128 Mbytes) warn: Sockets disabled, not accepting gdb connections +warn: ClockedObject: More than one power state change request encountered within the same simulation tick diff --git a/tests/quick/se/00.hello/ref/alpha/linux/minor-timing/simout b/tests/quick/se/00.hello/ref/alpha/linux/minor-timing/simout index 7264993fd..70f465dc7 100755 --- a/tests/quick/se/00.hello/ref/alpha/linux/minor-timing/simout +++ b/tests/quick/se/00.hello/ref/alpha/linux/minor-timing/simout @@ -3,13 +3,13 @@ Redirecting stderr to build/ALPHA/tests/opt/quick/se/00.hello/alpha/linux/minor- gem5 Simulator System. http://gem5.org gem5 is copyrighted software; use the --copyright option for details. -gem5 compiled Mar 14 2016 21:54:46 -gem5 started Mar 14 2016 21:56:23 -gem5 executing on phenom, pid 28115 -command line: build/ALPHA/gem5.opt -d build/ALPHA/tests/opt/quick/se/00.hello/alpha/linux/minor-timing -re /home/stever/hg/m5sim.org/gem5/tests/run.py build/ALPHA/tests/opt/quick/se/00.hello/alpha/linux/minor-timing +gem5 compiled Jul 19 2016 12:23:51 +gem5 started Jul 19 2016 12:24:27 +gem5 executing on e108600-lin, pid 39611 +command line: /work/curdun01/gem5-external.hg/build/ALPHA/gem5.opt -d build/ALPHA/tests/opt/quick/se/00.hello/alpha/linux/minor-timing -re /work/curdun01/gem5-external.hg/tests/testing/../run.py quick/se/00.hello/alpha/linux/minor-timing Global frequency set at 1000000000000 ticks per second info: Entering event queue @ 0. Starting simulation... info: Increasing stack size by one page. Hello world! 
-Exiting @ tick 37629000 because target called exit() +Exiting @ tick 37822000 because target called exit() diff --git a/tests/quick/se/00.hello/ref/alpha/linux/minor-timing/stats.txt b/tests/quick/se/00.hello/ref/alpha/linux/minor-timing/stats.txt index 7fa71daaa..20c464e74 100644 --- a/tests/quick/se/00.hello/ref/alpha/linux/minor-timing/stats.txt +++ b/tests/quick/se/00.hello/ref/alpha/linux/minor-timing/stats.txt @@ -1,19 +1,19 @@ ---------- Begin Simulation Statistics ---------- -sim_seconds 0.000037 # Number of seconds simulated -sim_ticks 37494000 # Number of ticks simulated -final_tick 37494000 # Number of ticks from beginning of simulation (restored from checkpoints and never reset) +sim_seconds 0.000038 # Number of seconds simulated +sim_ticks 37822000 # Number of ticks simulated +final_tick 37822000 # Number of ticks from beginning of simulation (restored from checkpoints and never reset) sim_freq 1000000000000 # Frequency of simulated ticks -host_inst_rate 200557 # Simulator instruction rate (inst/s) -host_op_rate 200498 # Simulator op (including micro ops) rate (op/s) -host_tick_rate 1171902214 # Simulator tick rate (ticks/s) -host_mem_usage 294520 # Number of bytes of host memory used -host_seconds 0.03 # Real time elapsed on the host +host_inst_rate 100508 # Simulator instruction rate (inst/s) +host_op_rate 100471 # Simulator op (including micro ops) rate (op/s) +host_tick_rate 592356577 # Simulator tick rate (ticks/s) +host_mem_usage 249008 # Number of bytes of host memory used +host_seconds 0.06 # Real time elapsed on the host sim_insts 6413 # Number of instructions simulated sim_ops 6413 # Number of ops (including micro ops) simulated system.voltage_domain.voltage 1 # Voltage in Volts system.clk_domain.clock 1000 # Clock period in ticks -system.physmem.pwrStateResidencyTicks::UNDEFINED 37494000 # Cumulative time (in ticks) in various power states +system.physmem.pwrStateResidencyTicks::UNDEFINED 37822000 # Cumulative time (in ticks) in various power states system.physmem.bytes_read::cpu.inst 23232 # Number of bytes read from this memory system.physmem.bytes_read::cpu.data 10816 # Number of bytes read from this memory system.physmem.bytes_read::total 34048 # Number of bytes read from this memory @@ -22,14 +22,14 @@ system.physmem.bytes_inst_read::total 23232 # Nu system.physmem.num_reads::cpu.inst 363 # Number of read requests responded to by this memory system.physmem.num_reads::cpu.data 169 # Number of read requests responded to by this memory system.physmem.num_reads::total 532 # Number of read requests responded to by this memory -system.physmem.bw_read::cpu.inst 619619139 # Total read bandwidth from this memory (bytes/s) -system.physmem.bw_read::cpu.data 288472822 # Total read bandwidth from this memory (bytes/s) -system.physmem.bw_read::total 908091961 # Total read bandwidth from this memory (bytes/s) -system.physmem.bw_inst_read::cpu.inst 619619139 # Instruction read bandwidth from this memory (bytes/s) -system.physmem.bw_inst_read::total 619619139 # Instruction read bandwidth from this memory (bytes/s) -system.physmem.bw_total::cpu.inst 619619139 # Total bandwidth to/from this memory (bytes/s) -system.physmem.bw_total::cpu.data 288472822 # Total bandwidth to/from this memory (bytes/s) -system.physmem.bw_total::total 908091961 # Total bandwidth to/from this memory (bytes/s) +system.physmem.bw_read::cpu.inst 614245677 # Total read bandwidth from this memory (bytes/s) +system.physmem.bw_read::cpu.data 285971128 # Total read bandwidth from this memory (bytes/s) 
+system.physmem.bw_read::total 900216805 # Total read bandwidth from this memory (bytes/s) +system.physmem.bw_inst_read::cpu.inst 614245677 # Instruction read bandwidth from this memory (bytes/s) +system.physmem.bw_inst_read::total 614245677 # Instruction read bandwidth from this memory (bytes/s) +system.physmem.bw_total::cpu.inst 614245677 # Total bandwidth to/from this memory (bytes/s) +system.physmem.bw_total::cpu.data 285971128 # Total bandwidth to/from this memory (bytes/s) +system.physmem.bw_total::total 900216805 # Total bandwidth to/from this memory (bytes/s) system.physmem.readReqs 532 # Number of read requests accepted system.physmem.writeReqs 0 # Number of write requests accepted system.physmem.readBursts 532 # Number of DRAM read bursts, including those serviced by the write queue @@ -76,7 +76,7 @@ system.physmem.perBankWrBursts::14 0 # Pe system.physmem.perBankWrBursts::15 0 # Per bank write bursts system.physmem.numRdRetry 0 # Number of times read queue was full causing retry system.physmem.numWrRetry 0 # Number of times write queue was full causing retry -system.physmem.totGap 37389500 # Total gap between requests +system.physmem.totGap 37718000 # Total gap between requests system.physmem.readPktSize::0 0 # Read request sizes (log2) system.physmem.readPktSize::1 0 # Read request sizes (log2) system.physmem.readPktSize::2 0 # Read request sizes (log2) @@ -91,9 +91,9 @@ system.physmem.writePktSize::3 0 # Wr system.physmem.writePktSize::4 0 # Write request sizes (log2) system.physmem.writePktSize::5 0 # Write request sizes (log2) system.physmem.writePktSize::6 0 # Write request sizes (log2) -system.physmem.rdQLenPdf::0 443 # What read queue length does an incoming req see -system.physmem.rdQLenPdf::1 84 # What read queue length does an incoming req see -system.physmem.rdQLenPdf::2 5 # What read queue length does an incoming req see +system.physmem.rdQLenPdf::0 446 # What read queue length does an incoming req see +system.physmem.rdQLenPdf::1 83 # What read queue length does an incoming req see +system.physmem.rdQLenPdf::2 3 # What read queue length does an incoming req see system.physmem.rdQLenPdf::3 0 # What read queue length does an incoming req see system.physmem.rdQLenPdf::4 0 # What read queue length does an incoming req see system.physmem.rdQLenPdf::5 0 # What read queue length does an incoming req see @@ -188,32 +188,32 @@ system.physmem.wrQLenPdf::61 0 # Wh system.physmem.wrQLenPdf::62 0 # What write queue length does an incoming req see system.physmem.wrQLenPdf::63 0 # What write queue length does an incoming req see system.physmem.bytesPerActivate::samples 82 # Bytes accessed per row activation -system.physmem.bytesPerActivate::mean 387.902439 # Bytes accessed per row activation -system.physmem.bytesPerActivate::gmean 251.688412 # Bytes accessed per row activation -system.physmem.bytesPerActivate::stdev 333.441746 # Bytes accessed per row activation -system.physmem.bytesPerActivate::0-127 19 23.17% 23.17% # Bytes accessed per row activation -system.physmem.bytesPerActivate::128-255 18 21.95% 45.12% # Bytes accessed per row activation -system.physmem.bytesPerActivate::256-383 11 13.41% 58.54% # Bytes accessed per row activation -system.physmem.bytesPerActivate::384-511 10 12.20% 70.73% # Bytes accessed per row activation -system.physmem.bytesPerActivate::512-639 1 1.22% 71.95% # Bytes accessed per row activation -system.physmem.bytesPerActivate::640-767 6 7.32% 79.27% # Bytes accessed per row activation -system.physmem.bytesPerActivate::768-895 4 4.88% 84.15% # Bytes 
accessed per row activation -system.physmem.bytesPerActivate::896-1023 5 6.10% 90.24% # Bytes accessed per row activation -system.physmem.bytesPerActivate::1024-1151 8 9.76% 100.00% # Bytes accessed per row activation +system.physmem.bytesPerActivate::mean 385.560976 # Bytes accessed per row activation +system.physmem.bytesPerActivate::gmean 252.880176 # Bytes accessed per row activation +system.physmem.bytesPerActivate::stdev 333.081835 # Bytes accessed per row activation +system.physmem.bytesPerActivate::0-127 17 20.73% 20.73% # Bytes accessed per row activation +system.physmem.bytesPerActivate::128-255 21 25.61% 46.34% # Bytes accessed per row activation +system.physmem.bytesPerActivate::256-383 9 10.98% 57.32% # Bytes accessed per row activation +system.physmem.bytesPerActivate::384-511 11 13.41% 70.73% # Bytes accessed per row activation +system.physmem.bytesPerActivate::512-639 4 4.88% 75.61% # Bytes accessed per row activation +system.physmem.bytesPerActivate::640-767 3 3.66% 79.27% # Bytes accessed per row activation +system.physmem.bytesPerActivate::768-895 3 3.66% 82.93% # Bytes accessed per row activation +system.physmem.bytesPerActivate::896-1023 5 6.10% 89.02% # Bytes accessed per row activation +system.physmem.bytesPerActivate::1024-1151 9 10.98% 100.00% # Bytes accessed per row activation system.physmem.bytesPerActivate::total 82 # Bytes accessed per row activation -system.physmem.totQLat 3129000 # Total ticks spent queuing -system.physmem.totMemAccLat 13104000 # Total ticks spent from burst creation until serviced by the DRAM +system.physmem.totQLat 3215000 # Total ticks spent queuing +system.physmem.totMemAccLat 13190000 # Total ticks spent from burst creation until serviced by the DRAM system.physmem.totBusLat 2660000 # Total ticks spent in databus transfers -system.physmem.avgQLat 5881.58 # Average queueing delay per DRAM burst +system.physmem.avgQLat 6043.23 # Average queueing delay per DRAM burst system.physmem.avgBusLat 5000.00 # Average bus latency per DRAM burst -system.physmem.avgMemAccLat 24631.58 # Average memory access latency per DRAM burst -system.physmem.avgRdBW 908.09 # Average DRAM read bandwidth in MiByte/s +system.physmem.avgMemAccLat 24793.23 # Average memory access latency per DRAM burst +system.physmem.avgRdBW 900.22 # Average DRAM read bandwidth in MiByte/s system.physmem.avgWrBW 0.00 # Average achieved write bandwidth in MiByte/s -system.physmem.avgRdBWSys 908.09 # Average system read bandwidth in MiByte/s +system.physmem.avgRdBWSys 900.22 # Average system read bandwidth in MiByte/s system.physmem.avgWrBWSys 0.00 # Average system write bandwidth in MiByte/s system.physmem.peakBW 12800.00 # Theoretical peak bandwidth in MiByte/s -system.physmem.busUtil 7.09 # Data bus utilization in percentage -system.physmem.busUtilRead 7.09 # Data bus utilization in percentage for reads +system.physmem.busUtil 7.03 # Data bus utilization in percentage +system.physmem.busUtilRead 7.03 # Data bus utilization in percentage for reads system.physmem.busUtilWrite 0.00 # Data bus utilization in percentage for writes system.physmem.avgRdQLen 1.18 # Average read queue length when enqueuing system.physmem.avgWrQLen 0.00 # Average write queue length when enqueuing @@ -221,7 +221,7 @@ system.physmem.readRowHits 438 # Nu system.physmem.writeRowHits 0 # Number of row buffer hits during writes system.physmem.readRowHitRate 82.33 # Row buffer hit rate for reads system.physmem.writeRowHitRate nan # Row buffer hit rate for writes -system.physmem.avgGap 70281.02 # Average gap between 
requests +system.physmem.avgGap 70898.50 # Average gap between requests system.physmem.pageHitRate 82.33 # Row buffer hit rate, read and write combined system.physmem_0.actEnergy 234360 # Energy for activate commands per rank (pJ) system.physmem_0.preEnergy 127875 # Energy for precharge commands per rank (pJ) @@ -232,60 +232,60 @@ system.physmem_0.actBackEnergy 21404070 # En system.physmem_0.preBackEnergy 67500 # Energy for precharge background per rank (pJ) system.physmem_0.totalEnergy 25911645 # Total energy per rank (pJ) system.physmem_0.averagePower 825.080242 # Core power per rank (mW) -system.physmem_0.memoryStateTime::IDLE 16000 # Time in different power states +system.physmem_0.memoryStateTime::IDLE 366250 # Time in different power states system.physmem_0.memoryStateTime::REF 1040000 # Time in different power states system.physmem_0.memoryStateTime::PRE_PDN 0 # Time in different power states -system.physmem_0.memoryStateTime::ACT 30362750 # Time in different power states +system.physmem_0.memoryStateTime::ACT 30363250 # Time in different power states system.physmem_0.memoryStateTime::ACT_PDN 0 # Time in different power states system.physmem_1.actEnergy 340200 # Energy for activate commands per rank (pJ) system.physmem_1.preEnergy 185625 # Energy for precharge commands per rank (pJ) -system.physmem_1.readEnergy 1552200 # Energy for read commands per rank (pJ) +system.physmem_1.readEnergy 1521000 # Energy for read commands per rank (pJ) system.physmem_1.writeEnergy 0 # Energy for write commands per rank (pJ) system.physmem_1.refreshEnergy 2034240 # Energy for refresh commands per rank (pJ) -system.physmem_1.actBackEnergy 20432790 # Energy for active background per rank (pJ) -system.physmem_1.preBackEnergy 920250 # Energy for precharge background per rank (pJ) -system.physmem_1.totalEnergy 25465305 # Total energy per rank (pJ) -system.physmem_1.averagePower 810.835582 # Core power per rank (mW) -system.physmem_1.memoryStateTime::IDLE 1481500 # Time in different power states +system.physmem_1.actBackEnergy 20148930 # Energy for active background per rank (pJ) +system.physmem_1.preBackEnergy 1168500 # Energy for precharge background per rank (pJ) +system.physmem_1.totalEnergy 25398495 # Total energy per rank (pJ) +system.physmem_1.averagePower 808.740487 # Core power per rank (mW) +system.physmem_1.memoryStateTime::IDLE 1794750 # Time in different power states system.physmem_1.memoryStateTime::REF 1040000 # Time in different power states system.physmem_1.memoryStateTime::PRE_PDN 0 # Time in different power states -system.physmem_1.memoryStateTime::ACT 28986000 # Time in different power states +system.physmem_1.memoryStateTime::ACT 28584000 # Time in different power states system.physmem_1.memoryStateTime::ACT_PDN 0 # Time in different power states -system.pwrStateResidencyTicks::UNDEFINED 37494000 # Cumulative time (in ticks) in various power states -system.cpu.branchPred.lookups 2009 # Number of BP lookups -system.cpu.branchPred.condPredicted 1241 # Number of conditional branches predicted +system.pwrStateResidencyTicks::UNDEFINED 37822000 # Cumulative time (in ticks) in various power states +system.cpu.branchPred.lookups 2005 # Number of BP lookups +system.cpu.branchPred.condPredicted 1239 # Number of conditional branches predicted system.cpu.branchPred.condIncorrect 379 # Number of conditional branches incorrect -system.cpu.branchPred.BTBLookups 1611 # Number of BTB lookups -system.cpu.branchPred.BTBHits 378 # Number of BTB hits +system.cpu.branchPred.BTBLookups 1607 # Number of BTB 
lookups +system.cpu.branchPred.BTBHits 377 # Number of BTB hits system.cpu.branchPred.BTBCorrect 0 # Number of correct BTB predictions (this stat may not work properly. -system.cpu.branchPred.BTBHitPct 23.463687 # BTB Hit Percentage -system.cpu.branchPred.usedRAS 234 # Number of times the RAS was used to get a target. +system.cpu.branchPred.BTBHitPct 23.459863 # BTB Hit Percentage +system.cpu.branchPred.usedRAS 235 # Number of times the RAS was used to get a target. system.cpu.branchPred.RASInCorrect 14 # Number of incorrect RAS predictions. -system.cpu.branchPred.indirectLookups 338 # Number of indirect predictor lookups. +system.cpu.branchPred.indirectLookups 336 # Number of indirect predictor lookups. system.cpu.branchPred.indirectHits 13 # Number of indirect target hits. -system.cpu.branchPred.indirectMisses 325 # Number of indirect misses. +system.cpu.branchPred.indirectMisses 323 # Number of indirect misses. system.cpu.branchPredindirectMispredicted 113 # Number of mispredicted indirect branches. system.cpu_clk_domain.clock 500 # Clock period in ticks system.cpu.dtb.fetch_hits 0 # ITB hits system.cpu.dtb.fetch_misses 0 # ITB misses system.cpu.dtb.fetch_acv 0 # ITB acv system.cpu.dtb.fetch_accesses 0 # ITB accesses -system.cpu.dtb.read_hits 1378 # DTB read hits +system.cpu.dtb.read_hits 1365 # DTB read hits system.cpu.dtb.read_misses 11 # DTB read misses system.cpu.dtb.read_acv 0 # DTB read access violations -system.cpu.dtb.read_accesses 1389 # DTB read accesses -system.cpu.dtb.write_hits 885 # DTB write hits +system.cpu.dtb.read_accesses 1376 # DTB read accesses +system.cpu.dtb.write_hits 884 # DTB write hits system.cpu.dtb.write_misses 3 # DTB write misses system.cpu.dtb.write_acv 0 # DTB write access violations -system.cpu.dtb.write_accesses 888 # DTB write accesses -system.cpu.dtb.data_hits 2263 # DTB hits +system.cpu.dtb.write_accesses 887 # DTB write accesses +system.cpu.dtb.data_hits 2249 # DTB hits system.cpu.dtb.data_misses 14 # DTB misses system.cpu.dtb.data_acv 0 # DTB access violations -system.cpu.dtb.data_accesses 2277 # DTB accesses -system.cpu.itb.fetch_hits 2687 # ITB hits +system.cpu.dtb.data_accesses 2263 # DTB accesses +system.cpu.itb.fetch_hits 2686 # ITB hits system.cpu.itb.fetch_misses 17 # ITB misses system.cpu.itb.fetch_acv 0 # ITB acv -system.cpu.itb.fetch_accesses 2704 # ITB accesses +system.cpu.itb.fetch_accesses 2703 # ITB accesses system.cpu.itb.read_hits 0 # DTB read hits system.cpu.itb.read_misses 0 # DTB read misses system.cpu.itb.read_acv 0 # DTB read access violations @@ -299,16 +299,16 @@ system.cpu.itb.data_misses 0 # DT system.cpu.itb.data_acv 0 # DTB access violations system.cpu.itb.data_accesses 0 # DTB accesses system.cpu.workload.num_syscalls 17 # Number of system calls -system.cpu.pwrStateResidencyTicks::ON 37494000 # Cumulative time (in ticks) in various power states -system.cpu.numCycles 74988 # number of cpu cycles simulated +system.cpu.pwrStateResidencyTicks::ON 37822000 # Cumulative time (in ticks) in various power states +system.cpu.numCycles 75644 # number of cpu cycles simulated system.cpu.numWorkItemsStarted 0 # number of work items this cpu started system.cpu.numWorkItemsCompleted 0 # number of work items this cpu completed system.cpu.committedInsts 6413 # Number of instructions committed system.cpu.committedOps 6413 # Number of ops (including micro ops) committed -system.cpu.discardedOps 1148 # Number of ops (including micro ops) which were discarded before commit +system.cpu.discardedOps 1095 # Number of ops (including micro ops) 
which were discarded before commit system.cpu.numFetchSuspends 0 # Number of times Execute suspended instruction fetching -system.cpu.cpi 11.693123 # CPI: cycles per instruction -system.cpu.ipc 0.085520 # IPC: instructions per cycle +system.cpu.cpi 11.795416 # CPI: cycles per instruction +system.cpu.ipc 0.084779 # IPC: instructions per cycle system.cpu.op_class_0::No_OpClass 19 0.30% 0.30% # Class of committed instruction system.cpu.op_class_0::IntAlu 4331 67.53% 67.83% # Class of committed instruction system.cpu.op_class_0::IntMult 1 0.02% 67.85% # Class of committed instruction @@ -344,87 +344,85 @@ system.cpu.op_class_0::MemWrite 868 13.54% 100.00% # Cl system.cpu.op_class_0::IprAccess 0 0.00% 100.00% # Class of committed instruction system.cpu.op_class_0::InstPrefetch 0 0.00% 100.00% # Class of committed instruction system.cpu.op_class_0::total 6413 # Class of committed instruction -system.cpu.tickCycles 12653 # Number of cycles that the object actually ticked -system.cpu.idleCycles 62335 # Total number of cycles that the object has spent stopped -system.cpu.dcache.tags.pwrStateResidencyTicks::UNDEFINED 37494000 # Cumulative time (in ticks) in various power states +system.cpu.tickCycles 12651 # Number of cycles that the object actually ticked +system.cpu.idleCycles 62993 # Total number of cycles that the object has spent stopped +system.cpu.dcache.tags.pwrStateResidencyTicks::UNDEFINED 37822000 # Cumulative time (in ticks) in various power states system.cpu.dcache.tags.replacements 0 # number of replacements -system.cpu.dcache.tags.tagsinuse 104.135823 # Cycle average of tags in use -system.cpu.dcache.tags.total_refs 1980 # Total number of references to valid blocks. +system.cpu.dcache.tags.tagsinuse 103.701168 # Cycle average of tags in use +system.cpu.dcache.tags.total_refs 1990 # Total number of references to valid blocks. system.cpu.dcache.tags.sampled_refs 169 # Sample count of references to valid blocks. -system.cpu.dcache.tags.avg_refs 11.715976 # Average number of references to valid blocks. +system.cpu.dcache.tags.avg_refs 11.775148 # Average number of references to valid blocks. system.cpu.dcache.tags.warmup_cycle 0 # Cycle when the warmup percentage was hit. 
-system.cpu.dcache.tags.occ_blocks::cpu.data 104.135823 # Average occupied blocks per requestor -system.cpu.dcache.tags.occ_percent::cpu.data 0.025424 # Average percentage of cache occupancy -system.cpu.dcache.tags.occ_percent::total 0.025424 # Average percentage of cache occupancy +system.cpu.dcache.tags.occ_blocks::cpu.data 103.701168 # Average occupied blocks per requestor +system.cpu.dcache.tags.occ_percent::cpu.data 0.025318 # Average percentage of cache occupancy +system.cpu.dcache.tags.occ_percent::total 0.025318 # Average percentage of cache occupancy system.cpu.dcache.tags.occ_task_id_blocks::1024 169 # Occupied blocks per task id system.cpu.dcache.tags.age_task_id_blocks_1024::0 22 # Occupied blocks per task id system.cpu.dcache.tags.age_task_id_blocks_1024::1 147 # Occupied blocks per task id system.cpu.dcache.tags.occ_task_id_percent::1024 0.041260 # Percentage of cache occupancy per task id -system.cpu.dcache.tags.tag_accesses 4583 # Number of tag accesses -system.cpu.dcache.tags.data_accesses 4583 # Number of data accesses -system.cpu.dcache.pwrStateResidencyTicks::UNDEFINED 37494000 # Cumulative time (in ticks) in various power states -system.cpu.dcache.ReadReq_hits::cpu.data 1240 # number of ReadReq hits -system.cpu.dcache.ReadReq_hits::total 1240 # number of ReadReq hits +system.cpu.dcache.tags.tag_accesses 4591 # Number of tag accesses +system.cpu.dcache.tags.data_accesses 4591 # Number of data accesses +system.cpu.dcache.pwrStateResidencyTicks::UNDEFINED 37822000 # Cumulative time (in ticks) in various power states +system.cpu.dcache.ReadReq_hits::cpu.data 1250 # number of ReadReq hits +system.cpu.dcache.ReadReq_hits::total 1250 # number of ReadReq hits system.cpu.dcache.WriteReq_hits::cpu.data 740 # number of WriteReq hits system.cpu.dcache.WriteReq_hits::total 740 # number of WriteReq hits -system.cpu.dcache.demand_hits::cpu.data 1980 # number of demand (read+write) hits -system.cpu.dcache.demand_hits::total 1980 # number of demand (read+write) hits -system.cpu.dcache.overall_hits::cpu.data 1980 # number of overall hits -system.cpu.dcache.overall_hits::total 1980 # number of overall hits -system.cpu.dcache.ReadReq_misses::cpu.data 102 # number of ReadReq misses -system.cpu.dcache.ReadReq_misses::total 102 # number of ReadReq misses +system.cpu.dcache.demand_hits::cpu.data 1990 # number of demand (read+write) hits +system.cpu.dcache.demand_hits::total 1990 # number of demand (read+write) hits +system.cpu.dcache.overall_hits::cpu.data 1990 # number of overall hits +system.cpu.dcache.overall_hits::total 1990 # number of overall hits +system.cpu.dcache.ReadReq_misses::cpu.data 96 # number of ReadReq misses +system.cpu.dcache.ReadReq_misses::total 96 # number of ReadReq misses system.cpu.dcache.WriteReq_misses::cpu.data 125 # number of WriteReq misses system.cpu.dcache.WriteReq_misses::total 125 # number of WriteReq misses -system.cpu.dcache.demand_misses::cpu.data 227 # number of demand (read+write) misses -system.cpu.dcache.demand_misses::total 227 # number of demand (read+write) misses -system.cpu.dcache.overall_misses::cpu.data 227 # number of overall misses -system.cpu.dcache.overall_misses::total 227 # number of overall misses -system.cpu.dcache.ReadReq_miss_latency::cpu.data 8280500 # number of ReadReq miss cycles -system.cpu.dcache.ReadReq_miss_latency::total 8280500 # number of ReadReq miss cycles -system.cpu.dcache.WriteReq_miss_latency::cpu.data 9164500 # number of WriteReq miss cycles -system.cpu.dcache.WriteReq_miss_latency::total 9164500 # number of WriteReq 
miss cycles -system.cpu.dcache.demand_miss_latency::cpu.data 17445000 # number of demand (read+write) miss cycles -system.cpu.dcache.demand_miss_latency::total 17445000 # number of demand (read+write) miss cycles -system.cpu.dcache.overall_miss_latency::cpu.data 17445000 # number of overall miss cycles -system.cpu.dcache.overall_miss_latency::total 17445000 # number of overall miss cycles -system.cpu.dcache.ReadReq_accesses::cpu.data 1342 # number of ReadReq accesses(hits+misses) -system.cpu.dcache.ReadReq_accesses::total 1342 # number of ReadReq accesses(hits+misses) +system.cpu.dcache.demand_misses::cpu.data 221 # number of demand (read+write) misses +system.cpu.dcache.demand_misses::total 221 # number of demand (read+write) misses +system.cpu.dcache.overall_misses::cpu.data 221 # number of overall misses +system.cpu.dcache.overall_misses::total 221 # number of overall misses +system.cpu.dcache.ReadReq_miss_latency::cpu.data 7590000 # number of ReadReq miss cycles +system.cpu.dcache.ReadReq_miss_latency::total 7590000 # number of ReadReq miss cycles +system.cpu.dcache.WriteReq_miss_latency::cpu.data 9158000 # number of WriteReq miss cycles +system.cpu.dcache.WriteReq_miss_latency::total 9158000 # number of WriteReq miss cycles +system.cpu.dcache.demand_miss_latency::cpu.data 16748000 # number of demand (read+write) miss cycles +system.cpu.dcache.demand_miss_latency::total 16748000 # number of demand (read+write) miss cycles +system.cpu.dcache.overall_miss_latency::cpu.data 16748000 # number of overall miss cycles +system.cpu.dcache.overall_miss_latency::total 16748000 # number of overall miss cycles +system.cpu.dcache.ReadReq_accesses::cpu.data 1346 # number of ReadReq accesses(hits+misses) +system.cpu.dcache.ReadReq_accesses::total 1346 # number of ReadReq accesses(hits+misses) system.cpu.dcache.WriteReq_accesses::cpu.data 865 # number of WriteReq accesses(hits+misses) system.cpu.dcache.WriteReq_accesses::total 865 # number of WriteReq accesses(hits+misses) -system.cpu.dcache.demand_accesses::cpu.data 2207 # number of demand (read+write) accesses -system.cpu.dcache.demand_accesses::total 2207 # number of demand (read+write) accesses -system.cpu.dcache.overall_accesses::cpu.data 2207 # number of overall (read+write) accesses -system.cpu.dcache.overall_accesses::total 2207 # number of overall (read+write) accesses -system.cpu.dcache.ReadReq_miss_rate::cpu.data 0.076006 # miss rate for ReadReq accesses -system.cpu.dcache.ReadReq_miss_rate::total 0.076006 # miss rate for ReadReq accesses +system.cpu.dcache.demand_accesses::cpu.data 2211 # number of demand (read+write) accesses +system.cpu.dcache.demand_accesses::total 2211 # number of demand (read+write) accesses +system.cpu.dcache.overall_accesses::cpu.data 2211 # number of overall (read+write) accesses +system.cpu.dcache.overall_accesses::total 2211 # number of overall (read+write) accesses +system.cpu.dcache.ReadReq_miss_rate::cpu.data 0.071322 # miss rate for ReadReq accesses +system.cpu.dcache.ReadReq_miss_rate::total 0.071322 # miss rate for ReadReq accesses system.cpu.dcache.WriteReq_miss_rate::cpu.data 0.144509 # miss rate for WriteReq accesses system.cpu.dcache.WriteReq_miss_rate::total 0.144509 # miss rate for WriteReq accesses -system.cpu.dcache.demand_miss_rate::cpu.data 0.102855 # miss rate for demand accesses -system.cpu.dcache.demand_miss_rate::total 0.102855 # miss rate for demand accesses -system.cpu.dcache.overall_miss_rate::cpu.data 0.102855 # miss rate for overall accesses -system.cpu.dcache.overall_miss_rate::total 
0.102855 # miss rate for overall accesses -system.cpu.dcache.ReadReq_avg_miss_latency::cpu.data 81181.372549 # average ReadReq miss latency -system.cpu.dcache.ReadReq_avg_miss_latency::total 81181.372549 # average ReadReq miss latency -system.cpu.dcache.WriteReq_avg_miss_latency::cpu.data 73316 # average WriteReq miss latency -system.cpu.dcache.WriteReq_avg_miss_latency::total 73316 # average WriteReq miss latency -system.cpu.dcache.demand_avg_miss_latency::cpu.data 76850.220264 # average overall miss latency -system.cpu.dcache.demand_avg_miss_latency::total 76850.220264 # average overall miss latency -system.cpu.dcache.overall_avg_miss_latency::cpu.data 76850.220264 # average overall miss latency -system.cpu.dcache.overall_avg_miss_latency::total 76850.220264 # average overall miss latency +system.cpu.dcache.demand_miss_rate::cpu.data 0.099955 # miss rate for demand accesses +system.cpu.dcache.demand_miss_rate::total 0.099955 # miss rate for demand accesses +system.cpu.dcache.overall_miss_rate::cpu.data 0.099955 # miss rate for overall accesses +system.cpu.dcache.overall_miss_rate::total 0.099955 # miss rate for overall accesses +system.cpu.dcache.ReadReq_avg_miss_latency::cpu.data 79062.500000 # average ReadReq miss latency +system.cpu.dcache.ReadReq_avg_miss_latency::total 79062.500000 # average ReadReq miss latency +system.cpu.dcache.WriteReq_avg_miss_latency::cpu.data 73264 # average WriteReq miss latency +system.cpu.dcache.WriteReq_avg_miss_latency::total 73264 # average WriteReq miss latency +system.cpu.dcache.demand_avg_miss_latency::cpu.data 75782.805430 # average overall miss latency +system.cpu.dcache.demand_avg_miss_latency::total 75782.805430 # average overall miss latency +system.cpu.dcache.overall_avg_miss_latency::cpu.data 75782.805430 # average overall miss latency +system.cpu.dcache.overall_avg_miss_latency::total 75782.805430 # average overall miss latency system.cpu.dcache.blocked_cycles::no_mshrs 0 # number of cycles access was blocked system.cpu.dcache.blocked_cycles::no_targets 0 # number of cycles access was blocked system.cpu.dcache.blocked::no_mshrs 0 # number of cycles access was blocked system.cpu.dcache.blocked::no_targets 0 # number of cycles access was blocked system.cpu.dcache.avg_blocked_cycles::no_mshrs nan # average number of cycles each access was blocked system.cpu.dcache.avg_blocked_cycles::no_targets nan # average number of cycles each access was blocked -system.cpu.dcache.ReadReq_mshr_hits::cpu.data 6 # number of ReadReq MSHR hits -system.cpu.dcache.ReadReq_mshr_hits::total 6 # number of ReadReq MSHR hits system.cpu.dcache.WriteReq_mshr_hits::cpu.data 52 # number of WriteReq MSHR hits system.cpu.dcache.WriteReq_mshr_hits::total 52 # number of WriteReq MSHR hits -system.cpu.dcache.demand_mshr_hits::cpu.data 58 # number of demand (read+write) MSHR hits -system.cpu.dcache.demand_mshr_hits::total 58 # number of demand (read+write) MSHR hits -system.cpu.dcache.overall_mshr_hits::cpu.data 58 # number of overall MSHR hits -system.cpu.dcache.overall_mshr_hits::total 58 # number of overall MSHR hits +system.cpu.dcache.demand_mshr_hits::cpu.data 52 # number of demand (read+write) MSHR hits +system.cpu.dcache.demand_mshr_hits::total 52 # number of demand (read+write) MSHR hits +system.cpu.dcache.overall_mshr_hits::cpu.data 52 # number of overall MSHR hits +system.cpu.dcache.overall_mshr_hits::total 52 # number of overall MSHR hits system.cpu.dcache.ReadReq_mshr_misses::cpu.data 96 # number of ReadReq MSHR misses system.cpu.dcache.ReadReq_mshr_misses::total 96 # 
number of ReadReq MSHR misses system.cpu.dcache.WriteReq_mshr_misses::cpu.data 73 # number of WriteReq MSHR misses @@ -433,83 +431,83 @@ system.cpu.dcache.demand_mshr_misses::cpu.data 169 system.cpu.dcache.demand_mshr_misses::total 169 # number of demand (read+write) MSHR misses system.cpu.dcache.overall_mshr_misses::cpu.data 169 # number of overall MSHR misses system.cpu.dcache.overall_mshr_misses::total 169 # number of overall MSHR misses -system.cpu.dcache.ReadReq_mshr_miss_latency::cpu.data 7723000 # number of ReadReq MSHR miss cycles -system.cpu.dcache.ReadReq_mshr_miss_latency::total 7723000 # number of ReadReq MSHR miss cycles -system.cpu.dcache.WriteReq_mshr_miss_latency::cpu.data 5385500 # number of WriteReq MSHR miss cycles -system.cpu.dcache.WriteReq_mshr_miss_latency::total 5385500 # number of WriteReq MSHR miss cycles -system.cpu.dcache.demand_mshr_miss_latency::cpu.data 13108500 # number of demand (read+write) MSHR miss cycles -system.cpu.dcache.demand_mshr_miss_latency::total 13108500 # number of demand (read+write) MSHR miss cycles -system.cpu.dcache.overall_mshr_miss_latency::cpu.data 13108500 # number of overall MSHR miss cycles -system.cpu.dcache.overall_mshr_miss_latency::total 13108500 # number of overall MSHR miss cycles -system.cpu.dcache.ReadReq_mshr_miss_rate::cpu.data 0.071535 # mshr miss rate for ReadReq accesses -system.cpu.dcache.ReadReq_mshr_miss_rate::total 0.071535 # mshr miss rate for ReadReq accesses +system.cpu.dcache.ReadReq_mshr_miss_latency::cpu.data 7494000 # number of ReadReq MSHR miss cycles +system.cpu.dcache.ReadReq_mshr_miss_latency::total 7494000 # number of ReadReq MSHR miss cycles +system.cpu.dcache.WriteReq_mshr_miss_latency::cpu.data 5379500 # number of WriteReq MSHR miss cycles +system.cpu.dcache.WriteReq_mshr_miss_latency::total 5379500 # number of WriteReq MSHR miss cycles +system.cpu.dcache.demand_mshr_miss_latency::cpu.data 12873500 # number of demand (read+write) MSHR miss cycles +system.cpu.dcache.demand_mshr_miss_latency::total 12873500 # number of demand (read+write) MSHR miss cycles +system.cpu.dcache.overall_mshr_miss_latency::cpu.data 12873500 # number of overall MSHR miss cycles +system.cpu.dcache.overall_mshr_miss_latency::total 12873500 # number of overall MSHR miss cycles +system.cpu.dcache.ReadReq_mshr_miss_rate::cpu.data 0.071322 # mshr miss rate for ReadReq accesses +system.cpu.dcache.ReadReq_mshr_miss_rate::total 0.071322 # mshr miss rate for ReadReq accesses system.cpu.dcache.WriteReq_mshr_miss_rate::cpu.data 0.084393 # mshr miss rate for WriteReq accesses system.cpu.dcache.WriteReq_mshr_miss_rate::total 0.084393 # mshr miss rate for WriteReq accesses -system.cpu.dcache.demand_mshr_miss_rate::cpu.data 0.076575 # mshr miss rate for demand accesses -system.cpu.dcache.demand_mshr_miss_rate::total 0.076575 # mshr miss rate for demand accesses -system.cpu.dcache.overall_mshr_miss_rate::cpu.data 0.076575 # mshr miss rate for overall accesses -system.cpu.dcache.overall_mshr_miss_rate::total 0.076575 # mshr miss rate for overall accesses -system.cpu.dcache.ReadReq_avg_mshr_miss_latency::cpu.data 80447.916667 # average ReadReq mshr miss latency -system.cpu.dcache.ReadReq_avg_mshr_miss_latency::total 80447.916667 # average ReadReq mshr miss latency -system.cpu.dcache.WriteReq_avg_mshr_miss_latency::cpu.data 73773.972603 # average WriteReq mshr miss latency -system.cpu.dcache.WriteReq_avg_mshr_miss_latency::total 73773.972603 # average WriteReq mshr miss latency -system.cpu.dcache.demand_avg_mshr_miss_latency::cpu.data 77565.088757 
# average overall mshr miss latency -system.cpu.dcache.demand_avg_mshr_miss_latency::total 77565.088757 # average overall mshr miss latency -system.cpu.dcache.overall_avg_mshr_miss_latency::cpu.data 77565.088757 # average overall mshr miss latency -system.cpu.dcache.overall_avg_mshr_miss_latency::total 77565.088757 # average overall mshr miss latency -system.cpu.icache.tags.pwrStateResidencyTicks::UNDEFINED 37494000 # Cumulative time (in ticks) in various power states +system.cpu.dcache.demand_mshr_miss_rate::cpu.data 0.076436 # mshr miss rate for demand accesses +system.cpu.dcache.demand_mshr_miss_rate::total 0.076436 # mshr miss rate for demand accesses +system.cpu.dcache.overall_mshr_miss_rate::cpu.data 0.076436 # mshr miss rate for overall accesses +system.cpu.dcache.overall_mshr_miss_rate::total 0.076436 # mshr miss rate for overall accesses +system.cpu.dcache.ReadReq_avg_mshr_miss_latency::cpu.data 78062.500000 # average ReadReq mshr miss latency +system.cpu.dcache.ReadReq_avg_mshr_miss_latency::total 78062.500000 # average ReadReq mshr miss latency +system.cpu.dcache.WriteReq_avg_mshr_miss_latency::cpu.data 73691.780822 # average WriteReq mshr miss latency +system.cpu.dcache.WriteReq_avg_mshr_miss_latency::total 73691.780822 # average WriteReq mshr miss latency +system.cpu.dcache.demand_avg_mshr_miss_latency::cpu.data 76174.556213 # average overall mshr miss latency +system.cpu.dcache.demand_avg_mshr_miss_latency::total 76174.556213 # average overall mshr miss latency +system.cpu.dcache.overall_avg_mshr_miss_latency::cpu.data 76174.556213 # average overall mshr miss latency +system.cpu.dcache.overall_avg_mshr_miss_latency::total 76174.556213 # average overall mshr miss latency +system.cpu.icache.tags.pwrStateResidencyTicks::UNDEFINED 37822000 # Cumulative time (in ticks) in various power states system.cpu.icache.tags.replacements 0 # number of replacements -system.cpu.icache.tags.tagsinuse 175.312988 # Cycle average of tags in use -system.cpu.icache.tags.total_refs 2323 # Total number of references to valid blocks. +system.cpu.icache.tags.tagsinuse 174.485780 # Cycle average of tags in use +system.cpu.icache.tags.total_refs 2322 # Total number of references to valid blocks. system.cpu.icache.tags.sampled_refs 364 # Sample count of references to valid blocks. -system.cpu.icache.tags.avg_refs 6.381868 # Average number of references to valid blocks. +system.cpu.icache.tags.avg_refs 6.379121 # Average number of references to valid blocks. system.cpu.icache.tags.warmup_cycle 0 # Cycle when the warmup percentage was hit. 
-system.cpu.icache.tags.occ_blocks::cpu.inst 175.312988 # Average occupied blocks per requestor -system.cpu.icache.tags.occ_percent::cpu.inst 0.085602 # Average percentage of cache occupancy -system.cpu.icache.tags.occ_percent::total 0.085602 # Average percentage of cache occupancy +system.cpu.icache.tags.occ_blocks::cpu.inst 174.485780 # Average occupied blocks per requestor +system.cpu.icache.tags.occ_percent::cpu.inst 0.085198 # Average percentage of cache occupancy +system.cpu.icache.tags.occ_percent::total 0.085198 # Average percentage of cache occupancy system.cpu.icache.tags.occ_task_id_blocks::1024 364 # Occupied blocks per task id -system.cpu.icache.tags.age_task_id_blocks_1024::0 106 # Occupied blocks per task id -system.cpu.icache.tags.age_task_id_blocks_1024::1 258 # Occupied blocks per task id +system.cpu.icache.tags.age_task_id_blocks_1024::0 105 # Occupied blocks per task id +system.cpu.icache.tags.age_task_id_blocks_1024::1 259 # Occupied blocks per task id system.cpu.icache.tags.occ_task_id_percent::1024 0.177734 # Percentage of cache occupancy per task id -system.cpu.icache.tags.tag_accesses 5738 # Number of tag accesses -system.cpu.icache.tags.data_accesses 5738 # Number of data accesses -system.cpu.icache.pwrStateResidencyTicks::UNDEFINED 37494000 # Cumulative time (in ticks) in various power states -system.cpu.icache.ReadReq_hits::cpu.inst 2323 # number of ReadReq hits -system.cpu.icache.ReadReq_hits::total 2323 # number of ReadReq hits -system.cpu.icache.demand_hits::cpu.inst 2323 # number of demand (read+write) hits -system.cpu.icache.demand_hits::total 2323 # number of demand (read+write) hits -system.cpu.icache.overall_hits::cpu.inst 2323 # number of overall hits -system.cpu.icache.overall_hits::total 2323 # number of overall hits +system.cpu.icache.tags.tag_accesses 5736 # Number of tag accesses +system.cpu.icache.tags.data_accesses 5736 # Number of data accesses +system.cpu.icache.pwrStateResidencyTicks::UNDEFINED 37822000 # Cumulative time (in ticks) in various power states +system.cpu.icache.ReadReq_hits::cpu.inst 2322 # number of ReadReq hits +system.cpu.icache.ReadReq_hits::total 2322 # number of ReadReq hits +system.cpu.icache.demand_hits::cpu.inst 2322 # number of demand (read+write) hits +system.cpu.icache.demand_hits::total 2322 # number of demand (read+write) hits +system.cpu.icache.overall_hits::cpu.inst 2322 # number of overall hits +system.cpu.icache.overall_hits::total 2322 # number of overall hits system.cpu.icache.ReadReq_misses::cpu.inst 364 # number of ReadReq misses system.cpu.icache.ReadReq_misses::total 364 # number of ReadReq misses system.cpu.icache.demand_misses::cpu.inst 364 # number of demand (read+write) misses system.cpu.icache.demand_misses::total 364 # number of demand (read+write) misses system.cpu.icache.overall_misses::cpu.inst 364 # number of overall misses system.cpu.icache.overall_misses::total 364 # number of overall misses -system.cpu.icache.ReadReq_miss_latency::cpu.inst 27766000 # number of ReadReq miss cycles -system.cpu.icache.ReadReq_miss_latency::total 27766000 # number of ReadReq miss cycles -system.cpu.icache.demand_miss_latency::cpu.inst 27766000 # number of demand (read+write) miss cycles -system.cpu.icache.demand_miss_latency::total 27766000 # number of demand (read+write) miss cycles -system.cpu.icache.overall_miss_latency::cpu.inst 27766000 # number of overall miss cycles -system.cpu.icache.overall_miss_latency::total 27766000 # number of overall miss cycles -system.cpu.icache.ReadReq_accesses::cpu.inst 2687 # 
number of ReadReq accesses(hits+misses) -system.cpu.icache.ReadReq_accesses::total 2687 # number of ReadReq accesses(hits+misses) -system.cpu.icache.demand_accesses::cpu.inst 2687 # number of demand (read+write) accesses -system.cpu.icache.demand_accesses::total 2687 # number of demand (read+write) accesses -system.cpu.icache.overall_accesses::cpu.inst 2687 # number of overall (read+write) accesses -system.cpu.icache.overall_accesses::total 2687 # number of overall (read+write) accesses -system.cpu.icache.ReadReq_miss_rate::cpu.inst 0.135467 # miss rate for ReadReq accesses -system.cpu.icache.ReadReq_miss_rate::total 0.135467 # miss rate for ReadReq accesses -system.cpu.icache.demand_miss_rate::cpu.inst 0.135467 # miss rate for demand accesses -system.cpu.icache.demand_miss_rate::total 0.135467 # miss rate for demand accesses -system.cpu.icache.overall_miss_rate::cpu.inst 0.135467 # miss rate for overall accesses -system.cpu.icache.overall_miss_rate::total 0.135467 # miss rate for overall accesses -system.cpu.icache.ReadReq_avg_miss_latency::cpu.inst 76280.219780 # average ReadReq miss latency -system.cpu.icache.ReadReq_avg_miss_latency::total 76280.219780 # average ReadReq miss latency -system.cpu.icache.demand_avg_miss_latency::cpu.inst 76280.219780 # average overall miss latency -system.cpu.icache.demand_avg_miss_latency::total 76280.219780 # average overall miss latency -system.cpu.icache.overall_avg_miss_latency::cpu.inst 76280.219780 # average overall miss latency -system.cpu.icache.overall_avg_miss_latency::total 76280.219780 # average overall miss latency +system.cpu.icache.ReadReq_miss_latency::cpu.inst 28087500 # number of ReadReq miss cycles +system.cpu.icache.ReadReq_miss_latency::total 28087500 # number of ReadReq miss cycles +system.cpu.icache.demand_miss_latency::cpu.inst 28087500 # number of demand (read+write) miss cycles +system.cpu.icache.demand_miss_latency::total 28087500 # number of demand (read+write) miss cycles +system.cpu.icache.overall_miss_latency::cpu.inst 28087500 # number of overall miss cycles +system.cpu.icache.overall_miss_latency::total 28087500 # number of overall miss cycles +system.cpu.icache.ReadReq_accesses::cpu.inst 2686 # number of ReadReq accesses(hits+misses) +system.cpu.icache.ReadReq_accesses::total 2686 # number of ReadReq accesses(hits+misses) +system.cpu.icache.demand_accesses::cpu.inst 2686 # number of demand (read+write) accesses +system.cpu.icache.demand_accesses::total 2686 # number of demand (read+write) accesses +system.cpu.icache.overall_accesses::cpu.inst 2686 # number of overall (read+write) accesses +system.cpu.icache.overall_accesses::total 2686 # number of overall (read+write) accesses +system.cpu.icache.ReadReq_miss_rate::cpu.inst 0.135517 # miss rate for ReadReq accesses +system.cpu.icache.ReadReq_miss_rate::total 0.135517 # miss rate for ReadReq accesses +system.cpu.icache.demand_miss_rate::cpu.inst 0.135517 # miss rate for demand accesses +system.cpu.icache.demand_miss_rate::total 0.135517 # miss rate for demand accesses +system.cpu.icache.overall_miss_rate::cpu.inst 0.135517 # miss rate for overall accesses +system.cpu.icache.overall_miss_rate::total 0.135517 # miss rate for overall accesses +system.cpu.icache.ReadReq_avg_miss_latency::cpu.inst 77163.461538 # average ReadReq miss latency +system.cpu.icache.ReadReq_avg_miss_latency::total 77163.461538 # average ReadReq miss latency +system.cpu.icache.demand_avg_miss_latency::cpu.inst 77163.461538 # average overall miss latency +system.cpu.icache.demand_avg_miss_latency::total 
77163.461538 # average overall miss latency +system.cpu.icache.overall_avg_miss_latency::cpu.inst 77163.461538 # average overall miss latency +system.cpu.icache.overall_avg_miss_latency::total 77163.461538 # average overall miss latency system.cpu.icache.blocked_cycles::no_mshrs 0 # number of cycles access was blocked system.cpu.icache.blocked_cycles::no_targets 0 # number of cycles access was blocked system.cpu.icache.blocked::no_mshrs 0 # number of cycles access was blocked @@ -522,43 +520,43 @@ system.cpu.icache.demand_mshr_misses::cpu.inst 364 system.cpu.icache.demand_mshr_misses::total 364 # number of demand (read+write) MSHR misses system.cpu.icache.overall_mshr_misses::cpu.inst 364 # number of overall MSHR misses system.cpu.icache.overall_mshr_misses::total 364 # number of overall MSHR misses -system.cpu.icache.ReadReq_mshr_miss_latency::cpu.inst 27402000 # number of ReadReq MSHR miss cycles -system.cpu.icache.ReadReq_mshr_miss_latency::total 27402000 # number of ReadReq MSHR miss cycles -system.cpu.icache.demand_mshr_miss_latency::cpu.inst 27402000 # number of demand (read+write) MSHR miss cycles -system.cpu.icache.demand_mshr_miss_latency::total 27402000 # number of demand (read+write) MSHR miss cycles -system.cpu.icache.overall_mshr_miss_latency::cpu.inst 27402000 # number of overall MSHR miss cycles -system.cpu.icache.overall_mshr_miss_latency::total 27402000 # number of overall MSHR miss cycles -system.cpu.icache.ReadReq_mshr_miss_rate::cpu.inst 0.135467 # mshr miss rate for ReadReq accesses -system.cpu.icache.ReadReq_mshr_miss_rate::total 0.135467 # mshr miss rate for ReadReq accesses -system.cpu.icache.demand_mshr_miss_rate::cpu.inst 0.135467 # mshr miss rate for demand accesses -system.cpu.icache.demand_mshr_miss_rate::total 0.135467 # mshr miss rate for demand accesses -system.cpu.icache.overall_mshr_miss_rate::cpu.inst 0.135467 # mshr miss rate for overall accesses -system.cpu.icache.overall_mshr_miss_rate::total 0.135467 # mshr miss rate for overall accesses -system.cpu.icache.ReadReq_avg_mshr_miss_latency::cpu.inst 75280.219780 # average ReadReq mshr miss latency -system.cpu.icache.ReadReq_avg_mshr_miss_latency::total 75280.219780 # average ReadReq mshr miss latency -system.cpu.icache.demand_avg_mshr_miss_latency::cpu.inst 75280.219780 # average overall mshr miss latency -system.cpu.icache.demand_avg_mshr_miss_latency::total 75280.219780 # average overall mshr miss latency -system.cpu.icache.overall_avg_mshr_miss_latency::cpu.inst 75280.219780 # average overall mshr miss latency -system.cpu.icache.overall_avg_mshr_miss_latency::total 75280.219780 # average overall mshr miss latency -system.cpu.l2cache.tags.pwrStateResidencyTicks::UNDEFINED 37494000 # Cumulative time (in ticks) in various power states +system.cpu.icache.ReadReq_mshr_miss_latency::cpu.inst 27723500 # number of ReadReq MSHR miss cycles +system.cpu.icache.ReadReq_mshr_miss_latency::total 27723500 # number of ReadReq MSHR miss cycles +system.cpu.icache.demand_mshr_miss_latency::cpu.inst 27723500 # number of demand (read+write) MSHR miss cycles +system.cpu.icache.demand_mshr_miss_latency::total 27723500 # number of demand (read+write) MSHR miss cycles +system.cpu.icache.overall_mshr_miss_latency::cpu.inst 27723500 # number of overall MSHR miss cycles +system.cpu.icache.overall_mshr_miss_latency::total 27723500 # number of overall MSHR miss cycles +system.cpu.icache.ReadReq_mshr_miss_rate::cpu.inst 0.135517 # mshr miss rate for ReadReq accesses +system.cpu.icache.ReadReq_mshr_miss_rate::total 0.135517 # mshr 
miss rate for ReadReq accesses +system.cpu.icache.demand_mshr_miss_rate::cpu.inst 0.135517 # mshr miss rate for demand accesses +system.cpu.icache.demand_mshr_miss_rate::total 0.135517 # mshr miss rate for demand accesses +system.cpu.icache.overall_mshr_miss_rate::cpu.inst 0.135517 # mshr miss rate for overall accesses +system.cpu.icache.overall_mshr_miss_rate::total 0.135517 # mshr miss rate for overall accesses +system.cpu.icache.ReadReq_avg_mshr_miss_latency::cpu.inst 76163.461538 # average ReadReq mshr miss latency +system.cpu.icache.ReadReq_avg_mshr_miss_latency::total 76163.461538 # average ReadReq mshr miss latency +system.cpu.icache.demand_avg_mshr_miss_latency::cpu.inst 76163.461538 # average overall mshr miss latency +system.cpu.icache.demand_avg_mshr_miss_latency::total 76163.461538 # average overall mshr miss latency +system.cpu.icache.overall_avg_mshr_miss_latency::cpu.inst 76163.461538 # average overall mshr miss latency +system.cpu.icache.overall_avg_mshr_miss_latency::total 76163.461538 # average overall mshr miss latency +system.cpu.l2cache.tags.pwrStateResidencyTicks::UNDEFINED 37822000 # Cumulative time (in ticks) in various power states system.cpu.l2cache.tags.replacements 0 # number of replacements -system.cpu.l2cache.tags.tagsinuse 233.336913 # Cycle average of tags in use +system.cpu.l2cache.tags.tagsinuse 232.271171 # Cycle average of tags in use system.cpu.l2cache.tags.total_refs 1 # Total number of references to valid blocks. system.cpu.l2cache.tags.sampled_refs 459 # Sample count of references to valid blocks. system.cpu.l2cache.tags.avg_refs 0.002179 # Average number of references to valid blocks. system.cpu.l2cache.tags.warmup_cycle 0 # Cycle when the warmup percentage was hit. -system.cpu.l2cache.tags.occ_blocks::cpu.inst 175.327844 # Average occupied blocks per requestor -system.cpu.l2cache.tags.occ_blocks::cpu.data 58.009069 # Average occupied blocks per requestor -system.cpu.l2cache.tags.occ_percent::cpu.inst 0.005351 # Average percentage of cache occupancy -system.cpu.l2cache.tags.occ_percent::cpu.data 0.001770 # Average percentage of cache occupancy -system.cpu.l2cache.tags.occ_percent::total 0.007121 # Average percentage of cache occupancy +system.cpu.l2cache.tags.occ_blocks::cpu.inst 174.500375 # Average occupied blocks per requestor +system.cpu.l2cache.tags.occ_blocks::cpu.data 57.770796 # Average occupied blocks per requestor +system.cpu.l2cache.tags.occ_percent::cpu.inst 0.005325 # Average percentage of cache occupancy +system.cpu.l2cache.tags.occ_percent::cpu.data 0.001763 # Average percentage of cache occupancy +system.cpu.l2cache.tags.occ_percent::total 0.007088 # Average percentage of cache occupancy system.cpu.l2cache.tags.occ_task_id_blocks::1024 459 # Occupied blocks per task id -system.cpu.l2cache.tags.age_task_id_blocks_1024::0 122 # Occupied blocks per task id -system.cpu.l2cache.tags.age_task_id_blocks_1024::1 337 # Occupied blocks per task id +system.cpu.l2cache.tags.age_task_id_blocks_1024::0 121 # Occupied blocks per task id +system.cpu.l2cache.tags.age_task_id_blocks_1024::1 338 # Occupied blocks per task id system.cpu.l2cache.tags.occ_task_id_percent::1024 0.014008 # Percentage of cache occupancy per task id system.cpu.l2cache.tags.tag_accesses 4796 # Number of tag accesses system.cpu.l2cache.tags.data_accesses 4796 # Number of data accesses -system.cpu.l2cache.pwrStateResidencyTicks::UNDEFINED 37494000 # Cumulative time (in ticks) in various power states +system.cpu.l2cache.pwrStateResidencyTicks::UNDEFINED 37822000 # Cumulative time 
(in ticks) in various power states system.cpu.l2cache.ReadCleanReq_hits::cpu.inst 1 # number of ReadCleanReq hits system.cpu.l2cache.ReadCleanReq_hits::total 1 # number of ReadCleanReq hits system.cpu.l2cache.demand_hits::cpu.inst 1 # number of demand (read+write) hits @@ -577,18 +575,18 @@ system.cpu.l2cache.demand_misses::total 532 # nu system.cpu.l2cache.overall_misses::cpu.inst 363 # number of overall misses system.cpu.l2cache.overall_misses::cpu.data 169 # number of overall misses system.cpu.l2cache.overall_misses::total 532 # number of overall misses -system.cpu.l2cache.ReadExReq_miss_latency::cpu.data 5275000 # number of ReadExReq miss cycles -system.cpu.l2cache.ReadExReq_miss_latency::total 5275000 # number of ReadExReq miss cycles -system.cpu.l2cache.ReadCleanReq_miss_latency::cpu.inst 26844000 # number of ReadCleanReq miss cycles -system.cpu.l2cache.ReadCleanReq_miss_latency::total 26844000 # number of ReadCleanReq miss cycles -system.cpu.l2cache.ReadSharedReq_miss_latency::cpu.data 7577500 # number of ReadSharedReq miss cycles -system.cpu.l2cache.ReadSharedReq_miss_latency::total 7577500 # number of ReadSharedReq miss cycles -system.cpu.l2cache.demand_miss_latency::cpu.inst 26844000 # number of demand (read+write) miss cycles -system.cpu.l2cache.demand_miss_latency::cpu.data 12852500 # number of demand (read+write) miss cycles -system.cpu.l2cache.demand_miss_latency::total 39696500 # number of demand (read+write) miss cycles -system.cpu.l2cache.overall_miss_latency::cpu.inst 26844000 # number of overall miss cycles -system.cpu.l2cache.overall_miss_latency::cpu.data 12852500 # number of overall miss cycles -system.cpu.l2cache.overall_miss_latency::total 39696500 # number of overall miss cycles +system.cpu.l2cache.ReadExReq_miss_latency::cpu.data 5270000 # number of ReadExReq miss cycles +system.cpu.l2cache.ReadExReq_miss_latency::total 5270000 # number of ReadExReq miss cycles +system.cpu.l2cache.ReadCleanReq_miss_latency::cpu.inst 27166000 # number of ReadCleanReq miss cycles +system.cpu.l2cache.ReadCleanReq_miss_latency::total 27166000 # number of ReadCleanReq miss cycles +system.cpu.l2cache.ReadSharedReq_miss_latency::cpu.data 7348500 # number of ReadSharedReq miss cycles +system.cpu.l2cache.ReadSharedReq_miss_latency::total 7348500 # number of ReadSharedReq miss cycles +system.cpu.l2cache.demand_miss_latency::cpu.inst 27166000 # number of demand (read+write) miss cycles +system.cpu.l2cache.demand_miss_latency::cpu.data 12618500 # number of demand (read+write) miss cycles +system.cpu.l2cache.demand_miss_latency::total 39784500 # number of demand (read+write) miss cycles +system.cpu.l2cache.overall_miss_latency::cpu.inst 27166000 # number of overall miss cycles +system.cpu.l2cache.overall_miss_latency::cpu.data 12618500 # number of overall miss cycles +system.cpu.l2cache.overall_miss_latency::total 39784500 # number of overall miss cycles system.cpu.l2cache.ReadExReq_accesses::cpu.data 73 # number of ReadExReq accesses(hits+misses) system.cpu.l2cache.ReadExReq_accesses::total 73 # number of ReadExReq accesses(hits+misses) system.cpu.l2cache.ReadCleanReq_accesses::cpu.inst 364 # number of ReadCleanReq accesses(hits+misses) @@ -613,18 +611,18 @@ system.cpu.l2cache.demand_miss_rate::total 0.998124 # system.cpu.l2cache.overall_miss_rate::cpu.inst 0.997253 # miss rate for overall accesses system.cpu.l2cache.overall_miss_rate::cpu.data 1 # miss rate for overall accesses system.cpu.l2cache.overall_miss_rate::total 0.998124 # miss rate for overall accesses 
-system.cpu.l2cache.ReadExReq_avg_miss_latency::cpu.data 72260.273973 # average ReadExReq miss latency -system.cpu.l2cache.ReadExReq_avg_miss_latency::total 72260.273973 # average ReadExReq miss latency -system.cpu.l2cache.ReadCleanReq_avg_miss_latency::cpu.inst 73950.413223 # average ReadCleanReq miss latency -system.cpu.l2cache.ReadCleanReq_avg_miss_latency::total 73950.413223 # average ReadCleanReq miss latency -system.cpu.l2cache.ReadSharedReq_avg_miss_latency::cpu.data 78932.291667 # average ReadSharedReq miss latency -system.cpu.l2cache.ReadSharedReq_avg_miss_latency::total 78932.291667 # average ReadSharedReq miss latency -system.cpu.l2cache.demand_avg_miss_latency::cpu.inst 73950.413223 # average overall miss latency -system.cpu.l2cache.demand_avg_miss_latency::cpu.data 76050.295858 # average overall miss latency -system.cpu.l2cache.demand_avg_miss_latency::total 74617.481203 # average overall miss latency -system.cpu.l2cache.overall_avg_miss_latency::cpu.inst 73950.413223 # average overall miss latency -system.cpu.l2cache.overall_avg_miss_latency::cpu.data 76050.295858 # average overall miss latency -system.cpu.l2cache.overall_avg_miss_latency::total 74617.481203 # average overall miss latency +system.cpu.l2cache.ReadExReq_avg_miss_latency::cpu.data 72191.780822 # average ReadExReq miss latency +system.cpu.l2cache.ReadExReq_avg_miss_latency::total 72191.780822 # average ReadExReq miss latency +system.cpu.l2cache.ReadCleanReq_avg_miss_latency::cpu.inst 74837.465565 # average ReadCleanReq miss latency +system.cpu.l2cache.ReadCleanReq_avg_miss_latency::total 74837.465565 # average ReadCleanReq miss latency +system.cpu.l2cache.ReadSharedReq_avg_miss_latency::cpu.data 76546.875000 # average ReadSharedReq miss latency +system.cpu.l2cache.ReadSharedReq_avg_miss_latency::total 76546.875000 # average ReadSharedReq miss latency +system.cpu.l2cache.demand_avg_miss_latency::cpu.inst 74837.465565 # average overall miss latency +system.cpu.l2cache.demand_avg_miss_latency::cpu.data 74665.680473 # average overall miss latency +system.cpu.l2cache.demand_avg_miss_latency::total 74782.894737 # average overall miss latency +system.cpu.l2cache.overall_avg_miss_latency::cpu.inst 74837.465565 # average overall miss latency +system.cpu.l2cache.overall_avg_miss_latency::cpu.data 74665.680473 # average overall miss latency +system.cpu.l2cache.overall_avg_miss_latency::total 74782.894737 # average overall miss latency system.cpu.l2cache.blocked_cycles::no_mshrs 0 # number of cycles access was blocked system.cpu.l2cache.blocked_cycles::no_targets 0 # number of cycles access was blocked system.cpu.l2cache.blocked::no_mshrs 0 # number of cycles access was blocked @@ -643,18 +641,18 @@ system.cpu.l2cache.demand_mshr_misses::total 532 system.cpu.l2cache.overall_mshr_misses::cpu.inst 363 # number of overall MSHR misses system.cpu.l2cache.overall_mshr_misses::cpu.data 169 # number of overall MSHR misses system.cpu.l2cache.overall_mshr_misses::total 532 # number of overall MSHR misses -system.cpu.l2cache.ReadExReq_mshr_miss_latency::cpu.data 4545000 # number of ReadExReq MSHR miss cycles -system.cpu.l2cache.ReadExReq_mshr_miss_latency::total 4545000 # number of ReadExReq MSHR miss cycles -system.cpu.l2cache.ReadCleanReq_mshr_miss_latency::cpu.inst 23214000 # number of ReadCleanReq MSHR miss cycles -system.cpu.l2cache.ReadCleanReq_mshr_miss_latency::total 23214000 # number of ReadCleanReq MSHR miss cycles -system.cpu.l2cache.ReadSharedReq_mshr_miss_latency::cpu.data 6617500 # number of ReadSharedReq MSHR miss cycles 
-system.cpu.l2cache.ReadSharedReq_mshr_miss_latency::total 6617500 # number of ReadSharedReq MSHR miss cycles -system.cpu.l2cache.demand_mshr_miss_latency::cpu.inst 23214000 # number of demand (read+write) MSHR miss cycles -system.cpu.l2cache.demand_mshr_miss_latency::cpu.data 11162500 # number of demand (read+write) MSHR miss cycles -system.cpu.l2cache.demand_mshr_miss_latency::total 34376500 # number of demand (read+write) MSHR miss cycles -system.cpu.l2cache.overall_mshr_miss_latency::cpu.inst 23214000 # number of overall MSHR miss cycles -system.cpu.l2cache.overall_mshr_miss_latency::cpu.data 11162500 # number of overall MSHR miss cycles -system.cpu.l2cache.overall_mshr_miss_latency::total 34376500 # number of overall MSHR miss cycles +system.cpu.l2cache.ReadExReq_mshr_miss_latency::cpu.data 4540000 # number of ReadExReq MSHR miss cycles +system.cpu.l2cache.ReadExReq_mshr_miss_latency::total 4540000 # number of ReadExReq MSHR miss cycles +system.cpu.l2cache.ReadCleanReq_mshr_miss_latency::cpu.inst 23536000 # number of ReadCleanReq MSHR miss cycles +system.cpu.l2cache.ReadCleanReq_mshr_miss_latency::total 23536000 # number of ReadCleanReq MSHR miss cycles +system.cpu.l2cache.ReadSharedReq_mshr_miss_latency::cpu.data 6388500 # number of ReadSharedReq MSHR miss cycles +system.cpu.l2cache.ReadSharedReq_mshr_miss_latency::total 6388500 # number of ReadSharedReq MSHR miss cycles +system.cpu.l2cache.demand_mshr_miss_latency::cpu.inst 23536000 # number of demand (read+write) MSHR miss cycles +system.cpu.l2cache.demand_mshr_miss_latency::cpu.data 10928500 # number of demand (read+write) MSHR miss cycles +system.cpu.l2cache.demand_mshr_miss_latency::total 34464500 # number of demand (read+write) MSHR miss cycles +system.cpu.l2cache.overall_mshr_miss_latency::cpu.inst 23536000 # number of overall MSHR miss cycles +system.cpu.l2cache.overall_mshr_miss_latency::cpu.data 10928500 # number of overall MSHR miss cycles +system.cpu.l2cache.overall_mshr_miss_latency::total 34464500 # number of overall MSHR miss cycles system.cpu.l2cache.ReadExReq_mshr_miss_rate::cpu.data 1 # mshr miss rate for ReadExReq accesses system.cpu.l2cache.ReadExReq_mshr_miss_rate::total 1 # mshr miss rate for ReadExReq accesses system.cpu.l2cache.ReadCleanReq_mshr_miss_rate::cpu.inst 0.997253 # mshr miss rate for ReadCleanReq accesses @@ -667,25 +665,25 @@ system.cpu.l2cache.demand_mshr_miss_rate::total 0.998124 system.cpu.l2cache.overall_mshr_miss_rate::cpu.inst 0.997253 # mshr miss rate for overall accesses system.cpu.l2cache.overall_mshr_miss_rate::cpu.data 1 # mshr miss rate for overall accesses system.cpu.l2cache.overall_mshr_miss_rate::total 0.998124 # mshr miss rate for overall accesses -system.cpu.l2cache.ReadExReq_avg_mshr_miss_latency::cpu.data 62260.273973 # average ReadExReq mshr miss latency -system.cpu.l2cache.ReadExReq_avg_mshr_miss_latency::total 62260.273973 # average ReadExReq mshr miss latency -system.cpu.l2cache.ReadCleanReq_avg_mshr_miss_latency::cpu.inst 63950.413223 # average ReadCleanReq mshr miss latency -system.cpu.l2cache.ReadCleanReq_avg_mshr_miss_latency::total 63950.413223 # average ReadCleanReq mshr miss latency -system.cpu.l2cache.ReadSharedReq_avg_mshr_miss_latency::cpu.data 68932.291667 # average ReadSharedReq mshr miss latency -system.cpu.l2cache.ReadSharedReq_avg_mshr_miss_latency::total 68932.291667 # average ReadSharedReq mshr miss latency -system.cpu.l2cache.demand_avg_mshr_miss_latency::cpu.inst 63950.413223 # average overall mshr miss latency 
-system.cpu.l2cache.demand_avg_mshr_miss_latency::cpu.data 66050.295858 # average overall mshr miss latency -system.cpu.l2cache.demand_avg_mshr_miss_latency::total 64617.481203 # average overall mshr miss latency -system.cpu.l2cache.overall_avg_mshr_miss_latency::cpu.inst 63950.413223 # average overall mshr miss latency -system.cpu.l2cache.overall_avg_mshr_miss_latency::cpu.data 66050.295858 # average overall mshr miss latency -system.cpu.l2cache.overall_avg_mshr_miss_latency::total 64617.481203 # average overall mshr miss latency +system.cpu.l2cache.ReadExReq_avg_mshr_miss_latency::cpu.data 62191.780822 # average ReadExReq mshr miss latency +system.cpu.l2cache.ReadExReq_avg_mshr_miss_latency::total 62191.780822 # average ReadExReq mshr miss latency +system.cpu.l2cache.ReadCleanReq_avg_mshr_miss_latency::cpu.inst 64837.465565 # average ReadCleanReq mshr miss latency +system.cpu.l2cache.ReadCleanReq_avg_mshr_miss_latency::total 64837.465565 # average ReadCleanReq mshr miss latency +system.cpu.l2cache.ReadSharedReq_avg_mshr_miss_latency::cpu.data 66546.875000 # average ReadSharedReq mshr miss latency +system.cpu.l2cache.ReadSharedReq_avg_mshr_miss_latency::total 66546.875000 # average ReadSharedReq mshr miss latency +system.cpu.l2cache.demand_avg_mshr_miss_latency::cpu.inst 64837.465565 # average overall mshr miss latency +system.cpu.l2cache.demand_avg_mshr_miss_latency::cpu.data 64665.680473 # average overall mshr miss latency +system.cpu.l2cache.demand_avg_mshr_miss_latency::total 64782.894737 # average overall mshr miss latency +system.cpu.l2cache.overall_avg_mshr_miss_latency::cpu.inst 64837.465565 # average overall mshr miss latency +system.cpu.l2cache.overall_avg_mshr_miss_latency::cpu.data 64665.680473 # average overall mshr miss latency +system.cpu.l2cache.overall_avg_mshr_miss_latency::total 64782.894737 # average overall mshr miss latency system.cpu.toL2Bus.snoop_filter.tot_requests 533 # Total number of requests made to the snoop filter. system.cpu.toL2Bus.snoop_filter.hit_single_requests 1 # Number of requests hitting in the snoop filter with a single holder of the requested data. system.cpu.toL2Bus.snoop_filter.hit_multi_requests 0 # Number of requests hitting in the snoop filter with multiple (>1) holders of the requested data. system.cpu.toL2Bus.snoop_filter.tot_snoops 0 # Total number of snoops made to the snoop filter. system.cpu.toL2Bus.snoop_filter.hit_single_snoops 0 # Number of snoops hitting in the snoop filter with a single holder of the requested data. system.cpu.toL2Bus.snoop_filter.hit_multi_snoops 0 # Number of snoops hitting in the snoop filter with multiple (>1) holders of the requested data. 
-system.cpu.toL2Bus.pwrStateResidencyTicks::UNDEFINED 37494000 # Cumulative time (in ticks) in various power states +system.cpu.toL2Bus.pwrStateResidencyTicks::UNDEFINED 37822000 # Cumulative time (in ticks) in various power states system.cpu.toL2Bus.trans_dist::ReadResp 460 # Transaction distribution system.cpu.toL2Bus.trans_dist::ReadExReq 73 # Transaction distribution system.cpu.toL2Bus.trans_dist::ReadExResp 73 # Transaction distribution @@ -698,6 +696,7 @@ system.cpu.toL2Bus.pkt_size_system.cpu.icache.mem_side::system.cpu.l2cache.cpu_s system.cpu.toL2Bus.pkt_size_system.cpu.dcache.mem_side::system.cpu.l2cache.cpu_side 10816 # Cumulative packet size per connected master and slave (bytes) system.cpu.toL2Bus.pkt_size::total 34112 # Cumulative packet size per connected master and slave (bytes) system.cpu.toL2Bus.snoops 0 # Total snoops (count) +system.cpu.toL2Bus.snoopTraffic 0 # Total snoop traffic (bytes) system.cpu.toL2Bus.snoop_fanout::samples 533 # Request fanout histogram system.cpu.toL2Bus.snoop_fanout::mean 0.001876 # Request fanout histogram system.cpu.toL2Bus.snoop_fanout::stdev 0.043315 # Request fanout histogram @@ -712,10 +711,10 @@ system.cpu.toL2Bus.snoop_fanout::total 533 # Re system.cpu.toL2Bus.reqLayer0.occupancy 266500 # Layer occupancy (ticks) system.cpu.toL2Bus.reqLayer0.utilization 0.7 # Layer utilization (%) system.cpu.toL2Bus.respLayer0.occupancy 546000 # Layer occupancy (ticks) -system.cpu.toL2Bus.respLayer0.utilization 1.5 # Layer utilization (%) +system.cpu.toL2Bus.respLayer0.utilization 1.4 # Layer utilization (%) system.cpu.toL2Bus.respLayer1.occupancy 253500 # Layer occupancy (ticks) system.cpu.toL2Bus.respLayer1.utilization 0.7 # Layer utilization (%) -system.membus.pwrStateResidencyTicks::UNDEFINED 37494000 # Cumulative time (in ticks) in various power states +system.membus.pwrStateResidencyTicks::UNDEFINED 37822000 # Cumulative time (in ticks) in various power states system.membus.trans_dist::ReadResp 459 # Transaction distribution system.membus.trans_dist::ReadExReq 73 # Transaction distribution system.membus.trans_dist::ReadExResp 73 # Transaction distribution @@ -725,6 +724,7 @@ system.membus.pkt_count::total 1064 # Pa system.membus.pkt_size_system.cpu.l2cache.mem_side::system.physmem.port 34048 # Cumulative packet size per connected master and slave (bytes) system.membus.pkt_size::total 34048 # Cumulative packet size per connected master and slave (bytes) system.membus.snoops 0 # Total snoops (count) +system.membus.snoopTraffic 0 # Total snoop traffic (bytes) system.membus.snoop_fanout::samples 532 # Request fanout histogram system.membus.snoop_fanout::mean 0 # Request fanout histogram system.membus.snoop_fanout::stdev 0 # Request fanout histogram @@ -735,9 +735,9 @@ system.membus.snoop_fanout::overflows 0 0.00% 100.00% # Re system.membus.snoop_fanout::min_value 0 # Request fanout histogram system.membus.snoop_fanout::max_value 0 # Request fanout histogram system.membus.snoop_fanout::total 532 # Request fanout histogram -system.membus.reqLayer0.occupancy 602500 # Layer occupancy (ticks) +system.membus.reqLayer0.occupancy 608000 # Layer occupancy (ticks) system.membus.reqLayer0.utilization 1.6 # Layer utilization (%) -system.membus.respLayer1.occupancy 2826750 # Layer occupancy (ticks) +system.membus.respLayer1.occupancy 2826500 # Layer occupancy (ticks) system.membus.respLayer1.utilization 7.5 # Layer utilization (%) ---------- End Simulation Statistics ---------- diff --git a/tests/quick/se/00.hello/ref/alpha/linux/o3-timing/config.ini 
b/tests/quick/se/00.hello/ref/alpha/linux/o3-timing/config.ini index a3681b4ff..81c1646b5 100644 --- a/tests/quick/se/00.hello/ref/alpha/linux/o3-timing/config.ini +++ b/tests/quick/se/00.hello/ref/alpha/linux/o3-timing/config.ini @@ -14,6 +14,7 @@ children=clk_domain cpu cpu_clk_domain dvfs_handler membus physmem voltage_domai boot_osflags=a cache_line_size=64 clk_domain=system.clk_domain +default_p_state=UNDEFINED eventq_index=0 exit_on_work_items=false init_param=0 @@ -27,6 +28,10 @@ memories=system.physmem mmap_using_noreserve=false multi_thread=false num_work_ids=16 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null readfile= symbolfile= thermal_components= @@ -72,6 +77,7 @@ cpu_id=0 decodeToFetchDelay=1 decodeToRenameDelay=1 decodeWidth=8 +default_p_state=UNDEFINED dispatchWidth=8 do_checkpoint_insts=true do_quiesce=true @@ -108,6 +114,10 @@ numPhysIntRegs=256 numROBEntries=192 numRobs=1 numThreads=1 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null profile=0 progress_interval=0 renameToDecodeDelay=1 @@ -167,12 +177,17 @@ addr_ranges=0:18446744073709551615 assoc=2 clk_domain=system.cpu_clk_domain clusivity=mostly_incl +default_p_state=UNDEFINED demand_mshr_reserve=1 eventq_index=0 hit_latency=2 is_read_only=false max_miss_count=0 mshrs=4 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null prefetch_on_access=false prefetcher=Null response_latency=2 @@ -191,8 +206,13 @@ type=LRU assoc=2 block_size=64 clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 hit_latency=2 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sequential_access=false size=262144 @@ -515,12 +535,17 @@ addr_ranges=0:18446744073709551615 assoc=2 clk_domain=system.cpu_clk_domain clusivity=mostly_incl +default_p_state=UNDEFINED demand_mshr_reserve=1 eventq_index=0 hit_latency=2 is_read_only=true max_miss_count=0 mshrs=4 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null prefetch_on_access=false prefetcher=Null response_latency=2 @@ -539,8 +564,13 @@ type=LRU assoc=2 block_size=64 clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 hit_latency=2 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sequential_access=false size=131072 @@ -565,12 +595,17 @@ addr_ranges=0:18446744073709551615 assoc=8 clk_domain=system.cpu_clk_domain clusivity=mostly_incl +default_p_state=UNDEFINED demand_mshr_reserve=1 eventq_index=0 hit_latency=20 is_read_only=false max_miss_count=0 mshrs=20 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null prefetch_on_access=false prefetcher=Null response_latency=20 @@ -589,8 +624,13 @@ type=LRU assoc=8 block_size=64 clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 hit_latency=20 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sequential_access=false size=2097152 @@ -598,10 +638,15 @@ size=2097152 type=CoherentXBar children=snoop_filter clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 forward_latency=0 frontend_latency=1 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 point_of_coherency=false +power_model=Null 
response_latency=1 snoop_filter=system.cpu.toL2Bus.snoop_filter snoop_response_latency=1 @@ -632,7 +677,7 @@ env= errout=cerr euid=100 eventq_index=0 -executable=/home/stever/hg/m5sim.org/gem5/tests/test-progs/hello/bin/alpha/linux/hello +executable=/arm/projectscratch/randd/systems/dist/test-progs/hello/bin/alpha/linux/hello gid=100 input=cin kvmInSE=false @@ -664,10 +709,15 @@ transition_latency=100000000 [system.membus] type=CoherentXBar clk_domain=system.clk_domain +default_p_state=UNDEFINED eventq_index=0 forward_latency=4 frontend_latency=3 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 point_of_coherency=true +power_model=Null response_latency=2 snoop_filter=Null snoop_response_latency=4 @@ -711,6 +761,7 @@ burst_length=8 channels=1 clk_domain=system.clk_domain conf_table_reported=true +default_p_state=UNDEFINED device_bus_width=8 device_rowbuffer_size=1024 device_size=536870912 @@ -722,7 +773,11 @@ max_accesses_per_row=16 mem_sched_policy=frfcfs min_writes_per_switch=16 null=false +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 page_policy=open_adaptive +power_model=Null range=0:134217727 ranks_per_channel=2 read_buffer_size=32 diff --git a/tests/quick/se/00.hello/ref/alpha/linux/o3-timing/simerr b/tests/quick/se/00.hello/ref/alpha/linux/o3-timing/simerr index 341b479f7..bbcd9d751 100755 --- a/tests/quick/se/00.hello/ref/alpha/linux/o3-timing/simerr +++ b/tests/quick/se/00.hello/ref/alpha/linux/o3-timing/simerr @@ -1,2 +1,3 @@ warn: DRAM device capacity (8192 Mbytes) does not match the address range assigned (128 Mbytes) warn: Sockets disabled, not accepting gdb connections +warn: ClockedObject: More than one power state change request encountered within the same simulation tick diff --git a/tests/quick/se/00.hello/ref/alpha/linux/o3-timing/simout b/tests/quick/se/00.hello/ref/alpha/linux/o3-timing/simout index bd3dd6b17..b4b146baf 100755 --- a/tests/quick/se/00.hello/ref/alpha/linux/o3-timing/simout +++ b/tests/quick/se/00.hello/ref/alpha/linux/o3-timing/simout @@ -3,13 +3,13 @@ Redirecting stderr to build/ALPHA/tests/opt/quick/se/00.hello/alpha/linux/o3-tim gem5 Simulator System. http://gem5.org gem5 is copyrighted software; use the --copyright option for details. -gem5 compiled Mar 14 2016 21:54:46 -gem5 started Mar 14 2016 21:57:45 -gem5 executing on phenom, pid 28188 -command line: build/ALPHA/gem5.opt -d build/ALPHA/tests/opt/quick/se/00.hello/alpha/linux/o3-timing -re /home/stever/hg/m5sim.org/gem5/tests/run.py build/ALPHA/tests/opt/quick/se/00.hello/alpha/linux/o3-timing +gem5 compiled Jul 19 2016 12:23:51 +gem5 started Jul 19 2016 12:24:27 +gem5 executing on e108600-lin, pid 39605 +command line: /work/curdun01/gem5-external.hg/build/ALPHA/gem5.opt -d build/ALPHA/tests/opt/quick/se/00.hello/alpha/linux/o3-timing -re /work/curdun01/gem5-external.hg/tests/testing/../run.py quick/se/00.hello/alpha/linux/o3-timing Global frequency set at 1000000000000 ticks per second info: Entering event queue @ 0. Starting simulation... info: Increasing stack size by one page. Hello world! 
-Exiting @ tick 21972500 because target called exit() +Exiting @ tick 22019000 because target called exit() diff --git a/tests/quick/se/00.hello/ref/alpha/linux/o3-timing/stats.txt b/tests/quick/se/00.hello/ref/alpha/linux/o3-timing/stats.txt index 8d95bb8b7..0781260bf 100644 --- a/tests/quick/se/00.hello/ref/alpha/linux/o3-timing/stats.txt +++ b/tests/quick/se/00.hello/ref/alpha/linux/o3-timing/stats.txt @@ -4,10 +4,10 @@ sim_seconds 0.000022 # Nu sim_ticks 22019000 # Number of ticks simulated final_tick 22019000 # Number of ticks from beginning of simulation (restored from checkpoints and never reset) sim_freq 1000000000000 # Frequency of simulated ticks -host_inst_rate 117755 # Simulator instruction rate (inst/s) -host_op_rate 117735 # Simulator op (including micro ops) rate (op/s) -host_tick_rate 405950936 # Simulator tick rate (ticks/s) -host_mem_usage 294524 # Number of bytes of host memory used +host_inst_rate 122018 # Simulator instruction rate (inst/s) +host_op_rate 121990 # Simulator op (including micro ops) rate (op/s) +host_tick_rate 420608458 # Simulator tick rate (ticks/s) +host_mem_usage 250288 # Number of bytes of host memory used host_seconds 0.05 # Real time elapsed on the host sim_insts 6385 # Number of instructions simulated sim_ops 6385 # Number of ops (including micro ops) simulated @@ -948,6 +948,7 @@ system.cpu.toL2Bus.pkt_size_system.cpu.icache.mem_side::system.cpu.l2cache.cpu_s system.cpu.toL2Bus.pkt_size_system.cpu.dcache.mem_side::system.cpu.l2cache.cpu_side 11072 # Cumulative packet size per connected master and slave (bytes) system.cpu.toL2Bus.pkt_size::total 31104 # Cumulative packet size per connected master and slave (bytes) system.cpu.toL2Bus.snoops 0 # Total snoops (count) +system.cpu.toL2Bus.snoopTraffic 0 # Total snoop traffic (bytes) system.cpu.toL2Bus.snoop_fanout::samples 486 # Request fanout histogram system.cpu.toL2Bus.snoop_fanout::mean 0.002058 # Request fanout histogram system.cpu.toL2Bus.snoop_fanout::stdev 0.045361 # Request fanout histogram @@ -975,6 +976,7 @@ system.membus.pkt_count::total 970 # Pa system.membus.pkt_size_system.cpu.l2cache.mem_side::system.physmem.port 31040 # Cumulative packet size per connected master and slave (bytes) system.membus.pkt_size::total 31040 # Cumulative packet size per connected master and slave (bytes) system.membus.snoops 0 # Total snoops (count) +system.membus.snoopTraffic 0 # Total snoop traffic (bytes) system.membus.snoop_fanout::samples 485 # Request fanout histogram system.membus.snoop_fanout::mean 0 # Request fanout histogram system.membus.snoop_fanout::stdev 0 # Request fanout histogram diff --git a/tests/quick/se/00.hello/ref/alpha/linux/simple-atomic/config.ini b/tests/quick/se/00.hello/ref/alpha/linux/simple-atomic/config.ini index 5f2701c66..c1171633d 100644 --- a/tests/quick/se/00.hello/ref/alpha/linux/simple-atomic/config.ini +++ b/tests/quick/se/00.hello/ref/alpha/linux/simple-atomic/config.ini @@ -14,6 +14,7 @@ children=clk_domain cpu cpu_clk_domain dvfs_handler membus physmem voltage_domai boot_osflags=a cache_line_size=64 clk_domain=system.clk_domain +default_p_state=UNDEFINED eventq_index=0 exit_on_work_items=false init_param=0 @@ -27,6 +28,10 @@ memories=system.physmem mmap_using_noreserve=false multi_thread=false num_work_ids=16 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null readfile= symbolfile= thermal_components= @@ -55,6 +60,7 @@ branchPred=Null checker=Null clk_domain=system.cpu_clk_domain cpu_id=0 
+default_p_state=UNDEFINED do_checkpoint_insts=true do_quiesce=true do_statistics_insts=true @@ -71,6 +77,10 @@ max_insts_any_thread=0 max_loads_all_threads=0 max_loads_any_thread=0 numThreads=1 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null profile=0 progress_interval=0 simpoint_start_insts= @@ -118,7 +128,7 @@ env= errout=cerr euid=100 eventq_index=0 -executable=/home/stever/hg/m5sim.org/gem5/tests/test-progs/hello/bin/alpha/linux/hello +executable=/arm/projectscratch/randd/systems/dist/test-progs/hello/bin/alpha/linux/hello gid=100 input=cin kvmInSE=false @@ -150,10 +160,15 @@ transition_latency=100000000 [system.membus] type=CoherentXBar clk_domain=system.clk_domain +default_p_state=UNDEFINED eventq_index=0 forward_latency=4 frontend_latency=3 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 point_of_coherency=true +power_model=Null response_latency=2 snoop_filter=Null snoop_response_latency=4 @@ -168,11 +183,16 @@ type=SimpleMemory bandwidth=73.000000 clk_domain=system.clk_domain conf_table_reported=true +default_p_state=UNDEFINED eventq_index=0 in_addr_map=true latency=30000 latency_var=0 null=false +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null range=0:134217727 port=system.membus.master[0] diff --git a/tests/quick/se/00.hello/ref/alpha/linux/simple-atomic/simerr b/tests/quick/se/00.hello/ref/alpha/linux/simple-atomic/simerr index 1a4f96712..aadc3d011 100755 --- a/tests/quick/se/00.hello/ref/alpha/linux/simple-atomic/simerr +++ b/tests/quick/se/00.hello/ref/alpha/linux/simple-atomic/simerr @@ -1 +1,2 @@ warn: Sockets disabled, not accepting gdb connections +warn: ClockedObject: More than one power state change request encountered within the same simulation tick diff --git a/tests/quick/se/00.hello/ref/alpha/linux/simple-atomic/simout b/tests/quick/se/00.hello/ref/alpha/linux/simple-atomic/simout index e982daec6..a049bb5ed 100755 --- a/tests/quick/se/00.hello/ref/alpha/linux/simple-atomic/simout +++ b/tests/quick/se/00.hello/ref/alpha/linux/simple-atomic/simout @@ -3,10 +3,10 @@ Redirecting stderr to build/ALPHA/tests/opt/quick/se/00.hello/alpha/linux/simple gem5 Simulator System. http://gem5.org gem5 is copyrighted software; use the --copyright option for details. -gem5 compiled Mar 14 2016 21:54:46 -gem5 started Mar 14 2016 21:56:00 -gem5 executing on phenom, pid 28087 -command line: build/ALPHA/gem5.opt -d build/ALPHA/tests/opt/quick/se/00.hello/alpha/linux/simple-atomic -re /home/stever/hg/m5sim.org/gem5/tests/run.py build/ALPHA/tests/opt/quick/se/00.hello/alpha/linux/simple-atomic +gem5 compiled Jul 19 2016 12:23:51 +gem5 started Jul 19 2016 12:24:27 +gem5 executing on e108600-lin, pid 39609 +command line: /work/curdun01/gem5-external.hg/build/ALPHA/gem5.opt -d build/ALPHA/tests/opt/quick/se/00.hello/alpha/linux/simple-atomic -re /work/curdun01/gem5-external.hg/tests/testing/../run.py quick/se/00.hello/alpha/linux/simple-atomic Global frequency set at 1000000000000 ticks per second info: Entering event queue @ 0. Starting simulation... 
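
(Editorial aside, not part of the patch.) When checking reference updates like the ones above by hand, it helps to remember that every stats.txt entry is a plain "name  value  # description" line, and that the averaged entries are just quotients of the corresponding totals; for example, the updated l2cache demand_avg_miss_latency::total of 74782.894737 earlier in this diff is 39784500 / 532. Below is a minimal, illustrative sketch of a comparator for two such files; the parse_stats/diff_stats helpers and the file paths are placeholders I am assuming, not anything shipped with gem5 or touched by this commit.

# Illustrative only: compare two gem5 stats.txt files line by line.
# Assumes the "name  value  # description" layout shown in this diff.

def parse_stats(path):
    """Map each stat name to its first value column, kept as a string."""
    stats = {}
    with open(path) as f:
        for line in f:
            line = line.split('#', 1)[0].strip()   # drop the trailing description
            if not line or line.startswith('-'):
                continue                           # skip blanks and Begin/End banners
            fields = line.split()
            if len(fields) >= 2:
                stats[fields[0]] = fields[1]       # histogram rows keep only the first column
    return stats

def diff_stats(old_path, new_path):
    old, new = parse_stats(old_path), parse_stats(new_path)
    for name in sorted(set(old) | set(new)):
        if old.get(name) != new.get(name):
            print('%-60s %12s -> %s' % (name, old.get(name), new.get(name)))

if __name__ == '__main__':
    # Placeholder paths: point these at the checked-in reference and a fresh run
    # (m5out is gem5's default output directory).
    diff_stats('ref/stats.txt', 'm5out/stats.txt')

String comparison is deliberate here: it sidesteps float rounding and 'nan' entries, and flags exactly the kind of value churn this commit records.
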
diff --git a/tests/quick/se/00.hello/ref/alpha/linux/simple-atomic/stats.txt b/tests/quick/se/00.hello/ref/alpha/linux/simple-atomic/stats.txt index 281db070e..724287a51 100644 --- a/tests/quick/se/00.hello/ref/alpha/linux/simple-atomic/stats.txt +++ b/tests/quick/se/00.hello/ref/alpha/linux/simple-atomic/stats.txt @@ -4,10 +4,10 @@ sim_seconds 0.000003 # Nu sim_ticks 3214500 # Number of ticks simulated final_tick 3214500 # Number of ticks from beginning of simulation (restored from checkpoints and never reset) sim_freq 1000000000000 # Frequency of simulated ticks -host_inst_rate 879431 # Simulator instruction rate (inst/s) -host_op_rate 878309 # Simulator op (including micro ops) rate (op/s) -host_tick_rate 440397606 # Simulator tick rate (ticks/s) -host_mem_usage 282472 # Number of bytes of host memory used +host_inst_rate 1026789 # Simulator instruction rate (inst/s) +host_op_rate 1024872 # Simulator op (including micro ops) rate (op/s) +host_tick_rate 513643962 # Simulator tick rate (ticks/s) +host_mem_usage 238508 # Number of bytes of host memory used host_seconds 0.01 # Real time elapsed on the host sim_insts 6403 # Number of instructions simulated sim_ops 6403 # Number of ops (including micro ops) simulated @@ -142,6 +142,7 @@ system.membus.pkt_size_system.cpu.icache_port::system.physmem.port 25652 system.membus.pkt_size_system.cpu.dcache_port::system.physmem.port 15500 # Cumulative packet size per connected master and slave (bytes) system.membus.pkt_size::total 41152 # Cumulative packet size per connected master and slave (bytes) system.membus.snoops 0 # Total snoops (count) +system.membus.snoopTraffic 0 # Total snoop traffic (bytes) system.membus.snoop_fanout::samples 8463 # Request fanout histogram system.membus.snoop_fanout::mean 0.757769 # Request fanout histogram system.membus.snoop_fanout::stdev 0.428459 # Request fanout histogram diff --git a/tests/quick/se/00.hello/ref/alpha/linux/simple-timing/config.ini b/tests/quick/se/00.hello/ref/alpha/linux/simple-timing/config.ini index 68b35910c..d2de1569b 100644 --- a/tests/quick/se/00.hello/ref/alpha/linux/simple-timing/config.ini +++ b/tests/quick/se/00.hello/ref/alpha/linux/simple-timing/config.ini @@ -14,6 +14,7 @@ children=clk_domain cpu cpu_clk_domain dvfs_handler membus physmem voltage_domai boot_osflags=a cache_line_size=64 clk_domain=system.clk_domain +default_p_state=UNDEFINED eventq_index=0 exit_on_work_items=false init_param=0 @@ -27,6 +28,10 @@ memories=system.physmem mmap_using_noreserve=false multi_thread=false num_work_ids=16 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null readfile= symbolfile= thermal_components= @@ -55,6 +60,7 @@ branchPred=Null checker=Null clk_domain=system.cpu_clk_domain cpu_id=0 +default_p_state=UNDEFINED do_checkpoint_insts=true do_quiesce=true do_statistics_insts=true @@ -70,6 +76,10 @@ max_insts_any_thread=0 max_loads_all_threads=0 max_loads_any_thread=0 numThreads=1 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null profile=0 progress_interval=0 simpoint_start_insts= @@ -88,12 +98,17 @@ addr_ranges=0:18446744073709551615 assoc=2 clk_domain=system.cpu_clk_domain clusivity=mostly_incl +default_p_state=UNDEFINED demand_mshr_reserve=1 eventq_index=0 hit_latency=2 is_read_only=false max_miss_count=0 mshrs=4 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null prefetch_on_access=false prefetcher=Null response_latency=2 @@ -112,8 
+127,13 @@ type=LRU assoc=2 block_size=64 clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 hit_latency=2 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sequential_access=false size=262144 @@ -129,12 +149,17 @@ addr_ranges=0:18446744073709551615 assoc=2 clk_domain=system.cpu_clk_domain clusivity=mostly_incl +default_p_state=UNDEFINED demand_mshr_reserve=1 eventq_index=0 hit_latency=2 is_read_only=true max_miss_count=0 mshrs=4 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null prefetch_on_access=false prefetcher=Null response_latency=2 @@ -153,8 +178,13 @@ type=LRU assoc=2 block_size=64 clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 hit_latency=2 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sequential_access=false size=131072 @@ -179,12 +209,17 @@ addr_ranges=0:18446744073709551615 assoc=8 clk_domain=system.cpu_clk_domain clusivity=mostly_incl +default_p_state=UNDEFINED demand_mshr_reserve=1 eventq_index=0 hit_latency=20 is_read_only=false max_miss_count=0 mshrs=20 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null prefetch_on_access=false prefetcher=Null response_latency=20 @@ -203,8 +238,13 @@ type=LRU assoc=8 block_size=64 clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 hit_latency=20 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sequential_access=false size=2097152 @@ -212,10 +252,15 @@ size=2097152 type=CoherentXBar children=snoop_filter clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 forward_latency=0 frontend_latency=1 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 point_of_coherency=false +power_model=Null response_latency=1 snoop_filter=system.cpu.toL2Bus.snoop_filter snoop_response_latency=1 @@ -246,7 +291,7 @@ env= errout=cerr euid=100 eventq_index=0 -executable=/home/stever/hg/m5sim.org/gem5/tests/test-progs/hello/bin/alpha/linux/hello +executable=/arm/projectscratch/randd/systems/dist/test-progs/hello/bin/alpha/linux/hello gid=100 input=cin kvmInSE=false @@ -278,10 +323,15 @@ transition_latency=100000000 [system.membus] type=CoherentXBar clk_domain=system.clk_domain +default_p_state=UNDEFINED eventq_index=0 forward_latency=4 frontend_latency=3 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 point_of_coherency=true +power_model=Null response_latency=2 snoop_filter=Null snoop_response_latency=4 @@ -296,11 +346,16 @@ type=SimpleMemory bandwidth=73.000000 clk_domain=system.clk_domain conf_table_reported=true +default_p_state=UNDEFINED eventq_index=0 in_addr_map=true latency=30000 latency_var=0 null=false +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null range=0:134217727 port=system.membus.master[0] diff --git a/tests/quick/se/00.hello/ref/alpha/linux/simple-timing/simerr b/tests/quick/se/00.hello/ref/alpha/linux/simple-timing/simerr index 1a4f96712..aadc3d011 100755 --- a/tests/quick/se/00.hello/ref/alpha/linux/simple-timing/simerr +++ b/tests/quick/se/00.hello/ref/alpha/linux/simple-timing/simerr @@ -1 +1,2 @@ warn: Sockets disabled, not accepting gdb connections +warn: ClockedObject: More than one power state change request encountered within the 
same simulation tick diff --git a/tests/quick/se/00.hello/ref/alpha/linux/simple-timing/simout b/tests/quick/se/00.hello/ref/alpha/linux/simple-timing/simout index 9c12b76cc..7b601dbe7 100755 --- a/tests/quick/se/00.hello/ref/alpha/linux/simple-timing/simout +++ b/tests/quick/se/00.hello/ref/alpha/linux/simple-timing/simout @@ -3,10 +3,10 @@ Redirecting stderr to build/ALPHA/tests/opt/quick/se/00.hello/alpha/linux/simple gem5 Simulator System. http://gem5.org gem5 is copyrighted software; use the --copyright option for details. -gem5 compiled Mar 14 2016 21:54:46 -gem5 started Mar 14 2016 21:56:12 -gem5 executing on phenom, pid 28101 -command line: build/ALPHA/gem5.opt -d build/ALPHA/tests/opt/quick/se/00.hello/alpha/linux/simple-timing -re /home/stever/hg/m5sim.org/gem5/tests/run.py build/ALPHA/tests/opt/quick/se/00.hello/alpha/linux/simple-timing +gem5 compiled Jul 19 2016 12:23:51 +gem5 started Jul 19 2016 12:24:28 +gem5 executing on e108600-lin, pid 39614 +command line: /work/curdun01/gem5-external.hg/build/ALPHA/gem5.opt -d build/ALPHA/tests/opt/quick/se/00.hello/alpha/linux/simple-timing -re /work/curdun01/gem5-external.hg/tests/testing/../run.py quick/se/00.hello/alpha/linux/simple-timing Global frequency set at 1000000000000 ticks per second info: Entering event queue @ 0. Starting simulation... diff --git a/tests/quick/se/00.hello/ref/alpha/linux/simple-timing/stats.txt b/tests/quick/se/00.hello/ref/alpha/linux/simple-timing/stats.txt index 0b95c7449..d4fc31bad 100644 --- a/tests/quick/se/00.hello/ref/alpha/linux/simple-timing/stats.txt +++ b/tests/quick/se/00.hello/ref/alpha/linux/simple-timing/stats.txt @@ -4,11 +4,11 @@ sim_seconds 0.000036 # Nu sim_ticks 35682500 # Number of ticks simulated final_tick 35682500 # Number of ticks from beginning of simulation (restored from checkpoints and never reset) sim_freq 1000000000000 # Frequency of simulated ticks -host_inst_rate 516760 # Simulator instruction rate (inst/s) -host_op_rate 516348 # Simulator op (including micro ops) rate (op/s) -host_tick_rate 2875227341 # Simulator tick rate (ticks/s) -host_mem_usage 291440 # Number of bytes of host memory used -host_seconds 0.01 # Real time elapsed on the host +host_inst_rate 318235 # Simulator instruction rate (inst/s) +host_op_rate 317806 # Simulator op (including micro ops) rate (op/s) +host_tick_rate 1768975757 # Simulator tick rate (ticks/s) +host_mem_usage 248500 # Number of bytes of host memory used +host_seconds 0.02 # Real time elapsed on the host sim_insts 6403 # Number of instructions simulated sim_ops 6403 # Number of ops (including micro ops) simulated system.voltage_domain.voltage 1 # Voltage in Volts @@ -468,6 +468,7 @@ system.cpu.toL2Bus.pkt_size_system.cpu.icache.mem_side::system.cpu.l2cache.cpu_s system.cpu.toL2Bus.pkt_size_system.cpu.dcache.mem_side::system.cpu.l2cache.cpu_side 10752 # Cumulative packet size per connected master and slave (bytes) system.cpu.toL2Bus.pkt_size::total 28608 # Cumulative packet size per connected master and slave (bytes) system.cpu.toL2Bus.snoops 0 # Total snoops (count) +system.cpu.toL2Bus.snoopTraffic 0 # Total snoop traffic (bytes) system.cpu.toL2Bus.snoop_fanout::samples 447 # Request fanout histogram system.cpu.toL2Bus.snoop_fanout::mean 0.002237 # Request fanout histogram system.cpu.toL2Bus.snoop_fanout::stdev 0.047298 # Request fanout histogram @@ -495,6 +496,7 @@ system.membus.pkt_count::total 892 # Pa system.membus.pkt_size_system.cpu.l2cache.mem_side::system.physmem.port 28544 # Cumulative packet size per connected master and slave 
(bytes) system.membus.pkt_size::total 28544 # Cumulative packet size per connected master and slave (bytes) system.membus.snoops 0 # Total snoops (count) +system.membus.snoopTraffic 0 # Total snoop traffic (bytes) system.membus.snoop_fanout::samples 446 # Request fanout histogram system.membus.snoop_fanout::mean 0 # Request fanout histogram system.membus.snoop_fanout::stdev 0 # Request fanout histogram diff --git a/tests/quick/se/00.hello/ref/alpha/tru64/minor-timing/config.ini b/tests/quick/se/00.hello/ref/alpha/tru64/minor-timing/config.ini index 654daf7a1..ccd9350bc 100644 --- a/tests/quick/se/00.hello/ref/alpha/tru64/minor-timing/config.ini +++ b/tests/quick/se/00.hello/ref/alpha/tru64/minor-timing/config.ini @@ -14,6 +14,7 @@ children=clk_domain cpu cpu_clk_domain dvfs_handler membus physmem voltage_domai boot_osflags=a cache_line_size=64 clk_domain=system.clk_domain +default_p_state=UNDEFINED eventq_index=0 exit_on_work_items=false init_param=0 @@ -27,6 +28,10 @@ memories=system.physmem mmap_using_noreserve=false multi_thread=false num_work_ids=16 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null readfile= symbolfile= thermal_components= @@ -59,6 +64,7 @@ decodeCycleInput=true decodeInputBufferSize=3 decodeInputWidth=2 decodeToExecuteForwardDelay=1 +default_p_state=UNDEFINED do_checkpoint_insts=true do_quiesce=true do_statistics_insts=true @@ -101,12 +107,17 @@ max_insts_any_thread=0 max_loads_all_threads=0 max_loads_any_thread=0 numThreads=1 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null profile=0 progress_interval=0 simpoint_start_insts= socket_id=0 switched_out=false system=system +threadPolicy=RoundRobin tracer=system.cpu.tracer workload=system.cpu.workload dcache_port=system.cpu.dcache.cpu_side @@ -142,12 +153,17 @@ addr_ranges=0:18446744073709551615 assoc=2 clk_domain=system.cpu_clk_domain clusivity=mostly_incl +default_p_state=UNDEFINED demand_mshr_reserve=1 eventq_index=0 hit_latency=2 is_read_only=false max_miss_count=0 mshrs=4 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null prefetch_on_access=false prefetcher=Null response_latency=2 @@ -166,8 +182,13 @@ type=LRU assoc=2 block_size=64 clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 hit_latency=2 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sequential_access=false size=262144 @@ -566,12 +587,17 @@ addr_ranges=0:18446744073709551615 assoc=2 clk_domain=system.cpu_clk_domain clusivity=mostly_incl +default_p_state=UNDEFINED demand_mshr_reserve=1 eventq_index=0 hit_latency=2 is_read_only=true max_miss_count=0 mshrs=4 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null prefetch_on_access=false prefetcher=Null response_latency=2 @@ -590,8 +616,13 @@ type=LRU assoc=2 block_size=64 clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 hit_latency=2 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sequential_access=false size=131072 @@ -616,12 +647,17 @@ addr_ranges=0:18446744073709551615 assoc=8 clk_domain=system.cpu_clk_domain clusivity=mostly_incl +default_p_state=UNDEFINED demand_mshr_reserve=1 eventq_index=0 hit_latency=20 is_read_only=false max_miss_count=0 mshrs=20 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 
+p_state_clk_gate_min=1000 +power_model=Null prefetch_on_access=false prefetcher=Null response_latency=20 @@ -640,8 +676,13 @@ type=LRU assoc=8 block_size=64 clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 hit_latency=20 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sequential_access=false size=2097152 @@ -649,10 +690,15 @@ size=2097152 type=CoherentXBar children=snoop_filter clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 forward_latency=0 frontend_latency=1 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 point_of_coherency=false +power_model=Null response_latency=1 snoop_filter=system.cpu.toL2Bus.snoop_filter snoop_response_latency=1 @@ -683,7 +729,7 @@ env= errout=cerr euid=100 eventq_index=0 -executable=/dist/m5/regression/test-progs/hello/bin/alpha/tru64/hello +executable=/arm/projectscratch/randd/systems/dist/test-progs/hello/bin/alpha/tru64/hello gid=100 input=cin kvmInSE=false @@ -715,10 +761,15 @@ transition_latency=100000000 [system.membus] type=CoherentXBar clk_domain=system.clk_domain +default_p_state=UNDEFINED eventq_index=0 forward_latency=4 frontend_latency=3 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 point_of_coherency=true +power_model=Null response_latency=2 snoop_filter=Null snoop_response_latency=4 @@ -762,6 +813,7 @@ burst_length=8 channels=1 clk_domain=system.clk_domain conf_table_reported=true +default_p_state=UNDEFINED device_bus_width=8 device_rowbuffer_size=1024 device_size=536870912 @@ -773,7 +825,11 @@ max_accesses_per_row=16 mem_sched_policy=frfcfs min_writes_per_switch=16 null=false +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 page_policy=open_adaptive +power_model=Null range=0:134217727 ranks_per_channel=2 read_buffer_size=32 diff --git a/tests/quick/se/00.hello/ref/alpha/tru64/minor-timing/simerr b/tests/quick/se/00.hello/ref/alpha/tru64/minor-timing/simerr index c6957696d..b68e0fd83 100755 --- a/tests/quick/se/00.hello/ref/alpha/tru64/minor-timing/simerr +++ b/tests/quick/se/00.hello/ref/alpha/tru64/minor-timing/simerr @@ -1,3 +1,4 @@ warn: DRAM device capacity (8192 Mbytes) does not match the address range assigned (128 Mbytes) warn: Sockets disabled, not accepting gdb connections +warn: ClockedObject: More than one power state change request encountered within the same simulation tick warn: ignoring syscall sigprocmask(1, ...) diff --git a/tests/quick/se/00.hello/ref/alpha/tru64/minor-timing/simout b/tests/quick/se/00.hello/ref/alpha/tru64/minor-timing/simout index 9179fdffe..115f46689 100755 --- a/tests/quick/se/00.hello/ref/alpha/tru64/minor-timing/simout +++ b/tests/quick/se/00.hello/ref/alpha/tru64/minor-timing/simout @@ -1,13 +1,15 @@ +Redirecting stdout to build/ALPHA/tests/opt/quick/se/00.hello/alpha/tru64/minor-timing/simout +Redirecting stderr to build/ALPHA/tests/opt/quick/se/00.hello/alpha/tru64/minor-timing/simerr gem5 Simulator System. http://gem5.org gem5 is copyrighted software; use the --copyright option for details. 
-gem5 compiled Jan 21 2016 13:49:21 -gem5 started Jan 21 2016 13:50:13 -gem5 executing on zizzer, pid 34033 -command line: build/ALPHA/gem5.opt -d build/ALPHA/tests/opt/quick/se/00.hello/alpha/tru64/minor-timing -re /z/atgutier/gem5/gem5-commit/tests/run.py build/ALPHA/tests/opt/quick/se/00.hello/alpha/tru64/minor-timing +gem5 compiled Jul 19 2016 12:23:51 +gem5 started Jul 19 2016 12:24:24 +gem5 executing on e108600-lin, pid 39579 +command line: /work/curdun01/gem5-external.hg/build/ALPHA/gem5.opt -d build/ALPHA/tests/opt/quick/se/00.hello/alpha/tru64/minor-timing -re /work/curdun01/gem5-external.hg/tests/testing/../run.py quick/se/00.hello/alpha/tru64/minor-timing Global frequency set at 1000000000000 ticks per second info: Entering event queue @ 0. Starting simulation... info: Increasing stack size by one page. Hello world! -Exiting @ tick 20075000 because target called exit() +Exiting @ tick 20329000 because target called exit() diff --git a/tests/quick/se/00.hello/ref/alpha/tru64/minor-timing/stats.txt b/tests/quick/se/00.hello/ref/alpha/tru64/minor-timing/stats.txt index b5156559e..ac371de2b 100644 --- a/tests/quick/se/00.hello/ref/alpha/tru64/minor-timing/stats.txt +++ b/tests/quick/se/00.hello/ref/alpha/tru64/minor-timing/stats.txt @@ -1,19 +1,19 @@ ---------- Begin Simulation Statistics ---------- sim_seconds 0.000020 # Number of seconds simulated -sim_ticks 20320000 # Number of ticks simulated -final_tick 20320000 # Number of ticks from beginning of simulation (restored from checkpoints and never reset) +sim_ticks 20329000 # Number of ticks simulated +final_tick 20329000 # Number of ticks from beginning of simulation (restored from checkpoints and never reset) sim_freq 1000000000000 # Frequency of simulated ticks -host_inst_rate 171591 # Simulator instruction rate (inst/s) -host_op_rate 171481 # Simulator op (including micro ops) rate (op/s) -host_tick_rate 1347191282 # Simulator tick rate (ticks/s) -host_mem_usage 293200 # Number of bytes of host memory used +host_inst_rate 113549 # Simulator instruction rate (inst/s) +host_op_rate 113428 # Simulator op (including micro ops) rate (op/s) +host_tick_rate 891182571 # Simulator tick rate (ticks/s) +host_mem_usage 248724 # Number of bytes of host memory used host_seconds 0.02 # Real time elapsed on the host sim_insts 2585 # Number of instructions simulated sim_ops 2585 # Number of ops (including micro ops) simulated system.voltage_domain.voltage 1 # Voltage in Volts system.clk_domain.clock 1000 # Clock period in ticks -system.physmem.pwrStateResidencyTicks::UNDEFINED 20320000 # Cumulative time (in ticks) in various power states +system.physmem.pwrStateResidencyTicks::UNDEFINED 20329000 # Cumulative time (in ticks) in various power states system.physmem.bytes_read::cpu.inst 14400 # Number of bytes read from this memory system.physmem.bytes_read::cpu.data 5440 # Number of bytes read from this memory system.physmem.bytes_read::total 19840 # Number of bytes read from this memory @@ -22,14 +22,14 @@ system.physmem.bytes_inst_read::total 14400 # Nu system.physmem.num_reads::cpu.inst 225 # Number of read requests responded to by this memory system.physmem.num_reads::cpu.data 85 # Number of read requests responded to by this memory system.physmem.num_reads::total 310 # Number of read requests responded to by this memory -system.physmem.bw_read::cpu.inst 708661417 # Total read bandwidth from this memory (bytes/s) -system.physmem.bw_read::cpu.data 267716535 # Total read bandwidth from this memory (bytes/s) -system.physmem.bw_read::total 
976377953 # Total read bandwidth from this memory (bytes/s) -system.physmem.bw_inst_read::cpu.inst 708661417 # Instruction read bandwidth from this memory (bytes/s) -system.physmem.bw_inst_read::total 708661417 # Instruction read bandwidth from this memory (bytes/s) -system.physmem.bw_total::cpu.inst 708661417 # Total bandwidth to/from this memory (bytes/s) -system.physmem.bw_total::cpu.data 267716535 # Total bandwidth to/from this memory (bytes/s) -system.physmem.bw_total::total 976377953 # Total bandwidth to/from this memory (bytes/s) +system.physmem.bw_read::cpu.inst 708347681 # Total read bandwidth from this memory (bytes/s) +system.physmem.bw_read::cpu.data 267598013 # Total read bandwidth from this memory (bytes/s) +system.physmem.bw_read::total 975945693 # Total read bandwidth from this memory (bytes/s) +system.physmem.bw_inst_read::cpu.inst 708347681 # Instruction read bandwidth from this memory (bytes/s) +system.physmem.bw_inst_read::total 708347681 # Instruction read bandwidth from this memory (bytes/s) +system.physmem.bw_total::cpu.inst 708347681 # Total bandwidth to/from this memory (bytes/s) +system.physmem.bw_total::cpu.data 267598013 # Total bandwidth to/from this memory (bytes/s) +system.physmem.bw_total::total 975945693 # Total bandwidth to/from this memory (bytes/s) system.physmem.readReqs 310 # Number of read requests accepted system.physmem.writeReqs 0 # Number of write requests accepted system.physmem.readBursts 310 # Number of DRAM read bursts, including those serviced by the write queue @@ -49,11 +49,11 @@ system.physmem.perBankRdBursts::3 24 # Pe system.physmem.perBankRdBursts::4 21 # Per bank write bursts system.physmem.perBankRdBursts::5 0 # Per bank write bursts system.physmem.perBankRdBursts::6 27 # Per bank write bursts -system.physmem.perBankRdBursts::7 47 # Per bank write bursts +system.physmem.perBankRdBursts::7 48 # Per bank write bursts system.physmem.perBankRdBursts::8 68 # Per bank write bursts system.physmem.perBankRdBursts::9 2 # Per bank write bursts system.physmem.perBankRdBursts::10 15 # Per bank write bursts -system.physmem.perBankRdBursts::11 16 # Per bank write bursts +system.physmem.perBankRdBursts::11 15 # Per bank write bursts system.physmem.perBankRdBursts::12 18 # Per bank write bursts system.physmem.perBankRdBursts::13 52 # Per bank write bursts system.physmem.perBankRdBursts::14 15 # Per bank write bursts @@ -76,7 +76,7 @@ system.physmem.perBankWrBursts::14 0 # Pe system.physmem.perBankWrBursts::15 0 # Per bank write bursts system.physmem.numRdRetry 0 # Number of times read queue was full causing retry system.physmem.numWrRetry 0 # Number of times write queue was full causing retry -system.physmem.totGap 20232000 # Total gap between requests +system.physmem.totGap 20241500 # Total gap between requests system.physmem.readPktSize::0 0 # Read request sizes (log2) system.physmem.readPktSize::1 0 # Read request sizes (log2) system.physmem.readPktSize::2 0 # Read request sizes (log2) @@ -91,8 +91,8 @@ system.physmem.writePktSize::3 0 # Wr system.physmem.writePktSize::4 0 # Write request sizes (log2) system.physmem.writePktSize::5 0 # Write request sizes (log2) system.physmem.writePktSize::6 0 # Write request sizes (log2) -system.physmem.rdQLenPdf::0 245 # What read queue length does an incoming req see -system.physmem.rdQLenPdf::1 62 # What read queue length does an incoming req see +system.physmem.rdQLenPdf::0 246 # What read queue length does an incoming req see +system.physmem.rdQLenPdf::1 61 # What read queue length does an incoming req see 
system.physmem.rdQLenPdf::2 3 # What read queue length does an incoming req see system.physmem.rdQLenPdf::3 0 # What read queue length does an incoming req see system.physmem.rdQLenPdf::4 0 # What read queue length does an incoming req see @@ -189,71 +189,71 @@ system.physmem.wrQLenPdf::62 0 # Wh system.physmem.wrQLenPdf::63 0 # What write queue length does an incoming req see system.physmem.bytesPerActivate::samples 41 # Bytes accessed per row activation system.physmem.bytesPerActivate::mean 429.268293 # Bytes accessed per row activation -system.physmem.bytesPerActivate::gmean 282.076610 # Bytes accessed per row activation -system.physmem.bytesPerActivate::stdev 329.225077 # Bytes accessed per row activation +system.physmem.bytesPerActivate::gmean 281.421645 # Bytes accessed per row activation +system.physmem.bytesPerActivate::stdev 332.320856 # Bytes accessed per row activation system.physmem.bytesPerActivate::0-127 10 24.39% 24.39% # Bytes accessed per row activation system.physmem.bytesPerActivate::128-255 6 14.63% 39.02% # Bytes accessed per row activation system.physmem.bytesPerActivate::256-383 4 9.76% 48.78% # Bytes accessed per row activation -system.physmem.bytesPerActivate::384-511 2 4.88% 53.66% # Bytes accessed per row activation -system.physmem.bytesPerActivate::512-639 7 17.07% 70.73% # Bytes accessed per row activation -system.physmem.bytesPerActivate::640-767 3 7.32% 78.05% # Bytes accessed per row activation -system.physmem.bytesPerActivate::768-895 3 7.32% 85.37% # Bytes accessed per row activation +system.physmem.bytesPerActivate::384-511 3 7.32% 56.10% # Bytes accessed per row activation +system.physmem.bytesPerActivate::512-639 6 14.63% 70.73% # Bytes accessed per row activation +system.physmem.bytesPerActivate::640-767 2 4.88% 75.61% # Bytes accessed per row activation +system.physmem.bytesPerActivate::768-895 4 9.76% 85.37% # Bytes accessed per row activation system.physmem.bytesPerActivate::896-1023 3 7.32% 92.68% # Bytes accessed per row activation system.physmem.bytesPerActivate::1024-1151 3 7.32% 100.00% # Bytes accessed per row activation system.physmem.bytesPerActivate::total 41 # Bytes accessed per row activation -system.physmem.totQLat 1648500 # Total ticks spent queuing -system.physmem.totMemAccLat 7461000 # Total ticks spent from burst creation until serviced by the DRAM +system.physmem.totQLat 1774250 # Total ticks spent queuing +system.physmem.totMemAccLat 7586750 # Total ticks spent from burst creation until serviced by the DRAM system.physmem.totBusLat 1550000 # Total ticks spent in databus transfers -system.physmem.avgQLat 5317.74 # Average queueing delay per DRAM burst +system.physmem.avgQLat 5723.39 # Average queueing delay per DRAM burst system.physmem.avgBusLat 5000.00 # Average bus latency per DRAM burst -system.physmem.avgMemAccLat 24067.74 # Average memory access latency per DRAM burst -system.physmem.avgRdBW 976.38 # Average DRAM read bandwidth in MiByte/s +system.physmem.avgMemAccLat 24473.39 # Average memory access latency per DRAM burst +system.physmem.avgRdBW 975.95 # Average DRAM read bandwidth in MiByte/s system.physmem.avgWrBW 0.00 # Average achieved write bandwidth in MiByte/s -system.physmem.avgRdBWSys 976.38 # Average system read bandwidth in MiByte/s +system.physmem.avgRdBWSys 975.95 # Average system read bandwidth in MiByte/s system.physmem.avgWrBWSys 0.00 # Average system write bandwidth in MiByte/s system.physmem.peakBW 12800.00 # Theoretical peak bandwidth in MiByte/s -system.physmem.busUtil 7.63 # Data bus utilization in 
percentage -system.physmem.busUtilRead 7.63 # Data bus utilization in percentage for reads +system.physmem.busUtil 7.62 # Data bus utilization in percentage +system.physmem.busUtilRead 7.62 # Data bus utilization in percentage for reads system.physmem.busUtilWrite 0.00 # Data bus utilization in percentage for writes system.physmem.avgRdQLen 1.25 # Average read queue length when enqueuing system.physmem.avgWrQLen 0.00 # Average write queue length when enqueuing -system.physmem.readRowHits 259 # Number of row buffer hits during reads +system.physmem.readRowHits 260 # Number of row buffer hits during reads system.physmem.writeRowHits 0 # Number of row buffer hits during writes -system.physmem.readRowHitRate 83.55 # Row buffer hit rate for reads +system.physmem.readRowHitRate 83.87 # Row buffer hit rate for reads system.physmem.writeRowHitRate nan # Row buffer hit rate for writes -system.physmem.avgGap 65264.52 # Average gap between requests -system.physmem.pageHitRate 83.55 # Row buffer hit rate, read and write combined +system.physmem.avgGap 65295.16 # Average gap between requests +system.physmem.pageHitRate 83.87 # Row buffer hit rate, read and write combined system.physmem_0.actEnergy 83160 # Energy for activate commands per rank (pJ) system.physmem_0.preEnergy 45375 # Energy for precharge commands per rank (pJ) -system.physmem_0.readEnergy 780000 # Energy for read commands per rank (pJ) +system.physmem_0.readEnergy 787800 # Energy for read commands per rank (pJ) system.physmem_0.writeEnergy 0 # Energy for write commands per rank (pJ) system.physmem_0.refreshEnergy 1017120 # Energy for refresh commands per rank (pJ) -system.physmem_0.actBackEnergy 10605420 # Energy for active background per rank (pJ) -system.physmem_0.preBackEnergy 196500 # Energy for precharge background per rank (pJ) -system.physmem_0.totalEnergy 12727575 # Total energy per rank (pJ) -system.physmem_0.averagePower 803.889152 # Core power per rank (mW) -system.physmem_0.memoryStateTime::IDLE 681250 # Time in different power states +system.physmem_0.actBackEnergy 10557540 # Energy for active background per rank (pJ) +system.physmem_0.preBackEnergy 238500 # Energy for precharge background per rank (pJ) +system.physmem_0.totalEnergy 12729495 # Total energy per rank (pJ) +system.physmem_0.averagePower 804.010422 # Core power per rank (mW) +system.physmem_0.memoryStateTime::IDLE 825750 # Time in different power states system.physmem_0.memoryStateTime::REF 520000 # Time in different power states system.physmem_0.memoryStateTime::PRE_PDN 0 # Time in different power states -system.physmem_0.memoryStateTime::ACT 15041250 # Time in different power states +system.physmem_0.memoryStateTime::ACT 14971750 # Time in different power states system.physmem_0.memoryStateTime::ACT_PDN 0 # Time in different power states system.physmem_1.actEnergy 189000 # Energy for activate commands per rank (pJ) system.physmem_1.preEnergy 103125 # Energy for precharge commands per rank (pJ) -system.physmem_1.readEnergy 1193400 # Energy for read commands per rank (pJ) +system.physmem_1.readEnergy 1185600 # Energy for read commands per rank (pJ) system.physmem_1.writeEnergy 0 # Energy for write commands per rank (pJ) system.physmem_1.refreshEnergy 1017120 # Energy for refresh commands per rank (pJ) -system.physmem_1.actBackEnergy 10488285 # Energy for active background per rank (pJ) +system.physmem_1.actBackEnergy 10490850 # Energy for active background per rank (pJ) system.physmem_1.preBackEnergy 299250 # Energy for precharge background per rank (pJ) 
-system.physmem_1.totalEnergy 13290180 # Total energy per rank (pJ) -system.physmem_1.averagePower 839.423970 # Core power per rank (mW) +system.physmem_1.totalEnergy 13284945 # Total energy per rank (pJ) +system.physmem_1.averagePower 838.894625 # Core power per rank (mW) system.physmem_1.memoryStateTime::IDLE 457000 # Time in different power states system.physmem_1.memoryStateTime::REF 520000 # Time in different power states system.physmem_1.memoryStateTime::PRE_PDN 0 # Time in different power states -system.physmem_1.memoryStateTime::ACT 14869250 # Time in different power states +system.physmem_1.memoryStateTime::ACT 14872250 # Time in different power states system.physmem_1.memoryStateTime::ACT_PDN 0 # Time in different power states -system.pwrStateResidencyTicks::UNDEFINED 20320000 # Cumulative time (in ticks) in various power states +system.pwrStateResidencyTicks::UNDEFINED 20329000 # Cumulative time (in ticks) in various power states system.cpu.branchPred.lookups 794 # Number of BP lookups -system.cpu.branchPred.condPredicted 395 # Number of conditional branches predicted +system.cpu.branchPred.condPredicted 394 # Number of conditional branches predicted system.cpu.branchPred.condIncorrect 170 # Number of conditional branches incorrect system.cpu.branchPred.BTBLookups 562 # Number of BTB lookups system.cpu.branchPred.BTBHits 54 # Number of BTB hits @@ -270,22 +270,22 @@ system.cpu.dtb.fetch_hits 0 # IT system.cpu.dtb.fetch_misses 0 # ITB misses system.cpu.dtb.fetch_acv 0 # ITB acv system.cpu.dtb.fetch_accesses 0 # ITB accesses -system.cpu.dtb.read_hits 510 # DTB read hits -system.cpu.dtb.read_misses 8 # DTB read misses -system.cpu.dtb.read_acv 1 # DTB read access violations -system.cpu.dtb.read_accesses 518 # DTB read accesses +system.cpu.dtb.read_hits 506 # DTB read hits +system.cpu.dtb.read_misses 6 # DTB read misses +system.cpu.dtb.read_acv 0 # DTB read access violations +system.cpu.dtb.read_accesses 512 # DTB read accesses system.cpu.dtb.write_hits 307 # DTB write hits system.cpu.dtb.write_misses 6 # DTB write misses system.cpu.dtb.write_acv 0 # DTB write access violations system.cpu.dtb.write_accesses 313 # DTB write accesses -system.cpu.dtb.data_hits 817 # DTB hits -system.cpu.dtb.data_misses 14 # DTB misses -system.cpu.dtb.data_acv 1 # DTB access violations -system.cpu.dtb.data_accesses 831 # DTB accesses -system.cpu.itb.fetch_hits 975 # ITB hits +system.cpu.dtb.data_hits 813 # DTB hits +system.cpu.dtb.data_misses 12 # DTB misses +system.cpu.dtb.data_acv 0 # DTB access violations +system.cpu.dtb.data_accesses 825 # DTB accesses +system.cpu.itb.fetch_hits 979 # ITB hits system.cpu.itb.fetch_misses 13 # ITB misses system.cpu.itb.fetch_acv 0 # ITB acv -system.cpu.itb.fetch_accesses 988 # ITB accesses +system.cpu.itb.fetch_accesses 992 # ITB accesses system.cpu.itb.read_hits 0 # DTB read hits system.cpu.itb.read_misses 0 # DTB read misses system.cpu.itb.read_acv 0 # DTB read access violations @@ -299,16 +299,16 @@ system.cpu.itb.data_misses 0 # DT system.cpu.itb.data_acv 0 # DTB access violations system.cpu.itb.data_accesses 0 # DTB accesses system.cpu.workload.num_syscalls 4 # Number of system calls -system.cpu.pwrStateResidencyTicks::ON 20320000 # Cumulative time (in ticks) in various power states -system.cpu.numCycles 40640 # number of cpu cycles simulated +system.cpu.pwrStateResidencyTicks::ON 20329000 # Cumulative time (in ticks) in various power states +system.cpu.numCycles 40658 # number of cpu cycles simulated system.cpu.numWorkItemsStarted 0 # number of work items this 
cpu started system.cpu.numWorkItemsCompleted 0 # number of work items this cpu completed system.cpu.committedInsts 2585 # Number of instructions committed system.cpu.committedOps 2585 # Number of ops (including micro ops) committed -system.cpu.discardedOps 603 # Number of ops (including micro ops) which were discarded before commit +system.cpu.discardedOps 573 # Number of ops (including micro ops) which were discarded before commit system.cpu.numFetchSuspends 0 # Number of times Execute suspended instruction fetching -system.cpu.cpi 15.721470 # CPI: cycles per instruction -system.cpu.ipc 0.063607 # IPC: instructions per cycle +system.cpu.cpi 15.728433 # CPI: cycles per instruction +system.cpu.ipc 0.063579 # IPC: instructions per cycle system.cpu.op_class_0::No_OpClass 189 7.31% 7.31% # Class of committed instruction system.cpu.op_class_0::IntAlu 1678 64.91% 72.22% # Class of committed instruction system.cpu.op_class_0::IntMult 1 0.04% 72.26% # Class of committed instruction @@ -344,87 +344,87 @@ system.cpu.op_class_0::MemWrite 298 11.53% 100.00% # Cl system.cpu.op_class_0::IprAccess 0 0.00% 100.00% # Class of committed instruction system.cpu.op_class_0::InstPrefetch 0 0.00% 100.00% # Class of committed instruction system.cpu.op_class_0::total 2585 # Class of committed instruction -system.cpu.tickCycles 5416 # Number of cycles that the object actually ticked -system.cpu.idleCycles 35224 # Total number of cycles that the object has spent stopped -system.cpu.dcache.tags.pwrStateResidencyTicks::UNDEFINED 20320000 # Cumulative time (in ticks) in various power states +system.cpu.tickCycles 5421 # Number of cycles that the object actually ticked +system.cpu.idleCycles 35237 # Total number of cycles that the object has spent stopped +system.cpu.dcache.tags.pwrStateResidencyTicks::UNDEFINED 20329000 # Cumulative time (in ticks) in various power states system.cpu.dcache.tags.replacements 0 # number of replacements -system.cpu.dcache.tags.tagsinuse 48.513757 # Cycle average of tags in use -system.cpu.dcache.tags.total_refs 693 # Total number of references to valid blocks. +system.cpu.dcache.tags.tagsinuse 48.302993 # Cycle average of tags in use +system.cpu.dcache.tags.total_refs 692 # Total number of references to valid blocks. system.cpu.dcache.tags.sampled_refs 85 # Sample count of references to valid blocks. -system.cpu.dcache.tags.avg_refs 8.152941 # Average number of references to valid blocks. +system.cpu.dcache.tags.avg_refs 8.141176 # Average number of references to valid blocks. system.cpu.dcache.tags.warmup_cycle 0 # Cycle when the warmup percentage was hit. 
-system.cpu.dcache.tags.occ_blocks::cpu.data 48.513757 # Average occupied blocks per requestor -system.cpu.dcache.tags.occ_percent::cpu.data 0.011844 # Average percentage of cache occupancy -system.cpu.dcache.tags.occ_percent::total 0.011844 # Average percentage of cache occupancy +system.cpu.dcache.tags.occ_blocks::cpu.data 48.302993 # Average occupied blocks per requestor +system.cpu.dcache.tags.occ_percent::cpu.data 0.011793 # Average percentage of cache occupancy +system.cpu.dcache.tags.occ_percent::total 0.011793 # Average percentage of cache occupancy system.cpu.dcache.tags.occ_task_id_blocks::1024 85 # Occupied blocks per task id system.cpu.dcache.tags.age_task_id_blocks_1024::0 33 # Occupied blocks per task id system.cpu.dcache.tags.age_task_id_blocks_1024::1 52 # Occupied blocks per task id system.cpu.dcache.tags.occ_task_id_percent::1024 0.020752 # Percentage of cache occupancy per task id -system.cpu.dcache.tags.tag_accesses 1679 # Number of tag accesses -system.cpu.dcache.tags.data_accesses 1679 # Number of data accesses -system.cpu.dcache.pwrStateResidencyTicks::UNDEFINED 20320000 # Cumulative time (in ticks) in various power states -system.cpu.dcache.ReadReq_hits::cpu.data 442 # number of ReadReq hits -system.cpu.dcache.ReadReq_hits::total 442 # number of ReadReq hits +system.cpu.dcache.tags.tag_accesses 1673 # Number of tag accesses +system.cpu.dcache.tags.data_accesses 1673 # Number of data accesses +system.cpu.dcache.pwrStateResidencyTicks::UNDEFINED 20329000 # Cumulative time (in ticks) in various power states +system.cpu.dcache.ReadReq_hits::cpu.data 441 # number of ReadReq hits +system.cpu.dcache.ReadReq_hits::total 441 # number of ReadReq hits system.cpu.dcache.WriteReq_hits::cpu.data 251 # number of WriteReq hits system.cpu.dcache.WriteReq_hits::total 251 # number of WriteReq hits -system.cpu.dcache.demand_hits::cpu.data 693 # number of demand (read+write) hits -system.cpu.dcache.demand_hits::total 693 # number of demand (read+write) hits -system.cpu.dcache.overall_hits::cpu.data 693 # number of overall hits -system.cpu.dcache.overall_hits::total 693 # number of overall hits -system.cpu.dcache.ReadReq_misses::cpu.data 61 # number of ReadReq misses -system.cpu.dcache.ReadReq_misses::total 61 # number of ReadReq misses +system.cpu.dcache.demand_hits::cpu.data 692 # number of demand (read+write) hits +system.cpu.dcache.demand_hits::total 692 # number of demand (read+write) hits +system.cpu.dcache.overall_hits::cpu.data 692 # number of overall hits +system.cpu.dcache.overall_hits::total 692 # number of overall hits +system.cpu.dcache.ReadReq_misses::cpu.data 59 # number of ReadReq misses +system.cpu.dcache.ReadReq_misses::total 59 # number of ReadReq misses system.cpu.dcache.WriteReq_misses::cpu.data 43 # number of WriteReq misses system.cpu.dcache.WriteReq_misses::total 43 # number of WriteReq misses -system.cpu.dcache.demand_misses::cpu.data 104 # number of demand (read+write) misses -system.cpu.dcache.demand_misses::total 104 # number of demand (read+write) misses -system.cpu.dcache.overall_misses::cpu.data 104 # number of overall misses -system.cpu.dcache.overall_misses::total 104 # number of overall misses -system.cpu.dcache.ReadReq_miss_latency::cpu.data 4723500 # number of ReadReq miss cycles -system.cpu.dcache.ReadReq_miss_latency::total 4723500 # number of ReadReq miss cycles -system.cpu.dcache.WriteReq_miss_latency::cpu.data 3258500 # number of WriteReq miss cycles -system.cpu.dcache.WriteReq_miss_latency::total 3258500 # number of WriteReq miss cycles 
-system.cpu.dcache.demand_miss_latency::cpu.data 7982000 # number of demand (read+write) miss cycles -system.cpu.dcache.demand_miss_latency::total 7982000 # number of demand (read+write) miss cycles -system.cpu.dcache.overall_miss_latency::cpu.data 7982000 # number of overall miss cycles -system.cpu.dcache.overall_miss_latency::total 7982000 # number of overall miss cycles -system.cpu.dcache.ReadReq_accesses::cpu.data 503 # number of ReadReq accesses(hits+misses) -system.cpu.dcache.ReadReq_accesses::total 503 # number of ReadReq accesses(hits+misses) +system.cpu.dcache.demand_misses::cpu.data 102 # number of demand (read+write) misses +system.cpu.dcache.demand_misses::total 102 # number of demand (read+write) misses +system.cpu.dcache.overall_misses::cpu.data 102 # number of overall misses +system.cpu.dcache.overall_misses::total 102 # number of overall misses +system.cpu.dcache.ReadReq_miss_latency::cpu.data 4783500 # number of ReadReq miss cycles +system.cpu.dcache.ReadReq_miss_latency::total 4783500 # number of ReadReq miss cycles +system.cpu.dcache.WriteReq_miss_latency::cpu.data 3258000 # number of WriteReq miss cycles +system.cpu.dcache.WriteReq_miss_latency::total 3258000 # number of WriteReq miss cycles +system.cpu.dcache.demand_miss_latency::cpu.data 8041500 # number of demand (read+write) miss cycles +system.cpu.dcache.demand_miss_latency::total 8041500 # number of demand (read+write) miss cycles +system.cpu.dcache.overall_miss_latency::cpu.data 8041500 # number of overall miss cycles +system.cpu.dcache.overall_miss_latency::total 8041500 # number of overall miss cycles +system.cpu.dcache.ReadReq_accesses::cpu.data 500 # number of ReadReq accesses(hits+misses) +system.cpu.dcache.ReadReq_accesses::total 500 # number of ReadReq accesses(hits+misses) system.cpu.dcache.WriteReq_accesses::cpu.data 294 # number of WriteReq accesses(hits+misses) system.cpu.dcache.WriteReq_accesses::total 294 # number of WriteReq accesses(hits+misses) -system.cpu.dcache.demand_accesses::cpu.data 797 # number of demand (read+write) accesses -system.cpu.dcache.demand_accesses::total 797 # number of demand (read+write) accesses -system.cpu.dcache.overall_accesses::cpu.data 797 # number of overall (read+write) accesses -system.cpu.dcache.overall_accesses::total 797 # number of overall (read+write) accesses -system.cpu.dcache.ReadReq_miss_rate::cpu.data 0.121272 # miss rate for ReadReq accesses -system.cpu.dcache.ReadReq_miss_rate::total 0.121272 # miss rate for ReadReq accesses +system.cpu.dcache.demand_accesses::cpu.data 794 # number of demand (read+write) accesses +system.cpu.dcache.demand_accesses::total 794 # number of demand (read+write) accesses +system.cpu.dcache.overall_accesses::cpu.data 794 # number of overall (read+write) accesses +system.cpu.dcache.overall_accesses::total 794 # number of overall (read+write) accesses +system.cpu.dcache.ReadReq_miss_rate::cpu.data 0.118000 # miss rate for ReadReq accesses +system.cpu.dcache.ReadReq_miss_rate::total 0.118000 # miss rate for ReadReq accesses system.cpu.dcache.WriteReq_miss_rate::cpu.data 0.146259 # miss rate for WriteReq accesses system.cpu.dcache.WriteReq_miss_rate::total 0.146259 # miss rate for WriteReq accesses -system.cpu.dcache.demand_miss_rate::cpu.data 0.130489 # miss rate for demand accesses -system.cpu.dcache.demand_miss_rate::total 0.130489 # miss rate for demand accesses -system.cpu.dcache.overall_miss_rate::cpu.data 0.130489 # miss rate for overall accesses -system.cpu.dcache.overall_miss_rate::total 0.130489 # miss rate for overall 
accesses -system.cpu.dcache.ReadReq_avg_miss_latency::cpu.data 77434.426230 # average ReadReq miss latency -system.cpu.dcache.ReadReq_avg_miss_latency::total 77434.426230 # average ReadReq miss latency -system.cpu.dcache.WriteReq_avg_miss_latency::cpu.data 75779.069767 # average WriteReq miss latency -system.cpu.dcache.WriteReq_avg_miss_latency::total 75779.069767 # average WriteReq miss latency -system.cpu.dcache.demand_avg_miss_latency::cpu.data 76750 # average overall miss latency -system.cpu.dcache.demand_avg_miss_latency::total 76750 # average overall miss latency -system.cpu.dcache.overall_avg_miss_latency::cpu.data 76750 # average overall miss latency -system.cpu.dcache.overall_avg_miss_latency::total 76750 # average overall miss latency +system.cpu.dcache.demand_miss_rate::cpu.data 0.128463 # miss rate for demand accesses +system.cpu.dcache.demand_miss_rate::total 0.128463 # miss rate for demand accesses +system.cpu.dcache.overall_miss_rate::cpu.data 0.128463 # miss rate for overall accesses +system.cpu.dcache.overall_miss_rate::total 0.128463 # miss rate for overall accesses +system.cpu.dcache.ReadReq_avg_miss_latency::cpu.data 81076.271186 # average ReadReq miss latency +system.cpu.dcache.ReadReq_avg_miss_latency::total 81076.271186 # average ReadReq miss latency +system.cpu.dcache.WriteReq_avg_miss_latency::cpu.data 75767.441860 # average WriteReq miss latency +system.cpu.dcache.WriteReq_avg_miss_latency::total 75767.441860 # average WriteReq miss latency +system.cpu.dcache.demand_avg_miss_latency::cpu.data 78838.235294 # average overall miss latency +system.cpu.dcache.demand_avg_miss_latency::total 78838.235294 # average overall miss latency +system.cpu.dcache.overall_avg_miss_latency::cpu.data 78838.235294 # average overall miss latency +system.cpu.dcache.overall_avg_miss_latency::total 78838.235294 # average overall miss latency system.cpu.dcache.blocked_cycles::no_mshrs 0 # number of cycles access was blocked system.cpu.dcache.blocked_cycles::no_targets 0 # number of cycles access was blocked system.cpu.dcache.blocked::no_mshrs 0 # number of cycles access was blocked system.cpu.dcache.blocked::no_targets 0 # number of cycles access was blocked system.cpu.dcache.avg_blocked_cycles::no_mshrs nan # average number of cycles each access was blocked system.cpu.dcache.avg_blocked_cycles::no_targets nan # average number of cycles each access was blocked -system.cpu.dcache.ReadReq_mshr_hits::cpu.data 3 # number of ReadReq MSHR hits -system.cpu.dcache.ReadReq_mshr_hits::total 3 # number of ReadReq MSHR hits +system.cpu.dcache.ReadReq_mshr_hits::cpu.data 1 # number of ReadReq MSHR hits +system.cpu.dcache.ReadReq_mshr_hits::total 1 # number of ReadReq MSHR hits system.cpu.dcache.WriteReq_mshr_hits::cpu.data 16 # number of WriteReq MSHR hits system.cpu.dcache.WriteReq_mshr_hits::total 16 # number of WriteReq MSHR hits -system.cpu.dcache.demand_mshr_hits::cpu.data 19 # number of demand (read+write) MSHR hits -system.cpu.dcache.demand_mshr_hits::total 19 # number of demand (read+write) MSHR hits -system.cpu.dcache.overall_mshr_hits::cpu.data 19 # number of overall MSHR hits -system.cpu.dcache.overall_mshr_hits::total 19 # number of overall MSHR hits +system.cpu.dcache.demand_mshr_hits::cpu.data 17 # number of demand (read+write) MSHR hits +system.cpu.dcache.demand_mshr_hits::total 17 # number of demand (read+write) MSHR hits +system.cpu.dcache.overall_mshr_hits::cpu.data 17 # number of overall MSHR hits +system.cpu.dcache.overall_mshr_hits::total 17 # number of overall MSHR hits 
system.cpu.dcache.ReadReq_mshr_misses::cpu.data 58 # number of ReadReq MSHR misses system.cpu.dcache.ReadReq_mshr_misses::total 58 # number of ReadReq MSHR misses system.cpu.dcache.WriteReq_mshr_misses::cpu.data 27 # number of WriteReq MSHR misses @@ -433,83 +433,83 @@ system.cpu.dcache.demand_mshr_misses::cpu.data 85 system.cpu.dcache.demand_mshr_misses::total 85 # number of demand (read+write) MSHR misses system.cpu.dcache.overall_mshr_misses::cpu.data 85 # number of overall MSHR misses system.cpu.dcache.overall_mshr_misses::total 85 # number of overall MSHR misses -system.cpu.dcache.ReadReq_mshr_miss_latency::cpu.data 4442500 # number of ReadReq MSHR miss cycles -system.cpu.dcache.ReadReq_mshr_miss_latency::total 4442500 # number of ReadReq MSHR miss cycles +system.cpu.dcache.ReadReq_mshr_miss_latency::cpu.data 4654000 # number of ReadReq MSHR miss cycles +system.cpu.dcache.ReadReq_mshr_miss_latency::total 4654000 # number of ReadReq MSHR miss cycles system.cpu.dcache.WriteReq_mshr_miss_latency::cpu.data 2017500 # number of WriteReq MSHR miss cycles system.cpu.dcache.WriteReq_mshr_miss_latency::total 2017500 # number of WriteReq MSHR miss cycles -system.cpu.dcache.demand_mshr_miss_latency::cpu.data 6460000 # number of demand (read+write) MSHR miss cycles -system.cpu.dcache.demand_mshr_miss_latency::total 6460000 # number of demand (read+write) MSHR miss cycles -system.cpu.dcache.overall_mshr_miss_latency::cpu.data 6460000 # number of overall MSHR miss cycles -system.cpu.dcache.overall_mshr_miss_latency::total 6460000 # number of overall MSHR miss cycles -system.cpu.dcache.ReadReq_mshr_miss_rate::cpu.data 0.115308 # mshr miss rate for ReadReq accesses -system.cpu.dcache.ReadReq_mshr_miss_rate::total 0.115308 # mshr miss rate for ReadReq accesses +system.cpu.dcache.demand_mshr_miss_latency::cpu.data 6671500 # number of demand (read+write) MSHR miss cycles +system.cpu.dcache.demand_mshr_miss_latency::total 6671500 # number of demand (read+write) MSHR miss cycles +system.cpu.dcache.overall_mshr_miss_latency::cpu.data 6671500 # number of overall MSHR miss cycles +system.cpu.dcache.overall_mshr_miss_latency::total 6671500 # number of overall MSHR miss cycles +system.cpu.dcache.ReadReq_mshr_miss_rate::cpu.data 0.116000 # mshr miss rate for ReadReq accesses +system.cpu.dcache.ReadReq_mshr_miss_rate::total 0.116000 # mshr miss rate for ReadReq accesses system.cpu.dcache.WriteReq_mshr_miss_rate::cpu.data 0.091837 # mshr miss rate for WriteReq accesses system.cpu.dcache.WriteReq_mshr_miss_rate::total 0.091837 # mshr miss rate for WriteReq accesses -system.cpu.dcache.demand_mshr_miss_rate::cpu.data 0.106650 # mshr miss rate for demand accesses -system.cpu.dcache.demand_mshr_miss_rate::total 0.106650 # mshr miss rate for demand accesses -system.cpu.dcache.overall_mshr_miss_rate::cpu.data 0.106650 # mshr miss rate for overall accesses -system.cpu.dcache.overall_mshr_miss_rate::total 0.106650 # mshr miss rate for overall accesses -system.cpu.dcache.ReadReq_avg_mshr_miss_latency::cpu.data 76594.827586 # average ReadReq mshr miss latency -system.cpu.dcache.ReadReq_avg_mshr_miss_latency::total 76594.827586 # average ReadReq mshr miss latency +system.cpu.dcache.demand_mshr_miss_rate::cpu.data 0.107053 # mshr miss rate for demand accesses +system.cpu.dcache.demand_mshr_miss_rate::total 0.107053 # mshr miss rate for demand accesses +system.cpu.dcache.overall_mshr_miss_rate::cpu.data 0.107053 # mshr miss rate for overall accesses +system.cpu.dcache.overall_mshr_miss_rate::total 0.107053 # mshr miss rate for 
overall accesses +system.cpu.dcache.ReadReq_avg_mshr_miss_latency::cpu.data 80241.379310 # average ReadReq mshr miss latency +system.cpu.dcache.ReadReq_avg_mshr_miss_latency::total 80241.379310 # average ReadReq mshr miss latency system.cpu.dcache.WriteReq_avg_mshr_miss_latency::cpu.data 74722.222222 # average WriteReq mshr miss latency system.cpu.dcache.WriteReq_avg_mshr_miss_latency::total 74722.222222 # average WriteReq mshr miss latency -system.cpu.dcache.demand_avg_mshr_miss_latency::cpu.data 76000 # average overall mshr miss latency -system.cpu.dcache.demand_avg_mshr_miss_latency::total 76000 # average overall mshr miss latency -system.cpu.dcache.overall_avg_mshr_miss_latency::cpu.data 76000 # average overall mshr miss latency -system.cpu.dcache.overall_avg_mshr_miss_latency::total 76000 # average overall mshr miss latency -system.cpu.icache.tags.pwrStateResidencyTicks::UNDEFINED 20320000 # Cumulative time (in ticks) in various power states +system.cpu.dcache.demand_avg_mshr_miss_latency::cpu.data 78488.235294 # average overall mshr miss latency +system.cpu.dcache.demand_avg_mshr_miss_latency::total 78488.235294 # average overall mshr miss latency +system.cpu.dcache.overall_avg_mshr_miss_latency::cpu.data 78488.235294 # average overall mshr miss latency +system.cpu.dcache.overall_avg_mshr_miss_latency::total 78488.235294 # average overall mshr miss latency +system.cpu.icache.tags.pwrStateResidencyTicks::UNDEFINED 20329000 # Cumulative time (in ticks) in various power states system.cpu.icache.tags.replacements 0 # number of replacements -system.cpu.icache.tags.tagsinuse 119.123012 # Cycle average of tags in use -system.cpu.icache.tags.total_refs 750 # Total number of references to valid blocks. +system.cpu.icache.tags.tagsinuse 119.197826 # Cycle average of tags in use +system.cpu.icache.tags.total_refs 754 # Total number of references to valid blocks. system.cpu.icache.tags.sampled_refs 225 # Sample count of references to valid blocks. -system.cpu.icache.tags.avg_refs 3.333333 # Average number of references to valid blocks. +system.cpu.icache.tags.avg_refs 3.351111 # Average number of references to valid blocks. system.cpu.icache.tags.warmup_cycle 0 # Cycle when the warmup percentage was hit. 
-system.cpu.icache.tags.occ_blocks::cpu.inst 119.123012 # Average occupied blocks per requestor -system.cpu.icache.tags.occ_percent::cpu.inst 0.058166 # Average percentage of cache occupancy -system.cpu.icache.tags.occ_percent::total 0.058166 # Average percentage of cache occupancy +system.cpu.icache.tags.occ_blocks::cpu.inst 119.197826 # Average occupied blocks per requestor +system.cpu.icache.tags.occ_percent::cpu.inst 0.058202 # Average percentage of cache occupancy +system.cpu.icache.tags.occ_percent::total 0.058202 # Average percentage of cache occupancy system.cpu.icache.tags.occ_task_id_blocks::1024 225 # Occupied blocks per task id system.cpu.icache.tags.age_task_id_blocks_1024::0 100 # Occupied blocks per task id system.cpu.icache.tags.age_task_id_blocks_1024::1 125 # Occupied blocks per task id system.cpu.icache.tags.occ_task_id_percent::1024 0.109863 # Percentage of cache occupancy per task id -system.cpu.icache.tags.tag_accesses 2175 # Number of tag accesses -system.cpu.icache.tags.data_accesses 2175 # Number of data accesses -system.cpu.icache.pwrStateResidencyTicks::UNDEFINED 20320000 # Cumulative time (in ticks) in various power states -system.cpu.icache.ReadReq_hits::cpu.inst 750 # number of ReadReq hits -system.cpu.icache.ReadReq_hits::total 750 # number of ReadReq hits -system.cpu.icache.demand_hits::cpu.inst 750 # number of demand (read+write) hits -system.cpu.icache.demand_hits::total 750 # number of demand (read+write) hits -system.cpu.icache.overall_hits::cpu.inst 750 # number of overall hits -system.cpu.icache.overall_hits::total 750 # number of overall hits +system.cpu.icache.tags.tag_accesses 2183 # Number of tag accesses +system.cpu.icache.tags.data_accesses 2183 # Number of data accesses +system.cpu.icache.pwrStateResidencyTicks::UNDEFINED 20329000 # Cumulative time (in ticks) in various power states +system.cpu.icache.ReadReq_hits::cpu.inst 754 # number of ReadReq hits +system.cpu.icache.ReadReq_hits::total 754 # number of ReadReq hits +system.cpu.icache.demand_hits::cpu.inst 754 # number of demand (read+write) hits +system.cpu.icache.demand_hits::total 754 # number of demand (read+write) hits +system.cpu.icache.overall_hits::cpu.inst 754 # number of overall hits +system.cpu.icache.overall_hits::total 754 # number of overall hits system.cpu.icache.ReadReq_misses::cpu.inst 225 # number of ReadReq misses system.cpu.icache.ReadReq_misses::total 225 # number of ReadReq misses system.cpu.icache.demand_misses::cpu.inst 225 # number of demand (read+write) misses system.cpu.icache.demand_misses::total 225 # number of demand (read+write) misses system.cpu.icache.overall_misses::cpu.inst 225 # number of overall misses system.cpu.icache.overall_misses::total 225 # number of overall misses -system.cpu.icache.ReadReq_miss_latency::cpu.inst 17203000 # number of ReadReq miss cycles -system.cpu.icache.ReadReq_miss_latency::total 17203000 # number of ReadReq miss cycles -system.cpu.icache.demand_miss_latency::cpu.inst 17203000 # number of demand (read+write) miss cycles -system.cpu.icache.demand_miss_latency::total 17203000 # number of demand (read+write) miss cycles -system.cpu.icache.overall_miss_latency::cpu.inst 17203000 # number of overall miss cycles -system.cpu.icache.overall_miss_latency::total 17203000 # number of overall miss cycles -system.cpu.icache.ReadReq_accesses::cpu.inst 975 # number of ReadReq accesses(hits+misses) -system.cpu.icache.ReadReq_accesses::total 975 # number of ReadReq accesses(hits+misses) -system.cpu.icache.demand_accesses::cpu.inst 975 # number 
of demand (read+write) accesses -system.cpu.icache.demand_accesses::total 975 # number of demand (read+write) accesses -system.cpu.icache.overall_accesses::cpu.inst 975 # number of overall (read+write) accesses -system.cpu.icache.overall_accesses::total 975 # number of overall (read+write) accesses -system.cpu.icache.ReadReq_miss_rate::cpu.inst 0.230769 # miss rate for ReadReq accesses -system.cpu.icache.ReadReq_miss_rate::total 0.230769 # miss rate for ReadReq accesses -system.cpu.icache.demand_miss_rate::cpu.inst 0.230769 # miss rate for demand accesses -system.cpu.icache.demand_miss_rate::total 0.230769 # miss rate for demand accesses -system.cpu.icache.overall_miss_rate::cpu.inst 0.230769 # miss rate for overall accesses -system.cpu.icache.overall_miss_rate::total 0.230769 # miss rate for overall accesses -system.cpu.icache.ReadReq_avg_miss_latency::cpu.inst 76457.777778 # average ReadReq miss latency -system.cpu.icache.ReadReq_avg_miss_latency::total 76457.777778 # average ReadReq miss latency -system.cpu.icache.demand_avg_miss_latency::cpu.inst 76457.777778 # average overall miss latency -system.cpu.icache.demand_avg_miss_latency::total 76457.777778 # average overall miss latency -system.cpu.icache.overall_avg_miss_latency::cpu.inst 76457.777778 # average overall miss latency -system.cpu.icache.overall_avg_miss_latency::total 76457.777778 # average overall miss latency +system.cpu.icache.ReadReq_miss_latency::cpu.inst 17116000 # number of ReadReq miss cycles +system.cpu.icache.ReadReq_miss_latency::total 17116000 # number of ReadReq miss cycles +system.cpu.icache.demand_miss_latency::cpu.inst 17116000 # number of demand (read+write) miss cycles +system.cpu.icache.demand_miss_latency::total 17116000 # number of demand (read+write) miss cycles +system.cpu.icache.overall_miss_latency::cpu.inst 17116000 # number of overall miss cycles +system.cpu.icache.overall_miss_latency::total 17116000 # number of overall miss cycles +system.cpu.icache.ReadReq_accesses::cpu.inst 979 # number of ReadReq accesses(hits+misses) +system.cpu.icache.ReadReq_accesses::total 979 # number of ReadReq accesses(hits+misses) +system.cpu.icache.demand_accesses::cpu.inst 979 # number of demand (read+write) accesses +system.cpu.icache.demand_accesses::total 979 # number of demand (read+write) accesses +system.cpu.icache.overall_accesses::cpu.inst 979 # number of overall (read+write) accesses +system.cpu.icache.overall_accesses::total 979 # number of overall (read+write) accesses +system.cpu.icache.ReadReq_miss_rate::cpu.inst 0.229826 # miss rate for ReadReq accesses +system.cpu.icache.ReadReq_miss_rate::total 0.229826 # miss rate for ReadReq accesses +system.cpu.icache.demand_miss_rate::cpu.inst 0.229826 # miss rate for demand accesses +system.cpu.icache.demand_miss_rate::total 0.229826 # miss rate for demand accesses +system.cpu.icache.overall_miss_rate::cpu.inst 0.229826 # miss rate for overall accesses +system.cpu.icache.overall_miss_rate::total 0.229826 # miss rate for overall accesses +system.cpu.icache.ReadReq_avg_miss_latency::cpu.inst 76071.111111 # average ReadReq miss latency +system.cpu.icache.ReadReq_avg_miss_latency::total 76071.111111 # average ReadReq miss latency +system.cpu.icache.demand_avg_miss_latency::cpu.inst 76071.111111 # average overall miss latency +system.cpu.icache.demand_avg_miss_latency::total 76071.111111 # average overall miss latency +system.cpu.icache.overall_avg_miss_latency::cpu.inst 76071.111111 # average overall miss latency +system.cpu.icache.overall_avg_miss_latency::total 
76071.111111 # average overall miss latency system.cpu.icache.blocked_cycles::no_mshrs 0 # number of cycles access was blocked system.cpu.icache.blocked_cycles::no_targets 0 # number of cycles access was blocked system.cpu.icache.blocked::no_mshrs 0 # number of cycles access was blocked @@ -522,43 +522,43 @@ system.cpu.icache.demand_mshr_misses::cpu.inst 225 system.cpu.icache.demand_mshr_misses::total 225 # number of demand (read+write) MSHR misses system.cpu.icache.overall_mshr_misses::cpu.inst 225 # number of overall MSHR misses system.cpu.icache.overall_mshr_misses::total 225 # number of overall MSHR misses -system.cpu.icache.ReadReq_mshr_miss_latency::cpu.inst 16978000 # number of ReadReq MSHR miss cycles -system.cpu.icache.ReadReq_mshr_miss_latency::total 16978000 # number of ReadReq MSHR miss cycles -system.cpu.icache.demand_mshr_miss_latency::cpu.inst 16978000 # number of demand (read+write) MSHR miss cycles -system.cpu.icache.demand_mshr_miss_latency::total 16978000 # number of demand (read+write) MSHR miss cycles -system.cpu.icache.overall_mshr_miss_latency::cpu.inst 16978000 # number of overall MSHR miss cycles -system.cpu.icache.overall_mshr_miss_latency::total 16978000 # number of overall MSHR miss cycles -system.cpu.icache.ReadReq_mshr_miss_rate::cpu.inst 0.230769 # mshr miss rate for ReadReq accesses -system.cpu.icache.ReadReq_mshr_miss_rate::total 0.230769 # mshr miss rate for ReadReq accesses -system.cpu.icache.demand_mshr_miss_rate::cpu.inst 0.230769 # mshr miss rate for demand accesses -system.cpu.icache.demand_mshr_miss_rate::total 0.230769 # mshr miss rate for demand accesses -system.cpu.icache.overall_mshr_miss_rate::cpu.inst 0.230769 # mshr miss rate for overall accesses -system.cpu.icache.overall_mshr_miss_rate::total 0.230769 # mshr miss rate for overall accesses -system.cpu.icache.ReadReq_avg_mshr_miss_latency::cpu.inst 75457.777778 # average ReadReq mshr miss latency -system.cpu.icache.ReadReq_avg_mshr_miss_latency::total 75457.777778 # average ReadReq mshr miss latency -system.cpu.icache.demand_avg_mshr_miss_latency::cpu.inst 75457.777778 # average overall mshr miss latency -system.cpu.icache.demand_avg_mshr_miss_latency::total 75457.777778 # average overall mshr miss latency -system.cpu.icache.overall_avg_mshr_miss_latency::cpu.inst 75457.777778 # average overall mshr miss latency -system.cpu.icache.overall_avg_mshr_miss_latency::total 75457.777778 # average overall mshr miss latency -system.cpu.l2cache.tags.pwrStateResidencyTicks::UNDEFINED 20320000 # Cumulative time (in ticks) in various power states +system.cpu.icache.ReadReq_mshr_miss_latency::cpu.inst 16891000 # number of ReadReq MSHR miss cycles +system.cpu.icache.ReadReq_mshr_miss_latency::total 16891000 # number of ReadReq MSHR miss cycles +system.cpu.icache.demand_mshr_miss_latency::cpu.inst 16891000 # number of demand (read+write) MSHR miss cycles +system.cpu.icache.demand_mshr_miss_latency::total 16891000 # number of demand (read+write) MSHR miss cycles +system.cpu.icache.overall_mshr_miss_latency::cpu.inst 16891000 # number of overall MSHR miss cycles +system.cpu.icache.overall_mshr_miss_latency::total 16891000 # number of overall MSHR miss cycles +system.cpu.icache.ReadReq_mshr_miss_rate::cpu.inst 0.229826 # mshr miss rate for ReadReq accesses +system.cpu.icache.ReadReq_mshr_miss_rate::total 0.229826 # mshr miss rate for ReadReq accesses +system.cpu.icache.demand_mshr_miss_rate::cpu.inst 0.229826 # mshr miss rate for demand accesses +system.cpu.icache.demand_mshr_miss_rate::total 0.229826 # mshr 
miss rate for demand accesses +system.cpu.icache.overall_mshr_miss_rate::cpu.inst 0.229826 # mshr miss rate for overall accesses +system.cpu.icache.overall_mshr_miss_rate::total 0.229826 # mshr miss rate for overall accesses +system.cpu.icache.ReadReq_avg_mshr_miss_latency::cpu.inst 75071.111111 # average ReadReq mshr miss latency +system.cpu.icache.ReadReq_avg_mshr_miss_latency::total 75071.111111 # average ReadReq mshr miss latency +system.cpu.icache.demand_avg_mshr_miss_latency::cpu.inst 75071.111111 # average overall mshr miss latency +system.cpu.icache.demand_avg_mshr_miss_latency::total 75071.111111 # average overall mshr miss latency +system.cpu.icache.overall_avg_mshr_miss_latency::cpu.inst 75071.111111 # average overall mshr miss latency +system.cpu.icache.overall_avg_mshr_miss_latency::total 75071.111111 # average overall mshr miss latency +system.cpu.l2cache.tags.pwrStateResidencyTicks::UNDEFINED 20329000 # Cumulative time (in ticks) in various power states system.cpu.l2cache.tags.replacements 0 # number of replacements -system.cpu.l2cache.tags.tagsinuse 147.162900 # Cycle average of tags in use +system.cpu.l2cache.tags.tagsinuse 147.090026 # Cycle average of tags in use system.cpu.l2cache.tags.total_refs 0 # Total number of references to valid blocks. system.cpu.l2cache.tags.sampled_refs 283 # Sample count of references to valid blocks. system.cpu.l2cache.tags.avg_refs 0 # Average number of references to valid blocks. system.cpu.l2cache.tags.warmup_cycle 0 # Cycle when the warmup percentage was hit. -system.cpu.l2cache.tags.occ_blocks::cpu.inst 119.239277 # Average occupied blocks per requestor -system.cpu.l2cache.tags.occ_blocks::cpu.data 27.923624 # Average occupied blocks per requestor -system.cpu.l2cache.tags.occ_percent::cpu.inst 0.003639 # Average percentage of cache occupancy -system.cpu.l2cache.tags.occ_percent::cpu.data 0.000852 # Average percentage of cache occupancy -system.cpu.l2cache.tags.occ_percent::total 0.004491 # Average percentage of cache occupancy +system.cpu.l2cache.tags.occ_blocks::cpu.inst 119.314039 # Average occupied blocks per requestor +system.cpu.l2cache.tags.occ_blocks::cpu.data 27.775987 # Average occupied blocks per requestor +system.cpu.l2cache.tags.occ_percent::cpu.inst 0.003641 # Average percentage of cache occupancy +system.cpu.l2cache.tags.occ_percent::cpu.data 0.000848 # Average percentage of cache occupancy +system.cpu.l2cache.tags.occ_percent::total 0.004489 # Average percentage of cache occupancy system.cpu.l2cache.tags.occ_task_id_blocks::1024 283 # Occupied blocks per task id system.cpu.l2cache.tags.age_task_id_blocks_1024::0 131 # Occupied blocks per task id system.cpu.l2cache.tags.age_task_id_blocks_1024::1 152 # Occupied blocks per task id system.cpu.l2cache.tags.occ_task_id_percent::1024 0.008636 # Percentage of cache occupancy per task id system.cpu.l2cache.tags.tag_accesses 2790 # Number of tag accesses system.cpu.l2cache.tags.data_accesses 2790 # Number of data accesses -system.cpu.l2cache.pwrStateResidencyTicks::UNDEFINED 20320000 # Cumulative time (in ticks) in various power states +system.cpu.l2cache.pwrStateResidencyTicks::UNDEFINED 20329000 # Cumulative time (in ticks) in various power states system.cpu.l2cache.ReadExReq_misses::cpu.data 27 # number of ReadExReq misses system.cpu.l2cache.ReadExReq_misses::total 27 # number of ReadExReq misses system.cpu.l2cache.ReadCleanReq_misses::cpu.inst 225 # number of ReadCleanReq misses @@ -573,16 +573,16 @@ system.cpu.l2cache.overall_misses::cpu.data 85 # 
system.cpu.l2cache.overall_misses::total 310 # number of overall misses system.cpu.l2cache.ReadExReq_miss_latency::cpu.data 1977000 # number of ReadExReq miss cycles system.cpu.l2cache.ReadExReq_miss_latency::total 1977000 # number of ReadExReq miss cycles -system.cpu.l2cache.ReadCleanReq_miss_latency::cpu.inst 16640500 # number of ReadCleanReq miss cycles -system.cpu.l2cache.ReadCleanReq_miss_latency::total 16640500 # number of ReadCleanReq miss cycles -system.cpu.l2cache.ReadSharedReq_miss_latency::cpu.data 4354500 # number of ReadSharedReq miss cycles -system.cpu.l2cache.ReadSharedReq_miss_latency::total 4354500 # number of ReadSharedReq miss cycles -system.cpu.l2cache.demand_miss_latency::cpu.inst 16640500 # number of demand (read+write) miss cycles -system.cpu.l2cache.demand_miss_latency::cpu.data 6331500 # number of demand (read+write) miss cycles -system.cpu.l2cache.demand_miss_latency::total 22972000 # number of demand (read+write) miss cycles -system.cpu.l2cache.overall_miss_latency::cpu.inst 16640500 # number of overall miss cycles -system.cpu.l2cache.overall_miss_latency::cpu.data 6331500 # number of overall miss cycles -system.cpu.l2cache.overall_miss_latency::total 22972000 # number of overall miss cycles +system.cpu.l2cache.ReadCleanReq_miss_latency::cpu.inst 16553500 # number of ReadCleanReq miss cycles +system.cpu.l2cache.ReadCleanReq_miss_latency::total 16553500 # number of ReadCleanReq miss cycles +system.cpu.l2cache.ReadSharedReq_miss_latency::cpu.data 4566000 # number of ReadSharedReq miss cycles +system.cpu.l2cache.ReadSharedReq_miss_latency::total 4566000 # number of ReadSharedReq miss cycles +system.cpu.l2cache.demand_miss_latency::cpu.inst 16553500 # number of demand (read+write) miss cycles +system.cpu.l2cache.demand_miss_latency::cpu.data 6543000 # number of demand (read+write) miss cycles +system.cpu.l2cache.demand_miss_latency::total 23096500 # number of demand (read+write) miss cycles +system.cpu.l2cache.overall_miss_latency::cpu.inst 16553500 # number of overall miss cycles +system.cpu.l2cache.overall_miss_latency::cpu.data 6543000 # number of overall miss cycles +system.cpu.l2cache.overall_miss_latency::total 23096500 # number of overall miss cycles system.cpu.l2cache.ReadExReq_accesses::cpu.data 27 # number of ReadExReq accesses(hits+misses) system.cpu.l2cache.ReadExReq_accesses::total 27 # number of ReadExReq accesses(hits+misses) system.cpu.l2cache.ReadCleanReq_accesses::cpu.inst 225 # number of ReadCleanReq accesses(hits+misses) @@ -609,16 +609,16 @@ system.cpu.l2cache.overall_miss_rate::cpu.data 1 system.cpu.l2cache.overall_miss_rate::total 1 # miss rate for overall accesses system.cpu.l2cache.ReadExReq_avg_miss_latency::cpu.data 73222.222222 # average ReadExReq miss latency system.cpu.l2cache.ReadExReq_avg_miss_latency::total 73222.222222 # average ReadExReq miss latency -system.cpu.l2cache.ReadCleanReq_avg_miss_latency::cpu.inst 73957.777778 # average ReadCleanReq miss latency -system.cpu.l2cache.ReadCleanReq_avg_miss_latency::total 73957.777778 # average ReadCleanReq miss latency -system.cpu.l2cache.ReadSharedReq_avg_miss_latency::cpu.data 75077.586207 # average ReadSharedReq miss latency -system.cpu.l2cache.ReadSharedReq_avg_miss_latency::total 75077.586207 # average ReadSharedReq miss latency -system.cpu.l2cache.demand_avg_miss_latency::cpu.inst 73957.777778 # average overall miss latency -system.cpu.l2cache.demand_avg_miss_latency::cpu.data 74488.235294 # average overall miss latency -system.cpu.l2cache.demand_avg_miss_latency::total 74103.225806 # 
average overall miss latency -system.cpu.l2cache.overall_avg_miss_latency::cpu.inst 73957.777778 # average overall miss latency -system.cpu.l2cache.overall_avg_miss_latency::cpu.data 74488.235294 # average overall miss latency -system.cpu.l2cache.overall_avg_miss_latency::total 74103.225806 # average overall miss latency +system.cpu.l2cache.ReadCleanReq_avg_miss_latency::cpu.inst 73571.111111 # average ReadCleanReq miss latency +system.cpu.l2cache.ReadCleanReq_avg_miss_latency::total 73571.111111 # average ReadCleanReq miss latency +system.cpu.l2cache.ReadSharedReq_avg_miss_latency::cpu.data 78724.137931 # average ReadSharedReq miss latency +system.cpu.l2cache.ReadSharedReq_avg_miss_latency::total 78724.137931 # average ReadSharedReq miss latency +system.cpu.l2cache.demand_avg_miss_latency::cpu.inst 73571.111111 # average overall miss latency +system.cpu.l2cache.demand_avg_miss_latency::cpu.data 76976.470588 # average overall miss latency +system.cpu.l2cache.demand_avg_miss_latency::total 74504.838710 # average overall miss latency +system.cpu.l2cache.overall_avg_miss_latency::cpu.inst 73571.111111 # average overall miss latency +system.cpu.l2cache.overall_avg_miss_latency::cpu.data 76976.470588 # average overall miss latency +system.cpu.l2cache.overall_avg_miss_latency::total 74504.838710 # average overall miss latency system.cpu.l2cache.blocked_cycles::no_mshrs 0 # number of cycles access was blocked system.cpu.l2cache.blocked_cycles::no_targets 0 # number of cycles access was blocked system.cpu.l2cache.blocked::no_mshrs 0 # number of cycles access was blocked @@ -639,16 +639,16 @@ system.cpu.l2cache.overall_mshr_misses::cpu.data 85 system.cpu.l2cache.overall_mshr_misses::total 310 # number of overall MSHR misses system.cpu.l2cache.ReadExReq_mshr_miss_latency::cpu.data 1707000 # number of ReadExReq MSHR miss cycles system.cpu.l2cache.ReadExReq_mshr_miss_latency::total 1707000 # number of ReadExReq MSHR miss cycles -system.cpu.l2cache.ReadCleanReq_mshr_miss_latency::cpu.inst 14390500 # number of ReadCleanReq MSHR miss cycles -system.cpu.l2cache.ReadCleanReq_mshr_miss_latency::total 14390500 # number of ReadCleanReq MSHR miss cycles -system.cpu.l2cache.ReadSharedReq_mshr_miss_latency::cpu.data 3774500 # number of ReadSharedReq MSHR miss cycles -system.cpu.l2cache.ReadSharedReq_mshr_miss_latency::total 3774500 # number of ReadSharedReq MSHR miss cycles -system.cpu.l2cache.demand_mshr_miss_latency::cpu.inst 14390500 # number of demand (read+write) MSHR miss cycles -system.cpu.l2cache.demand_mshr_miss_latency::cpu.data 5481500 # number of demand (read+write) MSHR miss cycles -system.cpu.l2cache.demand_mshr_miss_latency::total 19872000 # number of demand (read+write) MSHR miss cycles -system.cpu.l2cache.overall_mshr_miss_latency::cpu.inst 14390500 # number of overall MSHR miss cycles -system.cpu.l2cache.overall_mshr_miss_latency::cpu.data 5481500 # number of overall MSHR miss cycles -system.cpu.l2cache.overall_mshr_miss_latency::total 19872000 # number of overall MSHR miss cycles +system.cpu.l2cache.ReadCleanReq_mshr_miss_latency::cpu.inst 14303500 # number of ReadCleanReq MSHR miss cycles +system.cpu.l2cache.ReadCleanReq_mshr_miss_latency::total 14303500 # number of ReadCleanReq MSHR miss cycles +system.cpu.l2cache.ReadSharedReq_mshr_miss_latency::cpu.data 3986000 # number of ReadSharedReq MSHR miss cycles +system.cpu.l2cache.ReadSharedReq_mshr_miss_latency::total 3986000 # number of ReadSharedReq MSHR miss cycles +system.cpu.l2cache.demand_mshr_miss_latency::cpu.inst 14303500 # number of 
demand (read+write) MSHR miss cycles +system.cpu.l2cache.demand_mshr_miss_latency::cpu.data 5693000 # number of demand (read+write) MSHR miss cycles +system.cpu.l2cache.demand_mshr_miss_latency::total 19996500 # number of demand (read+write) MSHR miss cycles +system.cpu.l2cache.overall_mshr_miss_latency::cpu.inst 14303500 # number of overall MSHR miss cycles +system.cpu.l2cache.overall_mshr_miss_latency::cpu.data 5693000 # number of overall MSHR miss cycles +system.cpu.l2cache.overall_mshr_miss_latency::total 19996500 # number of overall MSHR miss cycles system.cpu.l2cache.ReadExReq_mshr_miss_rate::cpu.data 1 # mshr miss rate for ReadExReq accesses system.cpu.l2cache.ReadExReq_mshr_miss_rate::total 1 # mshr miss rate for ReadExReq accesses system.cpu.l2cache.ReadCleanReq_mshr_miss_rate::cpu.inst 1 # mshr miss rate for ReadCleanReq accesses @@ -663,23 +663,23 @@ system.cpu.l2cache.overall_mshr_miss_rate::cpu.data 1 system.cpu.l2cache.overall_mshr_miss_rate::total 1 # mshr miss rate for overall accesses system.cpu.l2cache.ReadExReq_avg_mshr_miss_latency::cpu.data 63222.222222 # average ReadExReq mshr miss latency system.cpu.l2cache.ReadExReq_avg_mshr_miss_latency::total 63222.222222 # average ReadExReq mshr miss latency -system.cpu.l2cache.ReadCleanReq_avg_mshr_miss_latency::cpu.inst 63957.777778 # average ReadCleanReq mshr miss latency -system.cpu.l2cache.ReadCleanReq_avg_mshr_miss_latency::total 63957.777778 # average ReadCleanReq mshr miss latency -system.cpu.l2cache.ReadSharedReq_avg_mshr_miss_latency::cpu.data 65077.586207 # average ReadSharedReq mshr miss latency -system.cpu.l2cache.ReadSharedReq_avg_mshr_miss_latency::total 65077.586207 # average ReadSharedReq mshr miss latency -system.cpu.l2cache.demand_avg_mshr_miss_latency::cpu.inst 63957.777778 # average overall mshr miss latency -system.cpu.l2cache.demand_avg_mshr_miss_latency::cpu.data 64488.235294 # average overall mshr miss latency -system.cpu.l2cache.demand_avg_mshr_miss_latency::total 64103.225806 # average overall mshr miss latency -system.cpu.l2cache.overall_avg_mshr_miss_latency::cpu.inst 63957.777778 # average overall mshr miss latency -system.cpu.l2cache.overall_avg_mshr_miss_latency::cpu.data 64488.235294 # average overall mshr miss latency -system.cpu.l2cache.overall_avg_mshr_miss_latency::total 64103.225806 # average overall mshr miss latency +system.cpu.l2cache.ReadCleanReq_avg_mshr_miss_latency::cpu.inst 63571.111111 # average ReadCleanReq mshr miss latency +system.cpu.l2cache.ReadCleanReq_avg_mshr_miss_latency::total 63571.111111 # average ReadCleanReq mshr miss latency +system.cpu.l2cache.ReadSharedReq_avg_mshr_miss_latency::cpu.data 68724.137931 # average ReadSharedReq mshr miss latency +system.cpu.l2cache.ReadSharedReq_avg_mshr_miss_latency::total 68724.137931 # average ReadSharedReq mshr miss latency +system.cpu.l2cache.demand_avg_mshr_miss_latency::cpu.inst 63571.111111 # average overall mshr miss latency +system.cpu.l2cache.demand_avg_mshr_miss_latency::cpu.data 66976.470588 # average overall mshr miss latency +system.cpu.l2cache.demand_avg_mshr_miss_latency::total 64504.838710 # average overall mshr miss latency +system.cpu.l2cache.overall_avg_mshr_miss_latency::cpu.inst 63571.111111 # average overall mshr miss latency +system.cpu.l2cache.overall_avg_mshr_miss_latency::cpu.data 66976.470588 # average overall mshr miss latency +system.cpu.l2cache.overall_avg_mshr_miss_latency::total 64504.838710 # average overall mshr miss latency system.cpu.toL2Bus.snoop_filter.tot_requests 310 # Total number of requests 
made to the snoop filter. system.cpu.toL2Bus.snoop_filter.hit_single_requests 0 # Number of requests hitting in the snoop filter with a single holder of the requested data. system.cpu.toL2Bus.snoop_filter.hit_multi_requests 0 # Number of requests hitting in the snoop filter with multiple (>1) holders of the requested data. system.cpu.toL2Bus.snoop_filter.tot_snoops 0 # Total number of snoops made to the snoop filter. system.cpu.toL2Bus.snoop_filter.hit_single_snoops 0 # Number of snoops hitting in the snoop filter with a single holder of the requested data. system.cpu.toL2Bus.snoop_filter.hit_multi_snoops 0 # Number of snoops hitting in the snoop filter with multiple (>1) holders of the requested data. -system.cpu.toL2Bus.pwrStateResidencyTicks::UNDEFINED 20320000 # Cumulative time (in ticks) in various power states +system.cpu.toL2Bus.pwrStateResidencyTicks::UNDEFINED 20329000 # Cumulative time (in ticks) in various power states system.cpu.toL2Bus.trans_dist::ReadResp 283 # Transaction distribution system.cpu.toL2Bus.trans_dist::ReadExReq 27 # Transaction distribution system.cpu.toL2Bus.trans_dist::ReadExResp 27 # Transaction distribution @@ -692,6 +692,7 @@ system.cpu.toL2Bus.pkt_size_system.cpu.icache.mem_side::system.cpu.l2cache.cpu_s system.cpu.toL2Bus.pkt_size_system.cpu.dcache.mem_side::system.cpu.l2cache.cpu_side 5440 # Cumulative packet size per connected master and slave (bytes) system.cpu.toL2Bus.pkt_size::total 19840 # Cumulative packet size per connected master and slave (bytes) system.cpu.toL2Bus.snoops 0 # Total snoops (count) +system.cpu.toL2Bus.snoopTraffic 0 # Total snoop traffic (bytes) system.cpu.toL2Bus.snoop_fanout::samples 310 # Request fanout histogram system.cpu.toL2Bus.snoop_fanout::mean 0 # Request fanout histogram system.cpu.toL2Bus.snoop_fanout::stdev 0 # Request fanout histogram @@ -709,7 +710,7 @@ system.cpu.toL2Bus.respLayer0.occupancy 337500 # La system.cpu.toL2Bus.respLayer0.utilization 1.7 # Layer utilization (%) system.cpu.toL2Bus.respLayer1.occupancy 127500 # Layer occupancy (ticks) system.cpu.toL2Bus.respLayer1.utilization 0.6 # Layer utilization (%) -system.membus.pwrStateResidencyTicks::UNDEFINED 20320000 # Cumulative time (in ticks) in various power states +system.membus.pwrStateResidencyTicks::UNDEFINED 20329000 # Cumulative time (in ticks) in various power states system.membus.trans_dist::ReadResp 283 # Transaction distribution system.membus.trans_dist::ReadExReq 27 # Transaction distribution system.membus.trans_dist::ReadExResp 27 # Transaction distribution @@ -719,6 +720,7 @@ system.membus.pkt_count::total 620 # Pa system.membus.pkt_size_system.cpu.l2cache.mem_side::system.physmem.port 19840 # Cumulative packet size per connected master and slave (bytes) system.membus.pkt_size::total 19840 # Cumulative packet size per connected master and slave (bytes) system.membus.snoops 0 # Total snoops (count) +system.membus.snoopTraffic 0 # Total snoop traffic (bytes) system.membus.snoop_fanout::samples 310 # Request fanout histogram system.membus.snoop_fanout::mean 0 # Request fanout histogram system.membus.snoop_fanout::stdev 0 # Request fanout histogram @@ -729,9 +731,9 @@ system.membus.snoop_fanout::overflows 0 0.00% 100.00% # Re system.membus.snoop_fanout::min_value 0 # Request fanout histogram system.membus.snoop_fanout::max_value 0 # Request fanout histogram system.membus.snoop_fanout::total 310 # Request fanout histogram -system.membus.reqLayer0.occupancy 363500 # Layer occupancy (ticks) +system.membus.reqLayer0.occupancy 363000 # Layer occupancy 
(ticks) system.membus.reqLayer0.utilization 1.8 # Layer utilization (%) -system.membus.respLayer1.occupancy 1649000 # Layer occupancy (ticks) +system.membus.respLayer1.occupancy 1648250 # Layer occupancy (ticks) system.membus.respLayer1.utilization 8.1 # Layer utilization (%) ---------- End Simulation Statistics ---------- diff --git a/tests/quick/se/00.hello/ref/alpha/tru64/o3-timing/config.ini b/tests/quick/se/00.hello/ref/alpha/tru64/o3-timing/config.ini index 4281491aa..39c72e110 100644 --- a/tests/quick/se/00.hello/ref/alpha/tru64/o3-timing/config.ini +++ b/tests/quick/se/00.hello/ref/alpha/tru64/o3-timing/config.ini @@ -14,6 +14,7 @@ children=clk_domain cpu cpu_clk_domain dvfs_handler membus physmem voltage_domai boot_osflags=a cache_line_size=64 clk_domain=system.clk_domain +default_p_state=UNDEFINED eventq_index=0 exit_on_work_items=false init_param=0 @@ -27,6 +28,10 @@ memories=system.physmem mmap_using_noreserve=false multi_thread=false num_work_ids=16 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null readfile= symbolfile= thermal_components= @@ -72,6 +77,7 @@ cpu_id=0 decodeToFetchDelay=1 decodeToRenameDelay=1 decodeWidth=8 +default_p_state=UNDEFINED dispatchWidth=8 do_checkpoint_insts=true do_quiesce=true @@ -108,6 +114,10 @@ numPhysIntRegs=256 numROBEntries=192 numRobs=1 numThreads=1 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null profile=0 progress_interval=0 renameToDecodeDelay=1 @@ -167,12 +177,17 @@ addr_ranges=0:18446744073709551615 assoc=2 clk_domain=system.cpu_clk_domain clusivity=mostly_incl +default_p_state=UNDEFINED demand_mshr_reserve=1 eventq_index=0 hit_latency=2 is_read_only=false max_miss_count=0 mshrs=4 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null prefetch_on_access=false prefetcher=Null response_latency=2 @@ -191,8 +206,13 @@ type=LRU assoc=2 block_size=64 clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 hit_latency=2 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sequential_access=false size=262144 @@ -515,12 +535,17 @@ addr_ranges=0:18446744073709551615 assoc=2 clk_domain=system.cpu_clk_domain clusivity=mostly_incl +default_p_state=UNDEFINED demand_mshr_reserve=1 eventq_index=0 hit_latency=2 is_read_only=true max_miss_count=0 mshrs=4 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null prefetch_on_access=false prefetcher=Null response_latency=2 @@ -539,8 +564,13 @@ type=LRU assoc=2 block_size=64 clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 hit_latency=2 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sequential_access=false size=131072 @@ -565,12 +595,17 @@ addr_ranges=0:18446744073709551615 assoc=8 clk_domain=system.cpu_clk_domain clusivity=mostly_incl +default_p_state=UNDEFINED demand_mshr_reserve=1 eventq_index=0 hit_latency=20 is_read_only=false max_miss_count=0 mshrs=20 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null prefetch_on_access=false prefetcher=Null response_latency=20 @@ -589,8 +624,13 @@ type=LRU assoc=8 block_size=64 clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 hit_latency=20 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 
+p_state_clk_gate_min=1000 +power_model=Null sequential_access=false size=2097152 @@ -598,10 +638,15 @@ size=2097152 type=CoherentXBar children=snoop_filter clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 forward_latency=0 frontend_latency=1 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 point_of_coherency=false +power_model=Null response_latency=1 snoop_filter=system.cpu.toL2Bus.snoop_filter snoop_response_latency=1 @@ -632,7 +677,7 @@ env= errout=cerr euid=100 eventq_index=0 -executable=/dist/m5/regression/test-progs/hello/bin/alpha/tru64/hello +executable=/arm/projectscratch/randd/systems/dist/test-progs/hello/bin/alpha/tru64/hello gid=100 input=cin kvmInSE=false @@ -664,10 +709,15 @@ transition_latency=100000000 [system.membus] type=CoherentXBar clk_domain=system.clk_domain +default_p_state=UNDEFINED eventq_index=0 forward_latency=4 frontend_latency=3 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 point_of_coherency=true +power_model=Null response_latency=2 snoop_filter=Null snoop_response_latency=4 @@ -711,6 +761,7 @@ burst_length=8 channels=1 clk_domain=system.clk_domain conf_table_reported=true +default_p_state=UNDEFINED device_bus_width=8 device_rowbuffer_size=1024 device_size=536870912 @@ -722,7 +773,11 @@ max_accesses_per_row=16 mem_sched_policy=frfcfs min_writes_per_switch=16 null=false +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 page_policy=open_adaptive +power_model=Null range=0:134217727 ranks_per_channel=2 read_buffer_size=32 diff --git a/tests/quick/se/00.hello/ref/alpha/tru64/o3-timing/simerr b/tests/quick/se/00.hello/ref/alpha/tru64/o3-timing/simerr index c828ff444..4e7b90c23 100755 --- a/tests/quick/se/00.hello/ref/alpha/tru64/o3-timing/simerr +++ b/tests/quick/se/00.hello/ref/alpha/tru64/o3-timing/simerr @@ -1,4 +1,5 @@ warn: DRAM device capacity (8192 Mbytes) does not match the address range assigned (128 Mbytes) warn: Sockets disabled, not accepting gdb connections +warn: ClockedObject: More than one power state change request encountered within the same simulation tick warn: Prefetch instructions in Alpha do not do anything warn: ignoring syscall sigprocmask(1, ...) diff --git a/tests/quick/se/00.hello/ref/alpha/tru64/o3-timing/simout b/tests/quick/se/00.hello/ref/alpha/tru64/o3-timing/simout index 0abe9b40e..5515360ee 100755 --- a/tests/quick/se/00.hello/ref/alpha/tru64/o3-timing/simout +++ b/tests/quick/se/00.hello/ref/alpha/tru64/o3-timing/simout @@ -1,13 +1,15 @@ +Redirecting stdout to build/ALPHA/tests/opt/quick/se/00.hello/alpha/tru64/o3-timing/simout +Redirecting stderr to build/ALPHA/tests/opt/quick/se/00.hello/alpha/tru64/o3-timing/simerr gem5 Simulator System. http://gem5.org gem5 is copyrighted software; use the --copyright option for details. 
-gem5 compiled Jan 21 2016 13:49:21 -gem5 started Jan 21 2016 13:50:12 -gem5 executing on zizzer, pid 34021 -command line: build/ALPHA/gem5.opt -d build/ALPHA/tests/opt/quick/se/00.hello/alpha/tru64/o3-timing -re /z/atgutier/gem5/gem5-commit/tests/run.py build/ALPHA/tests/opt/quick/se/00.hello/alpha/tru64/o3-timing +gem5 compiled Jul 19 2016 12:23:51 +gem5 started Jul 19 2016 12:24:24 +gem5 executing on e108600-lin, pid 39577 +command line: /work/curdun01/gem5-external.hg/build/ALPHA/gem5.opt -d build/ALPHA/tests/opt/quick/se/00.hello/alpha/tru64/o3-timing -re /work/curdun01/gem5-external.hg/tests/testing/../run.py quick/se/00.hello/alpha/tru64/o3-timing Global frequency set at 1000000000000 ticks per second info: Entering event queue @ 0. Starting simulation... info: Increasing stack size by one page. Hello world! -Exiting @ tick 12363500 because target called exit() +Exiting @ tick 12409500 because target called exit() diff --git a/tests/quick/se/00.hello/ref/alpha/tru64/o3-timing/stats.txt b/tests/quick/se/00.hello/ref/alpha/tru64/o3-timing/stats.txt index 006581ce2..51e8f72d6 100644 --- a/tests/quick/se/00.hello/ref/alpha/tru64/o3-timing/stats.txt +++ b/tests/quick/se/00.hello/ref/alpha/tru64/o3-timing/stats.txt @@ -4,11 +4,11 @@ sim_seconds 0.000012 # Nu sim_ticks 12409500 # Number of ticks simulated final_tick 12409500 # Number of ticks from beginning of simulation (restored from checkpoints and never reset) sim_freq 1000000000000 # Frequency of simulated ticks -host_inst_rate 52563 # Simulator instruction rate (inst/s) -host_op_rate 52553 # Simulator op (including micro ops) rate (op/s) -host_tick_rate 273157641 # Simulator tick rate (ticks/s) -host_mem_usage 293200 # Number of bytes of host memory used -host_seconds 0.05 # Real time elapsed on the host +host_inst_rate 95060 # Simulator instruction rate (inst/s) +host_op_rate 95002 # Simulator op (including micro ops) rate (op/s) +host_tick_rate 493600045 # Simulator tick rate (ticks/s) +host_mem_usage 248984 # Number of bytes of host memory used +host_seconds 0.03 # Real time elapsed on the host sim_insts 2387 # Number of instructions simulated sim_ops 2387 # Number of ops (including micro ops) simulated system.voltage_domain.voltage 1 # Voltage in Volts @@ -941,6 +941,7 @@ system.cpu.toL2Bus.pkt_size_system.cpu.icache.mem_side::system.cpu.l2cache.cpu_s system.cpu.toL2Bus.pkt_size_system.cpu.dcache.mem_side::system.cpu.l2cache.cpu_side 5440 # Cumulative packet size per connected master and slave (bytes) system.cpu.toL2Bus.pkt_size::total 17408 # Cumulative packet size per connected master and slave (bytes) system.cpu.toL2Bus.snoops 0 # Total snoops (count) +system.cpu.toL2Bus.snoopTraffic 0 # Total snoop traffic (bytes) system.cpu.toL2Bus.snoop_fanout::samples 272 # Request fanout histogram system.cpu.toL2Bus.snoop_fanout::mean 0 # Request fanout histogram system.cpu.toL2Bus.snoop_fanout::stdev 0 # Request fanout histogram @@ -968,6 +969,7 @@ system.membus.pkt_count::total 544 # Pa system.membus.pkt_size_system.cpu.l2cache.mem_side::system.physmem.port 17408 # Cumulative packet size per connected master and slave (bytes) system.membus.pkt_size::total 17408 # Cumulative packet size per connected master and slave (bytes) system.membus.snoops 0 # Total snoops (count) +system.membus.snoopTraffic 0 # Total snoop traffic (bytes) system.membus.snoop_fanout::samples 272 # Request fanout histogram system.membus.snoop_fanout::mean 0 # Request fanout histogram system.membus.snoop_fanout::stdev 0 # Request fanout histogram diff --git 
a/tests/quick/se/00.hello/ref/alpha/tru64/simple-atomic/config.ini b/tests/quick/se/00.hello/ref/alpha/tru64/simple-atomic/config.ini index 164e856da..21de058a2 100644 --- a/tests/quick/se/00.hello/ref/alpha/tru64/simple-atomic/config.ini +++ b/tests/quick/se/00.hello/ref/alpha/tru64/simple-atomic/config.ini @@ -14,6 +14,7 @@ children=clk_domain cpu cpu_clk_domain dvfs_handler membus physmem voltage_domai boot_osflags=a cache_line_size=64 clk_domain=system.clk_domain +default_p_state=UNDEFINED eventq_index=0 exit_on_work_items=false init_param=0 @@ -27,6 +28,10 @@ memories=system.physmem mmap_using_noreserve=false multi_thread=false num_work_ids=16 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null readfile= symbolfile= thermal_components= @@ -55,6 +60,7 @@ branchPred=Null checker=Null clk_domain=system.cpu_clk_domain cpu_id=0 +default_p_state=UNDEFINED do_checkpoint_insts=true do_quiesce=true do_statistics_insts=true @@ -71,6 +77,10 @@ max_insts_any_thread=0 max_loads_all_threads=0 max_loads_any_thread=0 numThreads=1 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null profile=0 progress_interval=0 simpoint_start_insts= @@ -118,7 +128,7 @@ env= errout=cerr euid=100 eventq_index=0 -executable=/dist/m5/regression/test-progs/hello/bin/alpha/tru64/hello +executable=/arm/projectscratch/randd/systems/dist/test-progs/hello/bin/alpha/tru64/hello gid=100 input=cin kvmInSE=false @@ -150,10 +160,15 @@ transition_latency=100000000 [system.membus] type=CoherentXBar clk_domain=system.clk_domain +default_p_state=UNDEFINED eventq_index=0 forward_latency=4 frontend_latency=3 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 point_of_coherency=true +power_model=Null response_latency=2 snoop_filter=Null snoop_response_latency=4 @@ -168,11 +183,16 @@ type=SimpleMemory bandwidth=73.000000 clk_domain=system.clk_domain conf_table_reported=true +default_p_state=UNDEFINED eventq_index=0 in_addr_map=true latency=30000 latency_var=0 null=false +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null range=0:134217727 port=system.membus.master[0] diff --git a/tests/quick/se/00.hello/ref/alpha/tru64/simple-atomic/simerr b/tests/quick/se/00.hello/ref/alpha/tru64/simple-atomic/simerr index 0b3033cd9..05d0e3f61 100755 --- a/tests/quick/se/00.hello/ref/alpha/tru64/simple-atomic/simerr +++ b/tests/quick/se/00.hello/ref/alpha/tru64/simple-atomic/simerr @@ -1,2 +1,3 @@ warn: Sockets disabled, not accepting gdb connections +warn: ClockedObject: More than one power state change request encountered within the same simulation tick warn: ignoring syscall sigprocmask(1, ...) diff --git a/tests/quick/se/00.hello/ref/alpha/tru64/simple-atomic/simout b/tests/quick/se/00.hello/ref/alpha/tru64/simple-atomic/simout index ec449ee9d..4e010ba65 100755 --- a/tests/quick/se/00.hello/ref/alpha/tru64/simple-atomic/simout +++ b/tests/quick/se/00.hello/ref/alpha/tru64/simple-atomic/simout @@ -1,10 +1,12 @@ +Redirecting stdout to build/ALPHA/tests/opt/quick/se/00.hello/alpha/tru64/simple-atomic/simout +Redirecting stderr to build/ALPHA/tests/opt/quick/se/00.hello/alpha/tru64/simple-atomic/simerr gem5 Simulator System. http://gem5.org gem5 is copyrighted software; use the --copyright option for details. 
-gem5 compiled Jan 21 2016 13:49:21 -gem5 started Jan 21 2016 13:50:01 -gem5 executing on zizzer, pid 33991 -command line: build/ALPHA/gem5.opt -d build/ALPHA/tests/opt/quick/se/00.hello/alpha/tru64/simple-atomic -re /z/atgutier/gem5/gem5-commit/tests/run.py build/ALPHA/tests/opt/quick/se/00.hello/alpha/tru64/simple-atomic +gem5 compiled Jul 19 2016 12:23:51 +gem5 started Jul 19 2016 12:24:25 +gem5 executing on e108600-lin, pid 39590 +command line: /work/curdun01/gem5-external.hg/build/ALPHA/gem5.opt -d build/ALPHA/tests/opt/quick/se/00.hello/alpha/tru64/simple-atomic -re /work/curdun01/gem5-external.hg/tests/testing/../run.py quick/se/00.hello/alpha/tru64/simple-atomic Global frequency set at 1000000000000 ticks per second info: Entering event queue @ 0. Starting simulation... diff --git a/tests/quick/se/00.hello/ref/alpha/tru64/simple-atomic/stats.txt b/tests/quick/se/00.hello/ref/alpha/tru64/simple-atomic/stats.txt index fe81a2b88..a36aefa9a 100644 --- a/tests/quick/se/00.hello/ref/alpha/tru64/simple-atomic/stats.txt +++ b/tests/quick/se/00.hello/ref/alpha/tru64/simple-atomic/stats.txt @@ -4,11 +4,11 @@ sim_seconds 0.000001 # Nu sim_ticks 1297500 # Number of ticks simulated final_tick 1297500 # Number of ticks from beginning of simulation (restored from checkpoints and never reset) sim_freq 1000000000000 # Frequency of simulated ticks -host_inst_rate 615280 # Simulator instruction rate (inst/s) -host_op_rate 613973 # Simulator op (including micro ops) rate (op/s) -host_tick_rate 308554926 # Simulator tick rate (ticks/s) -host_mem_usage 281160 # Number of bytes of host memory used -host_seconds 0.00 # Real time elapsed on the host +host_inst_rate 290379 # Simulator instruction rate (inst/s) +host_op_rate 289620 # Simulator op (including micro ops) rate (op/s) +host_tick_rate 145475657 # Simulator tick rate (ticks/s) +host_mem_usage 238224 # Number of bytes of host memory used +host_seconds 0.01 # Real time elapsed on the host sim_insts 2577 # Number of instructions simulated sim_ops 2577 # Number of ops (including micro ops) simulated system.voltage_domain.voltage 1 # Voltage in Volts @@ -142,6 +142,7 @@ system.membus.pkt_size_system.cpu.icache_port::system.physmem.port 10340 system.membus.pkt_size_system.cpu.dcache_port::system.physmem.port 5074 # Cumulative packet size per connected master and slave (bytes) system.membus.pkt_size::total 15414 # Cumulative packet size per connected master and slave (bytes) system.membus.snoops 0 # Total snoops (count) +system.membus.snoopTraffic 0 # Total snoop traffic (bytes) system.membus.snoop_fanout::samples 3294 # Request fanout histogram system.membus.snoop_fanout::mean 0.784760 # Request fanout histogram system.membus.snoop_fanout::stdev 0.411051 # Request fanout histogram diff --git a/tests/quick/se/00.hello/ref/alpha/tru64/simple-timing/config.ini b/tests/quick/se/00.hello/ref/alpha/tru64/simple-timing/config.ini index ea2d2a5a5..14ec42af5 100644 --- a/tests/quick/se/00.hello/ref/alpha/tru64/simple-timing/config.ini +++ b/tests/quick/se/00.hello/ref/alpha/tru64/simple-timing/config.ini @@ -14,6 +14,7 @@ children=clk_domain cpu cpu_clk_domain dvfs_handler membus physmem voltage_domai boot_osflags=a cache_line_size=64 clk_domain=system.clk_domain +default_p_state=UNDEFINED eventq_index=0 exit_on_work_items=false init_param=0 @@ -27,6 +28,10 @@ memories=system.physmem mmap_using_noreserve=false multi_thread=false num_work_ids=16 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null readfile= 
symbolfile= thermal_components= @@ -55,6 +60,7 @@ branchPred=Null checker=Null clk_domain=system.cpu_clk_domain cpu_id=0 +default_p_state=UNDEFINED do_checkpoint_insts=true do_quiesce=true do_statistics_insts=true @@ -70,6 +76,10 @@ max_insts_any_thread=0 max_loads_all_threads=0 max_loads_any_thread=0 numThreads=1 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null profile=0 progress_interval=0 simpoint_start_insts= @@ -88,12 +98,17 @@ addr_ranges=0:18446744073709551615 assoc=2 clk_domain=system.cpu_clk_domain clusivity=mostly_incl +default_p_state=UNDEFINED demand_mshr_reserve=1 eventq_index=0 hit_latency=2 is_read_only=false max_miss_count=0 mshrs=4 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null prefetch_on_access=false prefetcher=Null response_latency=2 @@ -112,8 +127,13 @@ type=LRU assoc=2 block_size=64 clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 hit_latency=2 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sequential_access=false size=262144 @@ -129,12 +149,17 @@ addr_ranges=0:18446744073709551615 assoc=2 clk_domain=system.cpu_clk_domain clusivity=mostly_incl +default_p_state=UNDEFINED demand_mshr_reserve=1 eventq_index=0 hit_latency=2 is_read_only=true max_miss_count=0 mshrs=4 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null prefetch_on_access=false prefetcher=Null response_latency=2 @@ -153,8 +178,13 @@ type=LRU assoc=2 block_size=64 clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 hit_latency=2 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sequential_access=false size=131072 @@ -179,12 +209,17 @@ addr_ranges=0:18446744073709551615 assoc=8 clk_domain=system.cpu_clk_domain clusivity=mostly_incl +default_p_state=UNDEFINED demand_mshr_reserve=1 eventq_index=0 hit_latency=20 is_read_only=false max_miss_count=0 mshrs=20 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null prefetch_on_access=false prefetcher=Null response_latency=20 @@ -203,8 +238,13 @@ type=LRU assoc=8 block_size=64 clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 hit_latency=20 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sequential_access=false size=2097152 @@ -212,10 +252,15 @@ size=2097152 type=CoherentXBar children=snoop_filter clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 forward_latency=0 frontend_latency=1 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 point_of_coherency=false +power_model=Null response_latency=1 snoop_filter=system.cpu.toL2Bus.snoop_filter snoop_response_latency=1 @@ -246,7 +291,7 @@ env= errout=cerr euid=100 eventq_index=0 -executable=/dist/m5/regression/test-progs/hello/bin/alpha/tru64/hello +executable=/arm/projectscratch/randd/systems/dist/test-progs/hello/bin/alpha/tru64/hello gid=100 input=cin kvmInSE=false @@ -278,10 +323,15 @@ transition_latency=100000000 [system.membus] type=CoherentXBar clk_domain=system.clk_domain +default_p_state=UNDEFINED eventq_index=0 forward_latency=4 frontend_latency=3 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 point_of_coherency=true +power_model=Null 
response_latency=2 snoop_filter=Null snoop_response_latency=4 @@ -296,11 +346,16 @@ type=SimpleMemory bandwidth=73.000000 clk_domain=system.clk_domain conf_table_reported=true +default_p_state=UNDEFINED eventq_index=0 in_addr_map=true latency=30000 latency_var=0 null=false +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null range=0:134217727 port=system.membus.master[0] diff --git a/tests/quick/se/00.hello/ref/alpha/tru64/simple-timing/simerr b/tests/quick/se/00.hello/ref/alpha/tru64/simple-timing/simerr index 0b3033cd9..05d0e3f61 100755 --- a/tests/quick/se/00.hello/ref/alpha/tru64/simple-timing/simerr +++ b/tests/quick/se/00.hello/ref/alpha/tru64/simple-timing/simerr @@ -1,2 +1,3 @@ warn: Sockets disabled, not accepting gdb connections +warn: ClockedObject: More than one power state change request encountered within the same simulation tick warn: ignoring syscall sigprocmask(1, ...) diff --git a/tests/quick/se/00.hello/ref/alpha/tru64/simple-timing/simout b/tests/quick/se/00.hello/ref/alpha/tru64/simple-timing/simout index bad9ca9c2..af6b46ed3 100755 --- a/tests/quick/se/00.hello/ref/alpha/tru64/simple-timing/simout +++ b/tests/quick/se/00.hello/ref/alpha/tru64/simple-timing/simout @@ -1,10 +1,12 @@ +Redirecting stdout to build/ALPHA/tests/opt/quick/se/00.hello/alpha/tru64/simple-timing/simout +Redirecting stderr to build/ALPHA/tests/opt/quick/se/00.hello/alpha/tru64/simple-timing/simerr gem5 Simulator System. http://gem5.org gem5 is copyrighted software; use the --copyright option for details. -gem5 compiled Jan 21 2016 13:49:21 -gem5 started Jan 21 2016 13:50:15 -gem5 executing on zizzer, pid 34051 -command line: build/ALPHA/gem5.opt -d build/ALPHA/tests/opt/quick/se/00.hello/alpha/tru64/simple-timing -re /z/atgutier/gem5/gem5-commit/tests/run.py build/ALPHA/tests/opt/quick/se/00.hello/alpha/tru64/simple-timing +gem5 compiled Jul 19 2016 12:23:51 +gem5 started Jul 19 2016 12:24:23 +gem5 executing on e108600-lin, pid 39547 +command line: /work/curdun01/gem5-external.hg/build/ALPHA/gem5.opt -d build/ALPHA/tests/opt/quick/se/00.hello/alpha/tru64/simple-timing -re /work/curdun01/gem5-external.hg/tests/testing/../run.py quick/se/00.hello/alpha/tru64/simple-timing Global frequency set at 1000000000000 ticks per second info: Entering event queue @ 0. Starting simulation... 
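
The config.ini diffs above all gain the same new ClockedObject parameters (default_p_state, p_state_clk_gate_min/max/bins, power_model) on every clocked component in the reference configs: the system, CPUs, caches, crossbars and memory controllers. As a rough illustration only, the sketch below shows how such parameters could be overridden from a gem5 SE-mode Python script; the parameter names mirror the config.ini keys dumped in this changeset, while the script structure and the override values are illustrative assumptions, not part of these reference files.

    # Sketch of a gem5 Python config overriding the new power-state defaults.
    # Parameter names follow the config.ini dumps above; values are hypothetical.
    import m5
    from m5.objects import System, SrcClockDomain, VoltageDomain, TimingSimpleCPU

    system = System()
    system.clk_domain = SrcClockDomain(clock='1GHz',
                                       voltage_domain=VoltageDomain())
    system.cpu = TimingSimpleCPU()

    # Any ClockedObject exposes the power-state parameters seen in the diffs.
    system.cpu.default_p_state = 'UNDEFINED'   # same default as in config.ini
    system.cpu.p_state_clk_gate_min = '1ns'    # assumed override (1000 ticks)
    system.cpu.p_state_clk_gate_max = '1s'     # assumed override
    system.cpu.p_state_clk_gate_bins = 20

Left at their defaults, these parameters simply show up in config.ini exactly as in the hunks above (default_p_state=UNDEFINED, p_state_clk_gate_bins=20, p_state_clk_gate_min=1000, p_state_clk_gate_max=1000000000000, power_model=Null), which is why every reference config in this changeset picks up the same block of added lines.
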
diff --git a/tests/quick/se/00.hello/ref/alpha/tru64/simple-timing/stats.txt b/tests/quick/se/00.hello/ref/alpha/tru64/simple-timing/stats.txt index a94783b9b..c5f7031d7 100644 --- a/tests/quick/se/00.hello/ref/alpha/tru64/simple-timing/stats.txt +++ b/tests/quick/se/00.hello/ref/alpha/tru64/simple-timing/stats.txt @@ -4,10 +4,10 @@ sim_seconds 0.000018 # Nu sim_ticks 18239500 # Number of ticks simulated final_tick 18239500 # Number of ticks from beginning of simulation (restored from checkpoints and never reset) sim_freq 1000000000000 # Frequency of simulated ticks -host_inst_rate 346390 # Simulator instruction rate (inst/s) -host_op_rate 345952 # Simulator op (including micro ops) rate (op/s) -host_tick_rate 2445638693 # Simulator tick rate (ticks/s) -host_mem_usage 291156 # Number of bytes of host memory used +host_inst_rate 190443 # Simulator instruction rate (inst/s) +host_op_rate 190287 # Simulator op (including micro ops) rate (op/s) +host_tick_rate 1345802218 # Simulator tick rate (ticks/s) +host_mem_usage 247188 # Number of bytes of host memory used host_seconds 0.01 # Real time elapsed on the host sim_insts 2577 # Number of instructions simulated sim_ops 2577 # Number of ops (including micro ops) simulated @@ -462,6 +462,7 @@ system.cpu.toL2Bus.pkt_size_system.cpu.icache.mem_side::system.cpu.l2cache.cpu_s system.cpu.toL2Bus.pkt_size_system.cpu.dcache.mem_side::system.cpu.l2cache.cpu_side 5248 # Cumulative packet size per connected master and slave (bytes) system.cpu.toL2Bus.pkt_size::total 15680 # Cumulative packet size per connected master and slave (bytes) system.cpu.toL2Bus.snoops 0 # Total snoops (count) +system.cpu.toL2Bus.snoopTraffic 0 # Total snoop traffic (bytes) system.cpu.toL2Bus.snoop_fanout::samples 245 # Request fanout histogram system.cpu.toL2Bus.snoop_fanout::mean 0 # Request fanout histogram system.cpu.toL2Bus.snoop_fanout::stdev 0 # Request fanout histogram @@ -489,6 +490,7 @@ system.membus.pkt_count::total 490 # Pa system.membus.pkt_size_system.cpu.l2cache.mem_side::system.physmem.port 15680 # Cumulative packet size per connected master and slave (bytes) system.membus.pkt_size::total 15680 # Cumulative packet size per connected master and slave (bytes) system.membus.snoops 0 # Total snoops (count) +system.membus.snoopTraffic 0 # Total snoop traffic (bytes) system.membus.snoop_fanout::samples 245 # Request fanout histogram system.membus.snoop_fanout::mean 0 # Request fanout histogram system.membus.snoop_fanout::stdev 0 # Request fanout histogram diff --git a/tests/quick/se/00.hello/ref/arm/linux/minor-timing/config.ini b/tests/quick/se/00.hello/ref/arm/linux/minor-timing/config.ini index 0858c144d..a47bafcf6 100644 --- a/tests/quick/se/00.hello/ref/arm/linux/minor-timing/config.ini +++ b/tests/quick/se/00.hello/ref/arm/linux/minor-timing/config.ini @@ -14,6 +14,7 @@ children=clk_domain cpu cpu_clk_domain dvfs_handler membus physmem voltage_domai boot_osflags=a cache_line_size=64 clk_domain=system.clk_domain +default_p_state=UNDEFINED eventq_index=0 exit_on_work_items=false init_param=0 @@ -27,6 +28,10 @@ memories=system.physmem mmap_using_noreserve=false multi_thread=false num_work_ids=16 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null readfile= symbolfile= thermal_components= @@ -59,6 +64,7 @@ decodeCycleInput=true decodeInputBufferSize=3 decodeInputWidth=2 decodeToExecuteForwardDelay=1 +default_p_state=UNDEFINED do_checkpoint_insts=true do_quiesce=true do_statistics_insts=true @@ -103,12 +109,17 @@ 
max_insts_any_thread=0 max_loads_all_threads=0 max_loads_any_thread=0 numThreads=1 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null profile=0 progress_interval=0 simpoint_start_insts= socket_id=0 switched_out=false system=system +threadPolicy=RoundRobin tracer=system.cpu.tracer workload=system.cpu.workload dcache_port=system.cpu.dcache.cpu_side @@ -144,12 +155,17 @@ addr_ranges=0:18446744073709551615 assoc=2 clk_domain=system.cpu_clk_domain clusivity=mostly_incl +default_p_state=UNDEFINED demand_mshr_reserve=1 eventq_index=0 hit_latency=2 is_read_only=false max_miss_count=0 mshrs=4 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null prefetch_on_access=false prefetcher=Null response_latency=2 @@ -168,8 +184,13 @@ type=LRU assoc=2 block_size=64 clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 hit_latency=2 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sequential_access=false size=262144 @@ -192,9 +213,14 @@ walker=system.cpu.dstage2_mmu.stage2_tlb.walker [system.cpu.dstage2_mmu.stage2_tlb.walker] type=ArmTableWalker clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 is_stage2=true num_squash_per_cycle=2 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sys=system [system.cpu.dtb] @@ -208,9 +234,14 @@ walker=system.cpu.dtb.walker [system.cpu.dtb.walker] type=ArmTableWalker clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 is_stage2=false num_squash_per_cycle=2 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sys=system port=system.cpu.toL2Bus.slave[3] @@ -604,12 +635,17 @@ addr_ranges=0:18446744073709551615 assoc=2 clk_domain=system.cpu_clk_domain clusivity=mostly_incl +default_p_state=UNDEFINED demand_mshr_reserve=1 eventq_index=0 hit_latency=2 is_read_only=true max_miss_count=0 mshrs=4 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null prefetch_on_access=false prefetcher=Null response_latency=2 @@ -628,8 +664,13 @@ type=LRU assoc=2 block_size=64 clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 hit_latency=2 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sequential_access=false size=131072 @@ -687,9 +728,14 @@ walker=system.cpu.istage2_mmu.stage2_tlb.walker [system.cpu.istage2_mmu.stage2_tlb.walker] type=ArmTableWalker clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 is_stage2=true num_squash_per_cycle=2 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sys=system [system.cpu.itb] @@ -703,9 +749,14 @@ walker=system.cpu.itb.walker [system.cpu.itb.walker] type=ArmTableWalker clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 is_stage2=false num_squash_per_cycle=2 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sys=system port=system.cpu.toL2Bus.slave[2] @@ -716,12 +767,17 @@ addr_ranges=0:18446744073709551615 assoc=8 clk_domain=system.cpu_clk_domain clusivity=mostly_incl +default_p_state=UNDEFINED demand_mshr_reserve=1 eventq_index=0 hit_latency=20 is_read_only=false max_miss_count=0 mshrs=20 +p_state_clk_gate_bins=20 
+p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null prefetch_on_access=false prefetcher=Null response_latency=20 @@ -740,8 +796,13 @@ type=LRU assoc=8 block_size=64 clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 hit_latency=20 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sequential_access=false size=2097152 @@ -749,10 +810,15 @@ size=2097152 type=CoherentXBar children=snoop_filter clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 forward_latency=0 frontend_latency=1 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 point_of_coherency=false +power_model=Null response_latency=1 snoop_filter=system.cpu.toL2Bus.snoop_filter snoop_response_latency=1 @@ -783,7 +849,7 @@ env= errout=cerr euid=100 eventq_index=0 -executable=/dist/m5/regression/test-progs/hello/bin/arm/linux/hello +executable=/arm/projectscratch/randd/systems/dist/test-progs/hello/bin/arm/linux/hello gid=100 input=cin kvmInSE=false @@ -815,10 +881,15 @@ transition_latency=100000000 [system.membus] type=CoherentXBar clk_domain=system.clk_domain +default_p_state=UNDEFINED eventq_index=0 forward_latency=4 frontend_latency=3 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 point_of_coherency=true +power_model=Null response_latency=2 snoop_filter=Null snoop_response_latency=4 @@ -862,6 +933,7 @@ burst_length=8 channels=1 clk_domain=system.clk_domain conf_table_reported=true +default_p_state=UNDEFINED device_bus_width=8 device_rowbuffer_size=1024 device_size=536870912 @@ -873,7 +945,11 @@ max_accesses_per_row=16 mem_sched_policy=frfcfs min_writes_per_switch=16 null=false +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 page_policy=open_adaptive +power_model=Null range=0:134217727 ranks_per_channel=2 read_buffer_size=32 diff --git a/tests/quick/se/00.hello/ref/arm/linux/minor-timing/simerr b/tests/quick/se/00.hello/ref/arm/linux/minor-timing/simerr index 341b479f7..bbcd9d751 100755 --- a/tests/quick/se/00.hello/ref/arm/linux/minor-timing/simerr +++ b/tests/quick/se/00.hello/ref/arm/linux/minor-timing/simerr @@ -1,2 +1,3 @@ warn: DRAM device capacity (8192 Mbytes) does not match the address range assigned (128 Mbytes) warn: Sockets disabled, not accepting gdb connections +warn: ClockedObject: More than one power state change request encountered within the same simulation tick diff --git a/tests/quick/se/00.hello/ref/arm/linux/minor-timing/simout b/tests/quick/se/00.hello/ref/arm/linux/minor-timing/simout index b3cb615d2..21abd8071 100755 --- a/tests/quick/se/00.hello/ref/arm/linux/minor-timing/simout +++ b/tests/quick/se/00.hello/ref/arm/linux/minor-timing/simout @@ -1,12 +1,14 @@ +Redirecting stdout to build/ARM/tests/opt/quick/se/00.hello/arm/linux/minor-timing/simout +Redirecting stderr to build/ARM/tests/opt/quick/se/00.hello/arm/linux/minor-timing/simerr gem5 Simulator System. http://gem5.org gem5 is copyrighted software; use the --copyright option for details. 
-gem5 compiled Jan 21 2016 14:45:42 -gem5 started Jan 21 2016 14:46:59 -gem5 executing on zizzer, pid 20780 -command line: build/ARM/gem5.opt -d build/ARM/tests/opt/quick/se/00.hello/arm/linux/minor-timing -re /z/atgutier/gem5/gem5-commit/tests/run.py build/ARM/tests/opt/quick/se/00.hello/arm/linux/minor-timing +gem5 compiled Jul 21 2016 14:37:41 +gem5 started Jul 21 2016 14:38:22 +gem5 executing on e108600-lin, pid 23083 +command line: /work/curdun01/gem5-external.hg/build/ARM/gem5.opt -d build/ARM/tests/opt/quick/se/00.hello/arm/linux/minor-timing -re /work/curdun01/gem5-external.hg/tests/testing/../run.py quick/se/00.hello/arm/linux/minor-timing Global frequency set at 1000000000000 ticks per second info: Entering event queue @ 0. Starting simulation... Hello world! -Exiting @ tick 29949500 because target called exit() +Exiting @ tick 30083500 because target called exit() diff --git a/tests/quick/se/00.hello/ref/arm/linux/minor-timing/stats.txt b/tests/quick/se/00.hello/ref/arm/linux/minor-timing/stats.txt index acde8b0d6..ebafeb85e 100644 --- a/tests/quick/se/00.hello/ref/arm/linux/minor-timing/stats.txt +++ b/tests/quick/se/00.hello/ref/arm/linux/minor-timing/stats.txt @@ -1,19 +1,19 @@ ---------- Begin Simulation Statistics ---------- sim_seconds 0.000030 # Number of seconds simulated -sim_ticks 29977500 # Number of ticks simulated -final_tick 29977500 # Number of ticks from beginning of simulation (restored from checkpoints and never reset) +sim_ticks 30083500 # Number of ticks simulated +final_tick 30083500 # Number of ticks from beginning of simulation (restored from checkpoints and never reset) sim_freq 1000000000000 # Frequency of simulated ticks -host_inst_rate 147440 # Simulator instruction rate (inst/s) -host_op_rate 172555 # Simulator op (including micro ops) rate (op/s) -host_tick_rate 959274014 # Simulator tick rate (ticks/s) -host_mem_usage 309288 # Number of bytes of host memory used -host_seconds 0.03 # Real time elapsed on the host +host_inst_rate 80042 # Simulator instruction rate (inst/s) +host_op_rate 93682 # Simulator op (including micro ops) rate (op/s) +host_tick_rate 522670316 # Simulator tick rate (ticks/s) +host_mem_usage 264608 # Number of bytes of host memory used +host_seconds 0.06 # Real time elapsed on the host sim_insts 4605 # Number of instructions simulated sim_ops 5391 # Number of ops (including micro ops) simulated system.voltage_domain.voltage 1 # Voltage in Volts system.clk_domain.clock 1000 # Clock period in ticks -system.physmem.pwrStateResidencyTicks::UNDEFINED 29977500 # Cumulative time (in ticks) in various power states +system.physmem.pwrStateResidencyTicks::UNDEFINED 30083500 # Cumulative time (in ticks) in various power states system.physmem.bytes_read::cpu.inst 19520 # Number of bytes read from this memory system.physmem.bytes_read::cpu.data 7424 # Number of bytes read from this memory system.physmem.bytes_read::total 26944 # Number of bytes read from this memory @@ -22,14 +22,14 @@ system.physmem.bytes_inst_read::total 19520 # Nu system.physmem.num_reads::cpu.inst 305 # Number of read requests responded to by this memory system.physmem.num_reads::cpu.data 116 # Number of read requests responded to by this memory system.physmem.num_reads::total 421 # Number of read requests responded to by this memory -system.physmem.bw_read::cpu.inst 651155033 # Total read bandwidth from this memory (bytes/s) -system.physmem.bw_read::cpu.data 247652406 # Total read bandwidth from this memory (bytes/s) -system.physmem.bw_read::total 898807439 # Total read 
bandwidth from this memory (bytes/s) -system.physmem.bw_inst_read::cpu.inst 651155033 # Instruction read bandwidth from this memory (bytes/s) -system.physmem.bw_inst_read::total 651155033 # Instruction read bandwidth from this memory (bytes/s) -system.physmem.bw_total::cpu.inst 651155033 # Total bandwidth to/from this memory (bytes/s) -system.physmem.bw_total::cpu.data 247652406 # Total bandwidth to/from this memory (bytes/s) -system.physmem.bw_total::total 898807439 # Total bandwidth to/from this memory (bytes/s) +system.physmem.bw_read::cpu.inst 648860671 # Total read bandwidth from this memory (bytes/s) +system.physmem.bw_read::cpu.data 246779796 # Total read bandwidth from this memory (bytes/s) +system.physmem.bw_read::total 895640467 # Total read bandwidth from this memory (bytes/s) +system.physmem.bw_inst_read::cpu.inst 648860671 # Instruction read bandwidth from this memory (bytes/s) +system.physmem.bw_inst_read::total 648860671 # Instruction read bandwidth from this memory (bytes/s) +system.physmem.bw_total::cpu.inst 648860671 # Total bandwidth to/from this memory (bytes/s) +system.physmem.bw_total::cpu.data 246779796 # Total bandwidth to/from this memory (bytes/s) +system.physmem.bw_total::total 895640467 # Total bandwidth to/from this memory (bytes/s) system.physmem.readReqs 421 # Number of read requests accepted system.physmem.writeReqs 0 # Number of write requests accepted system.physmem.readBursts 421 # Number of DRAM read bursts, including those serviced by the write queue @@ -76,7 +76,7 @@ system.physmem.perBankWrBursts::14 0 # Pe system.physmem.perBankWrBursts::15 0 # Per bank write bursts system.physmem.numRdRetry 0 # Number of times read queue was full causing retry system.physmem.numWrRetry 0 # Number of times write queue was full causing retry -system.physmem.totGap 29886000 # Total gap between requests +system.physmem.totGap 29992500 # Total gap between requests system.physmem.readPktSize::0 0 # Read request sizes (log2) system.physmem.readPktSize::1 0 # Read request sizes (log2) system.physmem.readPktSize::2 0 # Read request sizes (log2) @@ -91,9 +91,9 @@ system.physmem.writePktSize::3 0 # Wr system.physmem.writePktSize::4 0 # Write request sizes (log2) system.physmem.writePktSize::5 0 # Write request sizes (log2) system.physmem.writePktSize::6 0 # Write request sizes (log2) -system.physmem.rdQLenPdf::0 347 # What read queue length does an incoming req see -system.physmem.rdQLenPdf::1 66 # What read queue length does an incoming req see -system.physmem.rdQLenPdf::2 8 # What read queue length does an incoming req see +system.physmem.rdQLenPdf::0 344 # What read queue length does an incoming req see +system.physmem.rdQLenPdf::1 70 # What read queue length does an incoming req see +system.physmem.rdQLenPdf::2 7 # What read queue length does an incoming req see system.physmem.rdQLenPdf::3 0 # What read queue length does an incoming req see system.physmem.rdQLenPdf::4 0 # What read queue length does an incoming req see system.physmem.rdQLenPdf::5 0 # What read queue length does an incoming req see @@ -189,84 +189,84 @@ system.physmem.wrQLenPdf::62 0 # Wh system.physmem.wrQLenPdf::63 0 # What write queue length does an incoming req see system.physmem.bytesPerActivate::samples 62 # Bytes accessed per row activation system.physmem.bytesPerActivate::mean 408.774194 # Bytes accessed per row activation -system.physmem.bytesPerActivate::gmean 287.393665 # Bytes accessed per row activation -system.physmem.bytesPerActivate::stdev 328.869570 # Bytes accessed per row activation 
+system.physmem.bytesPerActivate::gmean 287.809352 # Bytes accessed per row activation +system.physmem.bytesPerActivate::stdev 328.256468 # Bytes accessed per row activation system.physmem.bytesPerActivate::0-127 8 12.90% 12.90% # Bytes accessed per row activation system.physmem.bytesPerActivate::128-255 17 27.42% 40.32% # Bytes accessed per row activation system.physmem.bytesPerActivate::256-383 12 19.35% 59.68% # Bytes accessed per row activation -system.physmem.bytesPerActivate::384-511 5 8.06% 67.74% # Bytes accessed per row activation -system.physmem.bytesPerActivate::512-639 4 6.45% 74.19% # Bytes accessed per row activation +system.physmem.bytesPerActivate::384-511 6 9.68% 69.35% # Bytes accessed per row activation +system.physmem.bytesPerActivate::512-639 3 4.84% 74.19% # Bytes accessed per row activation system.physmem.bytesPerActivate::640-767 3 4.84% 79.03% # Bytes accessed per row activation system.physmem.bytesPerActivate::768-895 3 4.84% 83.87% # Bytes accessed per row activation system.physmem.bytesPerActivate::896-1023 1 1.61% 85.48% # Bytes accessed per row activation system.physmem.bytesPerActivate::1024-1151 9 14.52% 100.00% # Bytes accessed per row activation system.physmem.bytesPerActivate::total 62 # Bytes accessed per row activation -system.physmem.totQLat 2113500 # Total ticks spent queuing -system.physmem.totMemAccLat 10007250 # Total ticks spent from burst creation until serviced by the DRAM +system.physmem.totQLat 2221000 # Total ticks spent queuing +system.physmem.totMemAccLat 10114750 # Total ticks spent from burst creation until serviced by the DRAM system.physmem.totBusLat 2105000 # Total ticks spent in databus transfers -system.physmem.avgQLat 5020.19 # Average queueing delay per DRAM burst +system.physmem.avgQLat 5275.53 # Average queueing delay per DRAM burst system.physmem.avgBusLat 5000.00 # Average bus latency per DRAM burst -system.physmem.avgMemAccLat 23770.19 # Average memory access latency per DRAM burst -system.physmem.avgRdBW 898.81 # Average DRAM read bandwidth in MiByte/s +system.physmem.avgMemAccLat 24025.53 # Average memory access latency per DRAM burst +system.physmem.avgRdBW 895.64 # Average DRAM read bandwidth in MiByte/s system.physmem.avgWrBW 0.00 # Average achieved write bandwidth in MiByte/s -system.physmem.avgRdBWSys 898.81 # Average system read bandwidth in MiByte/s +system.physmem.avgRdBWSys 895.64 # Average system read bandwidth in MiByte/s system.physmem.avgWrBWSys 0.00 # Average system write bandwidth in MiByte/s system.physmem.peakBW 12800.00 # Theoretical peak bandwidth in MiByte/s -system.physmem.busUtil 7.02 # Data bus utilization in percentage -system.physmem.busUtilRead 7.02 # Data bus utilization in percentage for reads +system.physmem.busUtil 7.00 # Data bus utilization in percentage +system.physmem.busUtilRead 7.00 # Data bus utilization in percentage for reads system.physmem.busUtilWrite 0.00 # Data bus utilization in percentage for writes -system.physmem.avgRdQLen 1.21 # Average read queue length when enqueuing +system.physmem.avgRdQLen 1.22 # Average read queue length when enqueuing system.physmem.avgWrQLen 0.00 # Average write queue length when enqueuing system.physmem.readRowHits 350 # Number of row buffer hits during reads system.physmem.writeRowHits 0 # Number of row buffer hits during writes system.physmem.readRowHitRate 83.14 # Row buffer hit rate for reads system.physmem.writeRowHitRate nan # Row buffer hit rate for writes -system.physmem.avgGap 70988.12 # Average gap between requests +system.physmem.avgGap 
71241.09 # Average gap between requests system.physmem.pageHitRate 83.14 # Row buffer hit rate, read and write combined system.physmem_0.actEnergy 272160 # Energy for activate commands per rank (pJ) system.physmem_0.preEnergy 148500 # Energy for precharge commands per rank (pJ) -system.physmem_0.readEnergy 1973400 # Energy for read commands per rank (pJ) +system.physmem_0.readEnergy 1965600 # Energy for read commands per rank (pJ) system.physmem_0.writeEnergy 0 # Energy for write commands per rank (pJ) system.physmem_0.refreshEnergy 1525680 # Energy for refresh commands per rank (pJ) -system.physmem_0.actBackEnergy 16099650 # Energy for active background per rank (pJ) +system.physmem_0.actBackEnergy 16103925 # Energy for active background per rank (pJ) system.physmem_0.preBackEnergy 48750 # Energy for precharge background per rank (pJ) -system.physmem_0.totalEnergy 20068140 # Total energy per rank (pJ) -system.physmem_0.averagePower 849.669860 # Core power per rank (mW) -system.physmem_0.memoryStateTime::IDLE 12500 # Time in different power states +system.physmem_0.totalEnergy 20064615 # Total energy per rank (pJ) +system.physmem_0.averagePower 849.295873 # Core power per rank (mW) +system.physmem_0.memoryStateTime::IDLE 12000 # Time in different power states system.physmem_0.memoryStateTime::REF 780000 # Time in different power states system.physmem_0.memoryStateTime::PRE_PDN 0 # Time in different power states -system.physmem_0.memoryStateTime::ACT 22840000 # Time in different power states +system.physmem_0.memoryStateTime::ACT 22845750 # Time in different power states system.physmem_0.memoryStateTime::ACT_PDN 0 # Time in different power states system.physmem_1.actEnergy 128520 # Energy for activate commands per rank (pJ) system.physmem_1.preEnergy 70125 # Energy for precharge commands per rank (pJ) system.physmem_1.readEnergy 694200 # Energy for read commands per rank (pJ) system.physmem_1.writeEnergy 0 # Energy for write commands per rank (pJ) system.physmem_1.refreshEnergy 1525680 # Energy for refresh commands per rank (pJ) -system.physmem_1.actBackEnergy 15745680 # Energy for active background per rank (pJ) -system.physmem_1.preBackEnergy 359250 # Energy for precharge background per rank (pJ) -system.physmem_1.totalEnergy 18523455 # Total energy per rank (pJ) -system.physmem_1.averagePower 784.269066 # Core power per rank (mW) -system.physmem_1.memoryStateTime::IDLE 1654750 # Time in different power states +system.physmem_1.actBackEnergy 15554160 # Energy for active background per rank (pJ) +system.physmem_1.preBackEnergy 527250 # Energy for precharge background per rank (pJ) +system.physmem_1.totalEnergy 18499935 # Total energy per rank (pJ) +system.physmem_1.averagePower 783.273247 # Core power per rank (mW) +system.physmem_1.memoryStateTime::IDLE 2015750 # Time in different power states system.physmem_1.memoryStateTime::REF 780000 # Time in different power states system.physmem_1.memoryStateTime::PRE_PDN 0 # Time in different power states -system.physmem_1.memoryStateTime::ACT 22324250 # Time in different power states +system.physmem_1.memoryStateTime::ACT 22043250 # Time in different power states system.physmem_1.memoryStateTime::ACT_PDN 0 # Time in different power states -system.pwrStateResidencyTicks::UNDEFINED 29977500 # Cumulative time (in ticks) in various power states -system.cpu.branchPred.lookups 1949 # Number of BP lookups -system.cpu.branchPred.condPredicted 1165 # Number of conditional branches predicted +system.pwrStateResidencyTicks::UNDEFINED 30083500 # Cumulative 
time (in ticks) in various power states +system.cpu.branchPred.lookups 1968 # Number of BP lookups +system.cpu.branchPred.condPredicted 1178 # Number of conditional branches predicted system.cpu.branchPred.condIncorrect 351 # Number of conditional branches incorrect -system.cpu.branchPred.BTBLookups 1641 # Number of BTB lookups -system.cpu.branchPred.BTBHits 316 # Number of BTB hits +system.cpu.branchPred.BTBLookups 1660 # Number of BTB lookups +system.cpu.branchPred.BTBHits 322 # Number of BTB hits system.cpu.branchPred.BTBCorrect 0 # Number of correct BTB predictions (this stat may not work properly. -system.cpu.branchPred.BTBHitPct 19.256551 # BTB Hit Percentage -system.cpu.branchPred.usedRAS 222 # Number of times the RAS was used to get a target. +system.cpu.branchPred.BTBHitPct 19.397590 # BTB Hit Percentage +system.cpu.branchPred.usedRAS 220 # Number of times the RAS was used to get a target. system.cpu.branchPred.RASInCorrect 16 # Number of incorrect RAS predictions. -system.cpu.branchPred.indirectLookups 133 # Number of indirect predictor lookups. +system.cpu.branchPred.indirectLookups 135 # Number of indirect predictor lookups. system.cpu.branchPred.indirectHits 8 # Number of indirect target hits. -system.cpu.branchPred.indirectMisses 125 # Number of indirect misses. +system.cpu.branchPred.indirectMisses 127 # Number of indirect misses. system.cpu.branchPredindirectMispredicted 62 # Number of mispredicted indirect branches. system.cpu_clk_domain.clock 500 # Clock period in ticks -system.cpu.dstage2_mmu.stage2_tlb.walker.pwrStateResidencyTicks::UNDEFINED 29977500 # Cumulative time (in ticks) in various power states +system.cpu.dstage2_mmu.stage2_tlb.walker.pwrStateResidencyTicks::UNDEFINED 30083500 # Cumulative time (in ticks) in various power states system.cpu.dstage2_mmu.stage2_tlb.walker.walks 0 # Table walker walks requested system.cpu.dstage2_mmu.stage2_tlb.walker.walkRequestOrigin_Requested::Data 0 # Table walker requests started/completed, data/inst system.cpu.dstage2_mmu.stage2_tlb.walker.walkRequestOrigin_Requested::Inst 0 # Table walker requests started/completed, data/inst @@ -296,7 +296,7 @@ system.cpu.dstage2_mmu.stage2_tlb.inst_accesses 0 system.cpu.dstage2_mmu.stage2_tlb.hits 0 # DTB hits system.cpu.dstage2_mmu.stage2_tlb.misses 0 # DTB misses system.cpu.dstage2_mmu.stage2_tlb.accesses 0 # DTB accesses -system.cpu.dtb.walker.pwrStateResidencyTicks::UNDEFINED 29977500 # Cumulative time (in ticks) in various power states +system.cpu.dtb.walker.pwrStateResidencyTicks::UNDEFINED 30083500 # Cumulative time (in ticks) in various power states system.cpu.dtb.walker.walks 0 # Table walker walks requested system.cpu.dtb.walker.walkRequestOrigin_Requested::Data 0 # Table walker requests started/completed, data/inst system.cpu.dtb.walker.walkRequestOrigin_Requested::Inst 0 # Table walker requests started/completed, data/inst @@ -326,7 +326,7 @@ system.cpu.dtb.inst_accesses 0 # IT system.cpu.dtb.hits 0 # DTB hits system.cpu.dtb.misses 0 # DTB misses system.cpu.dtb.accesses 0 # DTB accesses -system.cpu.istage2_mmu.stage2_tlb.walker.pwrStateResidencyTicks::UNDEFINED 29977500 # Cumulative time (in ticks) in various power states +system.cpu.istage2_mmu.stage2_tlb.walker.pwrStateResidencyTicks::UNDEFINED 30083500 # Cumulative time (in ticks) in various power states system.cpu.istage2_mmu.stage2_tlb.walker.walks 0 # Table walker walks requested system.cpu.istage2_mmu.stage2_tlb.walker.walkRequestOrigin_Requested::Data 0 # Table walker requests started/completed, data/inst 
system.cpu.istage2_mmu.stage2_tlb.walker.walkRequestOrigin_Requested::Inst 0 # Table walker requests started/completed, data/inst @@ -356,7 +356,7 @@ system.cpu.istage2_mmu.stage2_tlb.inst_accesses 0 system.cpu.istage2_mmu.stage2_tlb.hits 0 # DTB hits system.cpu.istage2_mmu.stage2_tlb.misses 0 # DTB misses system.cpu.istage2_mmu.stage2_tlb.accesses 0 # DTB accesses -system.cpu.itb.walker.pwrStateResidencyTicks::UNDEFINED 29977500 # Cumulative time (in ticks) in various power states +system.cpu.itb.walker.pwrStateResidencyTicks::UNDEFINED 30083500 # Cumulative time (in ticks) in various power states system.cpu.itb.walker.walks 0 # Table walker walks requested system.cpu.itb.walker.walkRequestOrigin_Requested::Data 0 # Table walker requests started/completed, data/inst system.cpu.itb.walker.walkRequestOrigin_Requested::Inst 0 # Table walker requests started/completed, data/inst @@ -387,16 +387,16 @@ system.cpu.itb.hits 0 # DT system.cpu.itb.misses 0 # DTB misses system.cpu.itb.accesses 0 # DTB accesses system.cpu.workload.num_syscalls 13 # Number of system calls -system.cpu.pwrStateResidencyTicks::ON 29977500 # Cumulative time (in ticks) in various power states -system.cpu.numCycles 59955 # number of cpu cycles simulated +system.cpu.pwrStateResidencyTicks::ON 30083500 # Cumulative time (in ticks) in various power states +system.cpu.numCycles 60167 # number of cpu cycles simulated system.cpu.numWorkItemsStarted 0 # number of work items this cpu started system.cpu.numWorkItemsCompleted 0 # number of work items this cpu completed system.cpu.committedInsts 4605 # Number of instructions committed system.cpu.committedOps 5391 # Number of ops (including micro ops) committed -system.cpu.discardedOps 1202 # Number of ops (including micro ops) which were discarded before commit +system.cpu.discardedOps 1193 # Number of ops (including micro ops) which were discarded before commit system.cpu.numFetchSuspends 0 # Number of times Execute suspended instruction fetching -system.cpu.cpi 13.019544 # CPI: cycles per instruction -system.cpu.ipc 0.076808 # IPC: instructions per cycle +system.cpu.cpi 13.065581 # CPI: cycles per instruction +system.cpu.ipc 0.076537 # IPC: instructions per cycle system.cpu.op_class_0::No_OpClass 0 0.00% 0.00% # Class of committed instruction system.cpu.op_class_0::IntAlu 3419 63.42% 63.42% # Class of committed instruction system.cpu.op_class_0::IntMult 4 0.07% 63.49% # Class of committed instruction @@ -432,95 +432,95 @@ system.cpu.op_class_0::MemWrite 938 17.40% 100.00% # Cl system.cpu.op_class_0::IprAccess 0 0.00% 100.00% # Class of committed instruction system.cpu.op_class_0::InstPrefetch 0 0.00% 100.00% # Class of committed instruction system.cpu.op_class_0::total 5391 # Class of committed instruction -system.cpu.tickCycles 10654 # Number of cycles that the object actually ticked -system.cpu.idleCycles 49301 # Total number of cycles that the object has spent stopped -system.cpu.dcache.tags.pwrStateResidencyTicks::UNDEFINED 29977500 # Cumulative time (in ticks) in various power states +system.cpu.tickCycles 10719 # Number of cycles that the object actually ticked +system.cpu.idleCycles 49448 # Total number of cycles that the object has spent stopped +system.cpu.dcache.tags.pwrStateResidencyTicks::UNDEFINED 30083500 # Cumulative time (in ticks) in various power states system.cpu.dcache.tags.replacements 0 # number of replacements -system.cpu.dcache.tags.tagsinuse 86.495507 # Cycle average of tags in use -system.cpu.dcache.tags.total_refs 1916 # Total number of references to valid 
blocks. +system.cpu.dcache.tags.tagsinuse 86.478936 # Cycle average of tags in use +system.cpu.dcache.tags.total_refs 1918 # Total number of references to valid blocks. system.cpu.dcache.tags.sampled_refs 146 # Sample count of references to valid blocks. -system.cpu.dcache.tags.avg_refs 13.123288 # Average number of references to valid blocks. +system.cpu.dcache.tags.avg_refs 13.136986 # Average number of references to valid blocks. system.cpu.dcache.tags.warmup_cycle 0 # Cycle when the warmup percentage was hit. -system.cpu.dcache.tags.occ_blocks::cpu.data 86.495507 # Average occupied blocks per requestor -system.cpu.dcache.tags.occ_percent::cpu.data 0.021117 # Average percentage of cache occupancy -system.cpu.dcache.tags.occ_percent::total 0.021117 # Average percentage of cache occupancy +system.cpu.dcache.tags.occ_blocks::cpu.data 86.478936 # Average occupied blocks per requestor +system.cpu.dcache.tags.occ_percent::cpu.data 0.021113 # Average percentage of cache occupancy +system.cpu.dcache.tags.occ_percent::total 0.021113 # Average percentage of cache occupancy system.cpu.dcache.tags.occ_task_id_blocks::1024 146 # Occupied blocks per task id system.cpu.dcache.tags.age_task_id_blocks_1024::0 38 # Occupied blocks per task id system.cpu.dcache.tags.age_task_id_blocks_1024::1 108 # Occupied blocks per task id system.cpu.dcache.tags.occ_task_id_percent::1024 0.035645 # Percentage of cache occupancy per task id -system.cpu.dcache.tags.tag_accesses 4342 # Number of tag accesses -system.cpu.dcache.tags.data_accesses 4342 # Number of data accesses -system.cpu.dcache.pwrStateResidencyTicks::UNDEFINED 29977500 # Cumulative time (in ticks) in various power states -system.cpu.dcache.ReadReq_hits::cpu.data 1048 # number of ReadReq hits -system.cpu.dcache.ReadReq_hits::total 1048 # number of ReadReq hits +system.cpu.dcache.tags.tag_accesses 4334 # Number of tag accesses +system.cpu.dcache.tags.data_accesses 4334 # Number of data accesses +system.cpu.dcache.pwrStateResidencyTicks::UNDEFINED 30083500 # Cumulative time (in ticks) in various power states +system.cpu.dcache.ReadReq_hits::cpu.data 1050 # number of ReadReq hits +system.cpu.dcache.ReadReq_hits::total 1050 # number of ReadReq hits system.cpu.dcache.WriteReq_hits::cpu.data 846 # number of WriteReq hits system.cpu.dcache.WriteReq_hits::total 846 # number of WriteReq hits system.cpu.dcache.LoadLockedReq_hits::cpu.data 11 # number of LoadLockedReq hits system.cpu.dcache.LoadLockedReq_hits::total 11 # number of LoadLockedReq hits system.cpu.dcache.StoreCondReq_hits::cpu.data 11 # number of StoreCondReq hits system.cpu.dcache.StoreCondReq_hits::total 11 # number of StoreCondReq hits -system.cpu.dcache.demand_hits::cpu.data 1894 # number of demand (read+write) hits -system.cpu.dcache.demand_hits::total 1894 # number of demand (read+write) hits -system.cpu.dcache.overall_hits::cpu.data 1894 # number of overall hits -system.cpu.dcache.overall_hits::total 1894 # number of overall hits -system.cpu.dcache.ReadReq_misses::cpu.data 115 # number of ReadReq misses -system.cpu.dcache.ReadReq_misses::total 115 # number of ReadReq misses +system.cpu.dcache.demand_hits::cpu.data 1896 # number of demand (read+write) hits +system.cpu.dcache.demand_hits::total 1896 # number of demand (read+write) hits +system.cpu.dcache.overall_hits::cpu.data 1896 # number of overall hits +system.cpu.dcache.overall_hits::total 1896 # number of overall hits +system.cpu.dcache.ReadReq_misses::cpu.data 109 # number of ReadReq misses +system.cpu.dcache.ReadReq_misses::total 109 # 
number of ReadReq misses system.cpu.dcache.WriteReq_misses::cpu.data 67 # number of WriteReq misses system.cpu.dcache.WriteReq_misses::total 67 # number of WriteReq misses -system.cpu.dcache.demand_misses::cpu.data 182 # number of demand (read+write) misses -system.cpu.dcache.demand_misses::total 182 # number of demand (read+write) misses -system.cpu.dcache.overall_misses::cpu.data 182 # number of overall misses -system.cpu.dcache.overall_misses::total 182 # number of overall misses -system.cpu.dcache.ReadReq_miss_latency::cpu.data 6977500 # number of ReadReq miss cycles -system.cpu.dcache.ReadReq_miss_latency::total 6977500 # number of ReadReq miss cycles -system.cpu.dcache.WriteReq_miss_latency::cpu.data 5011500 # number of WriteReq miss cycles -system.cpu.dcache.WriteReq_miss_latency::total 5011500 # number of WriteReq miss cycles -system.cpu.dcache.demand_miss_latency::cpu.data 11989000 # number of demand (read+write) miss cycles -system.cpu.dcache.demand_miss_latency::total 11989000 # number of demand (read+write) miss cycles -system.cpu.dcache.overall_miss_latency::cpu.data 11989000 # number of overall miss cycles -system.cpu.dcache.overall_miss_latency::total 11989000 # number of overall miss cycles -system.cpu.dcache.ReadReq_accesses::cpu.data 1163 # number of ReadReq accesses(hits+misses) -system.cpu.dcache.ReadReq_accesses::total 1163 # number of ReadReq accesses(hits+misses) +system.cpu.dcache.demand_misses::cpu.data 176 # number of demand (read+write) misses +system.cpu.dcache.demand_misses::total 176 # number of demand (read+write) misses +system.cpu.dcache.overall_misses::cpu.data 176 # number of overall misses +system.cpu.dcache.overall_misses::total 176 # number of overall misses +system.cpu.dcache.ReadReq_miss_latency::cpu.data 6690500 # number of ReadReq miss cycles +system.cpu.dcache.ReadReq_miss_latency::total 6690500 # number of ReadReq miss cycles +system.cpu.dcache.WriteReq_miss_latency::cpu.data 5002500 # number of WriteReq miss cycles +system.cpu.dcache.WriteReq_miss_latency::total 5002500 # number of WriteReq miss cycles +system.cpu.dcache.demand_miss_latency::cpu.data 11693000 # number of demand (read+write) miss cycles +system.cpu.dcache.demand_miss_latency::total 11693000 # number of demand (read+write) miss cycles +system.cpu.dcache.overall_miss_latency::cpu.data 11693000 # number of overall miss cycles +system.cpu.dcache.overall_miss_latency::total 11693000 # number of overall miss cycles +system.cpu.dcache.ReadReq_accesses::cpu.data 1159 # number of ReadReq accesses(hits+misses) +system.cpu.dcache.ReadReq_accesses::total 1159 # number of ReadReq accesses(hits+misses) system.cpu.dcache.WriteReq_accesses::cpu.data 913 # number of WriteReq accesses(hits+misses) system.cpu.dcache.WriteReq_accesses::total 913 # number of WriteReq accesses(hits+misses) system.cpu.dcache.LoadLockedReq_accesses::cpu.data 11 # number of LoadLockedReq accesses(hits+misses) system.cpu.dcache.LoadLockedReq_accesses::total 11 # number of LoadLockedReq accesses(hits+misses) system.cpu.dcache.StoreCondReq_accesses::cpu.data 11 # number of StoreCondReq accesses(hits+misses) system.cpu.dcache.StoreCondReq_accesses::total 11 # number of StoreCondReq accesses(hits+misses) -system.cpu.dcache.demand_accesses::cpu.data 2076 # number of demand (read+write) accesses -system.cpu.dcache.demand_accesses::total 2076 # number of demand (read+write) accesses -system.cpu.dcache.overall_accesses::cpu.data 2076 # number of overall (read+write) accesses -system.cpu.dcache.overall_accesses::total 2076 # 
number of overall (read+write) accesses -system.cpu.dcache.ReadReq_miss_rate::cpu.data 0.098882 # miss rate for ReadReq accesses -system.cpu.dcache.ReadReq_miss_rate::total 0.098882 # miss rate for ReadReq accesses +system.cpu.dcache.demand_accesses::cpu.data 2072 # number of demand (read+write) accesses +system.cpu.dcache.demand_accesses::total 2072 # number of demand (read+write) accesses +system.cpu.dcache.overall_accesses::cpu.data 2072 # number of overall (read+write) accesses +system.cpu.dcache.overall_accesses::total 2072 # number of overall (read+write) accesses +system.cpu.dcache.ReadReq_miss_rate::cpu.data 0.094047 # miss rate for ReadReq accesses +system.cpu.dcache.ReadReq_miss_rate::total 0.094047 # miss rate for ReadReq accesses system.cpu.dcache.WriteReq_miss_rate::cpu.data 0.073384 # miss rate for WriteReq accesses system.cpu.dcache.WriteReq_miss_rate::total 0.073384 # miss rate for WriteReq accesses -system.cpu.dcache.demand_miss_rate::cpu.data 0.087669 # miss rate for demand accesses -system.cpu.dcache.demand_miss_rate::total 0.087669 # miss rate for demand accesses -system.cpu.dcache.overall_miss_rate::cpu.data 0.087669 # miss rate for overall accesses -system.cpu.dcache.overall_miss_rate::total 0.087669 # miss rate for overall accesses -system.cpu.dcache.ReadReq_avg_miss_latency::cpu.data 60673.913043 # average ReadReq miss latency -system.cpu.dcache.ReadReq_avg_miss_latency::total 60673.913043 # average ReadReq miss latency -system.cpu.dcache.WriteReq_avg_miss_latency::cpu.data 74798.507463 # average WriteReq miss latency -system.cpu.dcache.WriteReq_avg_miss_latency::total 74798.507463 # average WriteReq miss latency -system.cpu.dcache.demand_avg_miss_latency::cpu.data 65873.626374 # average overall miss latency -system.cpu.dcache.demand_avg_miss_latency::total 65873.626374 # average overall miss latency -system.cpu.dcache.overall_avg_miss_latency::cpu.data 65873.626374 # average overall miss latency -system.cpu.dcache.overall_avg_miss_latency::total 65873.626374 # average overall miss latency +system.cpu.dcache.demand_miss_rate::cpu.data 0.084942 # miss rate for demand accesses +system.cpu.dcache.demand_miss_rate::total 0.084942 # miss rate for demand accesses +system.cpu.dcache.overall_miss_rate::cpu.data 0.084942 # miss rate for overall accesses +system.cpu.dcache.overall_miss_rate::total 0.084942 # miss rate for overall accesses +system.cpu.dcache.ReadReq_avg_miss_latency::cpu.data 61380.733945 # average ReadReq miss latency +system.cpu.dcache.ReadReq_avg_miss_latency::total 61380.733945 # average ReadReq miss latency +system.cpu.dcache.WriteReq_avg_miss_latency::cpu.data 74664.179104 # average WriteReq miss latency +system.cpu.dcache.WriteReq_avg_miss_latency::total 74664.179104 # average WriteReq miss latency +system.cpu.dcache.demand_avg_miss_latency::cpu.data 66437.500000 # average overall miss latency +system.cpu.dcache.demand_avg_miss_latency::total 66437.500000 # average overall miss latency +system.cpu.dcache.overall_avg_miss_latency::cpu.data 66437.500000 # average overall miss latency +system.cpu.dcache.overall_avg_miss_latency::total 66437.500000 # average overall miss latency system.cpu.dcache.blocked_cycles::no_mshrs 0 # number of cycles access was blocked system.cpu.dcache.blocked_cycles::no_targets 0 # number of cycles access was blocked system.cpu.dcache.blocked::no_mshrs 0 # number of cycles access was blocked system.cpu.dcache.blocked::no_targets 0 # number of cycles access was blocked system.cpu.dcache.avg_blocked_cycles::no_mshrs nan # average 
number of cycles each access was blocked system.cpu.dcache.avg_blocked_cycles::no_targets nan # average number of cycles each access was blocked -system.cpu.dcache.ReadReq_mshr_hits::cpu.data 12 # number of ReadReq MSHR hits -system.cpu.dcache.ReadReq_mshr_hits::total 12 # number of ReadReq MSHR hits +system.cpu.dcache.ReadReq_mshr_hits::cpu.data 6 # number of ReadReq MSHR hits +system.cpu.dcache.ReadReq_mshr_hits::total 6 # number of ReadReq MSHR hits system.cpu.dcache.WriteReq_mshr_hits::cpu.data 24 # number of WriteReq MSHR hits system.cpu.dcache.WriteReq_mshr_hits::total 24 # number of WriteReq MSHR hits -system.cpu.dcache.demand_mshr_hits::cpu.data 36 # number of demand (read+write) MSHR hits -system.cpu.dcache.demand_mshr_hits::total 36 # number of demand (read+write) MSHR hits -system.cpu.dcache.overall_mshr_hits::cpu.data 36 # number of overall MSHR hits -system.cpu.dcache.overall_mshr_hits::total 36 # number of overall MSHR hits +system.cpu.dcache.demand_mshr_hits::cpu.data 30 # number of demand (read+write) MSHR hits +system.cpu.dcache.demand_mshr_hits::total 30 # number of demand (read+write) MSHR hits +system.cpu.dcache.overall_mshr_hits::cpu.data 30 # number of overall MSHR hits +system.cpu.dcache.overall_mshr_hits::total 30 # number of overall MSHR hits system.cpu.dcache.ReadReq_mshr_misses::cpu.data 103 # number of ReadReq MSHR misses system.cpu.dcache.ReadReq_mshr_misses::total 103 # number of ReadReq MSHR misses system.cpu.dcache.WriteReq_mshr_misses::cpu.data 43 # number of WriteReq MSHR misses @@ -529,83 +529,83 @@ system.cpu.dcache.demand_mshr_misses::cpu.data 146 system.cpu.dcache.demand_mshr_misses::total 146 # number of demand (read+write) MSHR misses system.cpu.dcache.overall_mshr_misses::cpu.data 146 # number of overall MSHR misses system.cpu.dcache.overall_mshr_misses::total 146 # number of overall MSHR misses -system.cpu.dcache.ReadReq_mshr_miss_latency::cpu.data 6370500 # number of ReadReq MSHR miss cycles -system.cpu.dcache.ReadReq_mshr_miss_latency::total 6370500 # number of ReadReq MSHR miss cycles -system.cpu.dcache.WriteReq_mshr_miss_latency::cpu.data 3194000 # number of WriteReq MSHR miss cycles -system.cpu.dcache.WriteReq_mshr_miss_latency::total 3194000 # number of WriteReq MSHR miss cycles -system.cpu.dcache.demand_mshr_miss_latency::cpu.data 9564500 # number of demand (read+write) MSHR miss cycles -system.cpu.dcache.demand_mshr_miss_latency::total 9564500 # number of demand (read+write) MSHR miss cycles -system.cpu.dcache.overall_mshr_miss_latency::cpu.data 9564500 # number of overall MSHR miss cycles -system.cpu.dcache.overall_mshr_miss_latency::total 9564500 # number of overall MSHR miss cycles -system.cpu.dcache.ReadReq_mshr_miss_rate::cpu.data 0.088564 # mshr miss rate for ReadReq accesses -system.cpu.dcache.ReadReq_mshr_miss_rate::total 0.088564 # mshr miss rate for ReadReq accesses +system.cpu.dcache.ReadReq_mshr_miss_latency::cpu.data 6338000 # number of ReadReq MSHR miss cycles +system.cpu.dcache.ReadReq_mshr_miss_latency::total 6338000 # number of ReadReq MSHR miss cycles +system.cpu.dcache.WriteReq_mshr_miss_latency::cpu.data 3188000 # number of WriteReq MSHR miss cycles +system.cpu.dcache.WriteReq_mshr_miss_latency::total 3188000 # number of WriteReq MSHR miss cycles +system.cpu.dcache.demand_mshr_miss_latency::cpu.data 9526000 # number of demand (read+write) MSHR miss cycles +system.cpu.dcache.demand_mshr_miss_latency::total 9526000 # number of demand (read+write) MSHR miss cycles +system.cpu.dcache.overall_mshr_miss_latency::cpu.data 
9526000 # number of overall MSHR miss cycles +system.cpu.dcache.overall_mshr_miss_latency::total 9526000 # number of overall MSHR miss cycles +system.cpu.dcache.ReadReq_mshr_miss_rate::cpu.data 0.088870 # mshr miss rate for ReadReq accesses +system.cpu.dcache.ReadReq_mshr_miss_rate::total 0.088870 # mshr miss rate for ReadReq accesses system.cpu.dcache.WriteReq_mshr_miss_rate::cpu.data 0.047097 # mshr miss rate for WriteReq accesses system.cpu.dcache.WriteReq_mshr_miss_rate::total 0.047097 # mshr miss rate for WriteReq accesses -system.cpu.dcache.demand_mshr_miss_rate::cpu.data 0.070328 # mshr miss rate for demand accesses -system.cpu.dcache.demand_mshr_miss_rate::total 0.070328 # mshr miss rate for demand accesses -system.cpu.dcache.overall_mshr_miss_rate::cpu.data 0.070328 # mshr miss rate for overall accesses -system.cpu.dcache.overall_mshr_miss_rate::total 0.070328 # mshr miss rate for overall accesses -system.cpu.dcache.ReadReq_avg_mshr_miss_latency::cpu.data 61849.514563 # average ReadReq mshr miss latency -system.cpu.dcache.ReadReq_avg_mshr_miss_latency::total 61849.514563 # average ReadReq mshr miss latency -system.cpu.dcache.WriteReq_avg_mshr_miss_latency::cpu.data 74279.069767 # average WriteReq mshr miss latency -system.cpu.dcache.WriteReq_avg_mshr_miss_latency::total 74279.069767 # average WriteReq mshr miss latency -system.cpu.dcache.demand_avg_mshr_miss_latency::cpu.data 65510.273973 # average overall mshr miss latency -system.cpu.dcache.demand_avg_mshr_miss_latency::total 65510.273973 # average overall mshr miss latency -system.cpu.dcache.overall_avg_mshr_miss_latency::cpu.data 65510.273973 # average overall mshr miss latency -system.cpu.dcache.overall_avg_mshr_miss_latency::total 65510.273973 # average overall mshr miss latency -system.cpu.icache.tags.pwrStateResidencyTicks::UNDEFINED 29977500 # Cumulative time (in ticks) in various power states +system.cpu.dcache.demand_mshr_miss_rate::cpu.data 0.070463 # mshr miss rate for demand accesses +system.cpu.dcache.demand_mshr_miss_rate::total 0.070463 # mshr miss rate for demand accesses +system.cpu.dcache.overall_mshr_miss_rate::cpu.data 0.070463 # mshr miss rate for overall accesses +system.cpu.dcache.overall_mshr_miss_rate::total 0.070463 # mshr miss rate for overall accesses +system.cpu.dcache.ReadReq_avg_mshr_miss_latency::cpu.data 61533.980583 # average ReadReq mshr miss latency +system.cpu.dcache.ReadReq_avg_mshr_miss_latency::total 61533.980583 # average ReadReq mshr miss latency +system.cpu.dcache.WriteReq_avg_mshr_miss_latency::cpu.data 74139.534884 # average WriteReq mshr miss latency +system.cpu.dcache.WriteReq_avg_mshr_miss_latency::total 74139.534884 # average WriteReq mshr miss latency +system.cpu.dcache.demand_avg_mshr_miss_latency::cpu.data 65246.575342 # average overall mshr miss latency +system.cpu.dcache.demand_avg_mshr_miss_latency::total 65246.575342 # average overall mshr miss latency +system.cpu.dcache.overall_avg_mshr_miss_latency::cpu.data 65246.575342 # average overall mshr miss latency +system.cpu.dcache.overall_avg_mshr_miss_latency::total 65246.575342 # average overall mshr miss latency +system.cpu.icache.tags.pwrStateResidencyTicks::UNDEFINED 30083500 # Cumulative time (in ticks) in various power states system.cpu.icache.tags.replacements 4 # number of replacements -system.cpu.icache.tags.tagsinuse 162.122030 # Cycle average of tags in use -system.cpu.icache.tags.total_refs 1926 # Total number of references to valid blocks. 
-system.cpu.icache.tags.sampled_refs 323 # Sample count of references to valid blocks. -system.cpu.icache.tags.avg_refs 5.962848 # Average number of references to valid blocks. +system.cpu.icache.tags.tagsinuse 161.834516 # Cycle average of tags in use +system.cpu.icache.tags.total_refs 1963 # Total number of references to valid blocks. +system.cpu.icache.tags.sampled_refs 322 # Sample count of references to valid blocks. +system.cpu.icache.tags.avg_refs 6.096273 # Average number of references to valid blocks. system.cpu.icache.tags.warmup_cycle 0 # Cycle when the warmup percentage was hit. -system.cpu.icache.tags.occ_blocks::cpu.inst 162.122030 # Average occupied blocks per requestor -system.cpu.icache.tags.occ_percent::cpu.inst 0.079161 # Average percentage of cache occupancy -system.cpu.icache.tags.occ_percent::total 0.079161 # Average percentage of cache occupancy -system.cpu.icache.tags.occ_task_id_blocks::1024 319 # Occupied blocks per task id +system.cpu.icache.tags.occ_blocks::cpu.inst 161.834516 # Average occupied blocks per requestor +system.cpu.icache.tags.occ_percent::cpu.inst 0.079021 # Average percentage of cache occupancy +system.cpu.icache.tags.occ_percent::total 0.079021 # Average percentage of cache occupancy +system.cpu.icache.tags.occ_task_id_blocks::1024 318 # Occupied blocks per task id system.cpu.icache.tags.age_task_id_blocks_1024::0 106 # Occupied blocks per task id -system.cpu.icache.tags.age_task_id_blocks_1024::1 213 # Occupied blocks per task id -system.cpu.icache.tags.occ_task_id_percent::1024 0.155762 # Percentage of cache occupancy per task id -system.cpu.icache.tags.tag_accesses 4821 # Number of tag accesses -system.cpu.icache.tags.data_accesses 4821 # Number of data accesses -system.cpu.icache.pwrStateResidencyTicks::UNDEFINED 29977500 # Cumulative time (in ticks) in various power states -system.cpu.icache.ReadReq_hits::cpu.inst 1926 # number of ReadReq hits -system.cpu.icache.ReadReq_hits::total 1926 # number of ReadReq hits -system.cpu.icache.demand_hits::cpu.inst 1926 # number of demand (read+write) hits -system.cpu.icache.demand_hits::total 1926 # number of demand (read+write) hits -system.cpu.icache.overall_hits::cpu.inst 1926 # number of overall hits -system.cpu.icache.overall_hits::total 1926 # number of overall hits -system.cpu.icache.ReadReq_misses::cpu.inst 323 # number of ReadReq misses -system.cpu.icache.ReadReq_misses::total 323 # number of ReadReq misses -system.cpu.icache.demand_misses::cpu.inst 323 # number of demand (read+write) misses -system.cpu.icache.demand_misses::total 323 # number of demand (read+write) misses -system.cpu.icache.overall_misses::cpu.inst 323 # number of overall misses -system.cpu.icache.overall_misses::total 323 # number of overall misses -system.cpu.icache.ReadReq_miss_latency::cpu.inst 23530000 # number of ReadReq miss cycles -system.cpu.icache.ReadReq_miss_latency::total 23530000 # number of ReadReq miss cycles -system.cpu.icache.demand_miss_latency::cpu.inst 23530000 # number of demand (read+write) miss cycles -system.cpu.icache.demand_miss_latency::total 23530000 # number of demand (read+write) miss cycles -system.cpu.icache.overall_miss_latency::cpu.inst 23530000 # number of overall miss cycles -system.cpu.icache.overall_miss_latency::total 23530000 # number of overall miss cycles -system.cpu.icache.ReadReq_accesses::cpu.inst 2249 # number of ReadReq accesses(hits+misses) -system.cpu.icache.ReadReq_accesses::total 2249 # number of ReadReq accesses(hits+misses) -system.cpu.icache.demand_accesses::cpu.inst 2249 # 
number of demand (read+write) accesses -system.cpu.icache.demand_accesses::total 2249 # number of demand (read+write) accesses -system.cpu.icache.overall_accesses::cpu.inst 2249 # number of overall (read+write) accesses -system.cpu.icache.overall_accesses::total 2249 # number of overall (read+write) accesses -system.cpu.icache.ReadReq_miss_rate::cpu.inst 0.143619 # miss rate for ReadReq accesses -system.cpu.icache.ReadReq_miss_rate::total 0.143619 # miss rate for ReadReq accesses -system.cpu.icache.demand_miss_rate::cpu.inst 0.143619 # miss rate for demand accesses -system.cpu.icache.demand_miss_rate::total 0.143619 # miss rate for demand accesses -system.cpu.icache.overall_miss_rate::cpu.inst 0.143619 # miss rate for overall accesses -system.cpu.icache.overall_miss_rate::total 0.143619 # miss rate for overall accesses -system.cpu.icache.ReadReq_avg_miss_latency::cpu.inst 72848.297214 # average ReadReq miss latency -system.cpu.icache.ReadReq_avg_miss_latency::total 72848.297214 # average ReadReq miss latency -system.cpu.icache.demand_avg_miss_latency::cpu.inst 72848.297214 # average overall miss latency -system.cpu.icache.demand_avg_miss_latency::total 72848.297214 # average overall miss latency -system.cpu.icache.overall_avg_miss_latency::cpu.inst 72848.297214 # average overall miss latency -system.cpu.icache.overall_avg_miss_latency::total 72848.297214 # average overall miss latency +system.cpu.icache.tags.age_task_id_blocks_1024::1 212 # Occupied blocks per task id +system.cpu.icache.tags.occ_task_id_percent::1024 0.155273 # Percentage of cache occupancy per task id +system.cpu.icache.tags.tag_accesses 4892 # Number of tag accesses +system.cpu.icache.tags.data_accesses 4892 # Number of data accesses +system.cpu.icache.pwrStateResidencyTicks::UNDEFINED 30083500 # Cumulative time (in ticks) in various power states +system.cpu.icache.ReadReq_hits::cpu.inst 1963 # number of ReadReq hits +system.cpu.icache.ReadReq_hits::total 1963 # number of ReadReq hits +system.cpu.icache.demand_hits::cpu.inst 1963 # number of demand (read+write) hits +system.cpu.icache.demand_hits::total 1963 # number of demand (read+write) hits +system.cpu.icache.overall_hits::cpu.inst 1963 # number of overall hits +system.cpu.icache.overall_hits::total 1963 # number of overall hits +system.cpu.icache.ReadReq_misses::cpu.inst 322 # number of ReadReq misses +system.cpu.icache.ReadReq_misses::total 322 # number of ReadReq misses +system.cpu.icache.demand_misses::cpu.inst 322 # number of demand (read+write) misses +system.cpu.icache.demand_misses::total 322 # number of demand (read+write) misses +system.cpu.icache.overall_misses::cpu.inst 322 # number of overall misses +system.cpu.icache.overall_misses::total 322 # number of overall misses +system.cpu.icache.ReadReq_miss_latency::cpu.inst 23678000 # number of ReadReq miss cycles +system.cpu.icache.ReadReq_miss_latency::total 23678000 # number of ReadReq miss cycles +system.cpu.icache.demand_miss_latency::cpu.inst 23678000 # number of demand (read+write) miss cycles +system.cpu.icache.demand_miss_latency::total 23678000 # number of demand (read+write) miss cycles +system.cpu.icache.overall_miss_latency::cpu.inst 23678000 # number of overall miss cycles +system.cpu.icache.overall_miss_latency::total 23678000 # number of overall miss cycles +system.cpu.icache.ReadReq_accesses::cpu.inst 2285 # number of ReadReq accesses(hits+misses) +system.cpu.icache.ReadReq_accesses::total 2285 # number of ReadReq accesses(hits+misses) +system.cpu.icache.demand_accesses::cpu.inst 2285 # 
number of demand (read+write) accesses +system.cpu.icache.demand_accesses::total 2285 # number of demand (read+write) accesses +system.cpu.icache.overall_accesses::cpu.inst 2285 # number of overall (read+write) accesses +system.cpu.icache.overall_accesses::total 2285 # number of overall (read+write) accesses +system.cpu.icache.ReadReq_miss_rate::cpu.inst 0.140919 # miss rate for ReadReq accesses +system.cpu.icache.ReadReq_miss_rate::total 0.140919 # miss rate for ReadReq accesses +system.cpu.icache.demand_miss_rate::cpu.inst 0.140919 # miss rate for demand accesses +system.cpu.icache.demand_miss_rate::total 0.140919 # miss rate for demand accesses +system.cpu.icache.overall_miss_rate::cpu.inst 0.140919 # miss rate for overall accesses +system.cpu.icache.overall_miss_rate::total 0.140919 # miss rate for overall accesses +system.cpu.icache.ReadReq_avg_miss_latency::cpu.inst 73534.161491 # average ReadReq miss latency +system.cpu.icache.ReadReq_avg_miss_latency::total 73534.161491 # average ReadReq miss latency +system.cpu.icache.demand_avg_miss_latency::cpu.inst 73534.161491 # average overall miss latency +system.cpu.icache.demand_avg_miss_latency::total 73534.161491 # average overall miss latency +system.cpu.icache.overall_avg_miss_latency::cpu.inst 73534.161491 # average overall miss latency +system.cpu.icache.overall_avg_miss_latency::total 73534.161491 # average overall miss latency system.cpu.icache.blocked_cycles::no_mshrs 0 # number of cycles access was blocked system.cpu.icache.blocked_cycles::no_targets 0 # number of cycles access was blocked system.cpu.icache.blocked::no_mshrs 0 # number of cycles access was blocked @@ -614,61 +614,61 @@ system.cpu.icache.avg_blocked_cycles::no_mshrs nan system.cpu.icache.avg_blocked_cycles::no_targets nan # average number of cycles each access was blocked system.cpu.icache.writebacks::writebacks 4 # number of writebacks system.cpu.icache.writebacks::total 4 # number of writebacks -system.cpu.icache.ReadReq_mshr_misses::cpu.inst 323 # number of ReadReq MSHR misses -system.cpu.icache.ReadReq_mshr_misses::total 323 # number of ReadReq MSHR misses -system.cpu.icache.demand_mshr_misses::cpu.inst 323 # number of demand (read+write) MSHR misses -system.cpu.icache.demand_mshr_misses::total 323 # number of demand (read+write) MSHR misses -system.cpu.icache.overall_mshr_misses::cpu.inst 323 # number of overall MSHR misses -system.cpu.icache.overall_mshr_misses::total 323 # number of overall MSHR misses -system.cpu.icache.ReadReq_mshr_miss_latency::cpu.inst 23207000 # number of ReadReq MSHR miss cycles -system.cpu.icache.ReadReq_mshr_miss_latency::total 23207000 # number of ReadReq MSHR miss cycles -system.cpu.icache.demand_mshr_miss_latency::cpu.inst 23207000 # number of demand (read+write) MSHR miss cycles -system.cpu.icache.demand_mshr_miss_latency::total 23207000 # number of demand (read+write) MSHR miss cycles -system.cpu.icache.overall_mshr_miss_latency::cpu.inst 23207000 # number of overall MSHR miss cycles -system.cpu.icache.overall_mshr_miss_latency::total 23207000 # number of overall MSHR miss cycles -system.cpu.icache.ReadReq_mshr_miss_rate::cpu.inst 0.143619 # mshr miss rate for ReadReq accesses -system.cpu.icache.ReadReq_mshr_miss_rate::total 0.143619 # mshr miss rate for ReadReq accesses -system.cpu.icache.demand_mshr_miss_rate::cpu.inst 0.143619 # mshr miss rate for demand accesses -system.cpu.icache.demand_mshr_miss_rate::total 0.143619 # mshr miss rate for demand accesses -system.cpu.icache.overall_mshr_miss_rate::cpu.inst 0.143619 # mshr 
miss rate for overall accesses -system.cpu.icache.overall_mshr_miss_rate::total 0.143619 # mshr miss rate for overall accesses -system.cpu.icache.ReadReq_avg_mshr_miss_latency::cpu.inst 71848.297214 # average ReadReq mshr miss latency -system.cpu.icache.ReadReq_avg_mshr_miss_latency::total 71848.297214 # average ReadReq mshr miss latency -system.cpu.icache.demand_avg_mshr_miss_latency::cpu.inst 71848.297214 # average overall mshr miss latency -system.cpu.icache.demand_avg_mshr_miss_latency::total 71848.297214 # average overall mshr miss latency -system.cpu.icache.overall_avg_mshr_miss_latency::cpu.inst 71848.297214 # average overall mshr miss latency -system.cpu.icache.overall_avg_mshr_miss_latency::total 71848.297214 # average overall mshr miss latency -system.cpu.l2cache.tags.pwrStateResidencyTicks::UNDEFINED 29977500 # Cumulative time (in ticks) in various power states +system.cpu.icache.ReadReq_mshr_misses::cpu.inst 322 # number of ReadReq MSHR misses +system.cpu.icache.ReadReq_mshr_misses::total 322 # number of ReadReq MSHR misses +system.cpu.icache.demand_mshr_misses::cpu.inst 322 # number of demand (read+write) MSHR misses +system.cpu.icache.demand_mshr_misses::total 322 # number of demand (read+write) MSHR misses +system.cpu.icache.overall_mshr_misses::cpu.inst 322 # number of overall MSHR misses +system.cpu.icache.overall_mshr_misses::total 322 # number of overall MSHR misses +system.cpu.icache.ReadReq_mshr_miss_latency::cpu.inst 23356000 # number of ReadReq MSHR miss cycles +system.cpu.icache.ReadReq_mshr_miss_latency::total 23356000 # number of ReadReq MSHR miss cycles +system.cpu.icache.demand_mshr_miss_latency::cpu.inst 23356000 # number of demand (read+write) MSHR miss cycles +system.cpu.icache.demand_mshr_miss_latency::total 23356000 # number of demand (read+write) MSHR miss cycles +system.cpu.icache.overall_mshr_miss_latency::cpu.inst 23356000 # number of overall MSHR miss cycles +system.cpu.icache.overall_mshr_miss_latency::total 23356000 # number of overall MSHR miss cycles +system.cpu.icache.ReadReq_mshr_miss_rate::cpu.inst 0.140919 # mshr miss rate for ReadReq accesses +system.cpu.icache.ReadReq_mshr_miss_rate::total 0.140919 # mshr miss rate for ReadReq accesses +system.cpu.icache.demand_mshr_miss_rate::cpu.inst 0.140919 # mshr miss rate for demand accesses +system.cpu.icache.demand_mshr_miss_rate::total 0.140919 # mshr miss rate for demand accesses +system.cpu.icache.overall_mshr_miss_rate::cpu.inst 0.140919 # mshr miss rate for overall accesses +system.cpu.icache.overall_mshr_miss_rate::total 0.140919 # mshr miss rate for overall accesses +system.cpu.icache.ReadReq_avg_mshr_miss_latency::cpu.inst 72534.161491 # average ReadReq mshr miss latency +system.cpu.icache.ReadReq_avg_mshr_miss_latency::total 72534.161491 # average ReadReq mshr miss latency +system.cpu.icache.demand_avg_mshr_miss_latency::cpu.inst 72534.161491 # average overall mshr miss latency +system.cpu.icache.demand_avg_mshr_miss_latency::total 72534.161491 # average overall mshr miss latency +system.cpu.icache.overall_avg_mshr_miss_latency::cpu.inst 72534.161491 # average overall mshr miss latency +system.cpu.icache.overall_avg_mshr_miss_latency::total 72534.161491 # average overall mshr miss latency +system.cpu.l2cache.tags.pwrStateResidencyTicks::UNDEFINED 30083500 # Cumulative time (in ticks) in various power states system.cpu.l2cache.tags.replacements 0 # number of replacements -system.cpu.l2cache.tags.tagsinuse 195.781809 # Cycle average of tags in use -system.cpu.l2cache.tags.total_refs 43 # Total 
number of references to valid blocks. +system.cpu.l2cache.tags.tagsinuse 195.879475 # Cycle average of tags in use +system.cpu.l2cache.tags.total_refs 42 # Total number of references to valid blocks. system.cpu.l2cache.tags.sampled_refs 378 # Sample count of references to valid blocks. -system.cpu.l2cache.tags.avg_refs 0.113757 # Average number of references to valid blocks. +system.cpu.l2cache.tags.avg_refs 0.111111 # Average number of references to valid blocks. system.cpu.l2cache.tags.warmup_cycle 0 # Cycle when the warmup percentage was hit. -system.cpu.l2cache.tags.occ_blocks::cpu.inst 154.633330 # Average occupied blocks per requestor -system.cpu.l2cache.tags.occ_blocks::cpu.data 41.148479 # Average occupied blocks per requestor -system.cpu.l2cache.tags.occ_percent::cpu.inst 0.004719 # Average percentage of cache occupancy -system.cpu.l2cache.tags.occ_percent::cpu.data 0.001256 # Average percentage of cache occupancy -system.cpu.l2cache.tags.occ_percent::total 0.005975 # Average percentage of cache occupancy +system.cpu.l2cache.tags.occ_blocks::cpu.inst 154.746810 # Average occupied blocks per requestor +system.cpu.l2cache.tags.occ_blocks::cpu.data 41.132665 # Average occupied blocks per requestor +system.cpu.l2cache.tags.occ_percent::cpu.inst 0.004722 # Average percentage of cache occupancy +system.cpu.l2cache.tags.occ_percent::cpu.data 0.001255 # Average percentage of cache occupancy +system.cpu.l2cache.tags.occ_percent::total 0.005978 # Average percentage of cache occupancy system.cpu.l2cache.tags.occ_task_id_blocks::1024 378 # Occupied blocks per task id system.cpu.l2cache.tags.age_task_id_blocks_1024::0 124 # Occupied blocks per task id system.cpu.l2cache.tags.age_task_id_blocks_1024::1 254 # Occupied blocks per task id system.cpu.l2cache.tags.occ_task_id_percent::1024 0.011536 # Percentage of cache occupancy per task id -system.cpu.l2cache.tags.tag_accesses 4197 # Number of tag accesses -system.cpu.l2cache.tags.data_accesses 4197 # Number of data accesses -system.cpu.l2cache.pwrStateResidencyTicks::UNDEFINED 29977500 # Cumulative time (in ticks) in various power states +system.cpu.l2cache.tags.tag_accesses 4189 # Number of tag accesses +system.cpu.l2cache.tags.data_accesses 4189 # Number of data accesses +system.cpu.l2cache.pwrStateResidencyTicks::UNDEFINED 30083500 # Cumulative time (in ticks) in various power states system.cpu.l2cache.WritebackClean_hits::writebacks 3 # number of WritebackClean hits system.cpu.l2cache.WritebackClean_hits::total 3 # number of WritebackClean hits -system.cpu.l2cache.ReadCleanReq_hits::cpu.inst 18 # number of ReadCleanReq hits -system.cpu.l2cache.ReadCleanReq_hits::total 18 # number of ReadCleanReq hits +system.cpu.l2cache.ReadCleanReq_hits::cpu.inst 17 # number of ReadCleanReq hits +system.cpu.l2cache.ReadCleanReq_hits::total 17 # number of ReadCleanReq hits system.cpu.l2cache.ReadSharedReq_hits::cpu.data 22 # number of ReadSharedReq hits system.cpu.l2cache.ReadSharedReq_hits::total 22 # number of ReadSharedReq hits -system.cpu.l2cache.demand_hits::cpu.inst 18 # number of demand (read+write) hits +system.cpu.l2cache.demand_hits::cpu.inst 17 # number of demand (read+write) hits system.cpu.l2cache.demand_hits::cpu.data 22 # number of demand (read+write) hits -system.cpu.l2cache.demand_hits::total 40 # number of demand (read+write) hits -system.cpu.l2cache.overall_hits::cpu.inst 18 # number of overall hits +system.cpu.l2cache.demand_hits::total 39 # number of demand (read+write) hits +system.cpu.l2cache.overall_hits::cpu.inst 17 # number of 
overall hits system.cpu.l2cache.overall_hits::cpu.data 22 # number of overall hits -system.cpu.l2cache.overall_hits::total 40 # number of overall hits +system.cpu.l2cache.overall_hits::total 39 # number of overall hits system.cpu.l2cache.ReadExReq_misses::cpu.data 43 # number of ReadExReq misses system.cpu.l2cache.ReadExReq_misses::total 43 # number of ReadExReq misses system.cpu.l2cache.ReadCleanReq_misses::cpu.inst 305 # number of ReadCleanReq misses @@ -681,56 +681,56 @@ system.cpu.l2cache.demand_misses::total 429 # nu system.cpu.l2cache.overall_misses::cpu.inst 305 # number of overall misses system.cpu.l2cache.overall_misses::cpu.data 124 # number of overall misses system.cpu.l2cache.overall_misses::total 429 # number of overall misses -system.cpu.l2cache.ReadExReq_miss_latency::cpu.data 3129500 # number of ReadExReq miss cycles -system.cpu.l2cache.ReadExReq_miss_latency::total 3129500 # number of ReadExReq miss cycles -system.cpu.l2cache.ReadCleanReq_miss_latency::cpu.inst 22515500 # number of ReadCleanReq miss cycles -system.cpu.l2cache.ReadCleanReq_miss_latency::total 22515500 # number of ReadCleanReq miss cycles -system.cpu.l2cache.ReadSharedReq_miss_latency::cpu.data 5956000 # number of ReadSharedReq miss cycles -system.cpu.l2cache.ReadSharedReq_miss_latency::total 5956000 # number of ReadSharedReq miss cycles -system.cpu.l2cache.demand_miss_latency::cpu.inst 22515500 # number of demand (read+write) miss cycles -system.cpu.l2cache.demand_miss_latency::cpu.data 9085500 # number of demand (read+write) miss cycles -system.cpu.l2cache.demand_miss_latency::total 31601000 # number of demand (read+write) miss cycles -system.cpu.l2cache.overall_miss_latency::cpu.inst 22515500 # number of overall miss cycles -system.cpu.l2cache.overall_miss_latency::cpu.data 9085500 # number of overall miss cycles -system.cpu.l2cache.overall_miss_latency::total 31601000 # number of overall miss cycles +system.cpu.l2cache.ReadExReq_miss_latency::cpu.data 3123500 # number of ReadExReq miss cycles +system.cpu.l2cache.ReadExReq_miss_latency::total 3123500 # number of ReadExReq miss cycles +system.cpu.l2cache.ReadCleanReq_miss_latency::cpu.inst 22677500 # number of ReadCleanReq miss cycles +system.cpu.l2cache.ReadCleanReq_miss_latency::total 22677500 # number of ReadCleanReq miss cycles +system.cpu.l2cache.ReadSharedReq_miss_latency::cpu.data 5924000 # number of ReadSharedReq miss cycles +system.cpu.l2cache.ReadSharedReq_miss_latency::total 5924000 # number of ReadSharedReq miss cycles +system.cpu.l2cache.demand_miss_latency::cpu.inst 22677500 # number of demand (read+write) miss cycles +system.cpu.l2cache.demand_miss_latency::cpu.data 9047500 # number of demand (read+write) miss cycles +system.cpu.l2cache.demand_miss_latency::total 31725000 # number of demand (read+write) miss cycles +system.cpu.l2cache.overall_miss_latency::cpu.inst 22677500 # number of overall miss cycles +system.cpu.l2cache.overall_miss_latency::cpu.data 9047500 # number of overall miss cycles +system.cpu.l2cache.overall_miss_latency::total 31725000 # number of overall miss cycles system.cpu.l2cache.WritebackClean_accesses::writebacks 3 # number of WritebackClean accesses(hits+misses) system.cpu.l2cache.WritebackClean_accesses::total 3 # number of WritebackClean accesses(hits+misses) system.cpu.l2cache.ReadExReq_accesses::cpu.data 43 # number of ReadExReq accesses(hits+misses) system.cpu.l2cache.ReadExReq_accesses::total 43 # number of ReadExReq accesses(hits+misses) -system.cpu.l2cache.ReadCleanReq_accesses::cpu.inst 323 # number of 
ReadCleanReq accesses(hits+misses) -system.cpu.l2cache.ReadCleanReq_accesses::total 323 # number of ReadCleanReq accesses(hits+misses) +system.cpu.l2cache.ReadCleanReq_accesses::cpu.inst 322 # number of ReadCleanReq accesses(hits+misses) +system.cpu.l2cache.ReadCleanReq_accesses::total 322 # number of ReadCleanReq accesses(hits+misses) system.cpu.l2cache.ReadSharedReq_accesses::cpu.data 103 # number of ReadSharedReq accesses(hits+misses) system.cpu.l2cache.ReadSharedReq_accesses::total 103 # number of ReadSharedReq accesses(hits+misses) -system.cpu.l2cache.demand_accesses::cpu.inst 323 # number of demand (read+write) accesses +system.cpu.l2cache.demand_accesses::cpu.inst 322 # number of demand (read+write) accesses system.cpu.l2cache.demand_accesses::cpu.data 146 # number of demand (read+write) accesses -system.cpu.l2cache.demand_accesses::total 469 # number of demand (read+write) accesses -system.cpu.l2cache.overall_accesses::cpu.inst 323 # number of overall (read+write) accesses +system.cpu.l2cache.demand_accesses::total 468 # number of demand (read+write) accesses +system.cpu.l2cache.overall_accesses::cpu.inst 322 # number of overall (read+write) accesses system.cpu.l2cache.overall_accesses::cpu.data 146 # number of overall (read+write) accesses -system.cpu.l2cache.overall_accesses::total 469 # number of overall (read+write) accesses +system.cpu.l2cache.overall_accesses::total 468 # number of overall (read+write) accesses system.cpu.l2cache.ReadExReq_miss_rate::cpu.data 1 # miss rate for ReadExReq accesses system.cpu.l2cache.ReadExReq_miss_rate::total 1 # miss rate for ReadExReq accesses -system.cpu.l2cache.ReadCleanReq_miss_rate::cpu.inst 0.944272 # miss rate for ReadCleanReq accesses -system.cpu.l2cache.ReadCleanReq_miss_rate::total 0.944272 # miss rate for ReadCleanReq accesses +system.cpu.l2cache.ReadCleanReq_miss_rate::cpu.inst 0.947205 # miss rate for ReadCleanReq accesses +system.cpu.l2cache.ReadCleanReq_miss_rate::total 0.947205 # miss rate for ReadCleanReq accesses system.cpu.l2cache.ReadSharedReq_miss_rate::cpu.data 0.786408 # miss rate for ReadSharedReq accesses system.cpu.l2cache.ReadSharedReq_miss_rate::total 0.786408 # miss rate for ReadSharedReq accesses -system.cpu.l2cache.demand_miss_rate::cpu.inst 0.944272 # miss rate for demand accesses +system.cpu.l2cache.demand_miss_rate::cpu.inst 0.947205 # miss rate for demand accesses system.cpu.l2cache.demand_miss_rate::cpu.data 0.849315 # miss rate for demand accesses -system.cpu.l2cache.demand_miss_rate::total 0.914712 # miss rate for demand accesses -system.cpu.l2cache.overall_miss_rate::cpu.inst 0.944272 # miss rate for overall accesses +system.cpu.l2cache.demand_miss_rate::total 0.916667 # miss rate for demand accesses +system.cpu.l2cache.overall_miss_rate::cpu.inst 0.947205 # miss rate for overall accesses system.cpu.l2cache.overall_miss_rate::cpu.data 0.849315 # miss rate for overall accesses -system.cpu.l2cache.overall_miss_rate::total 0.914712 # miss rate for overall accesses -system.cpu.l2cache.ReadExReq_avg_miss_latency::cpu.data 72779.069767 # average ReadExReq miss latency -system.cpu.l2cache.ReadExReq_avg_miss_latency::total 72779.069767 # average ReadExReq miss latency -system.cpu.l2cache.ReadCleanReq_avg_miss_latency::cpu.inst 73821.311475 # average ReadCleanReq miss latency -system.cpu.l2cache.ReadCleanReq_avg_miss_latency::total 73821.311475 # average ReadCleanReq miss latency -system.cpu.l2cache.ReadSharedReq_avg_miss_latency::cpu.data 73530.864198 # average ReadSharedReq miss latency 
-system.cpu.l2cache.ReadSharedReq_avg_miss_latency::total 73530.864198 # average ReadSharedReq miss latency -system.cpu.l2cache.demand_avg_miss_latency::cpu.inst 73821.311475 # average overall miss latency -system.cpu.l2cache.demand_avg_miss_latency::cpu.data 73270.161290 # average overall miss latency -system.cpu.l2cache.demand_avg_miss_latency::total 73662.004662 # average overall miss latency -system.cpu.l2cache.overall_avg_miss_latency::cpu.inst 73821.311475 # average overall miss latency -system.cpu.l2cache.overall_avg_miss_latency::cpu.data 73270.161290 # average overall miss latency -system.cpu.l2cache.overall_avg_miss_latency::total 73662.004662 # average overall miss latency +system.cpu.l2cache.overall_miss_rate::total 0.916667 # miss rate for overall accesses +system.cpu.l2cache.ReadExReq_avg_miss_latency::cpu.data 72639.534884 # average ReadExReq miss latency +system.cpu.l2cache.ReadExReq_avg_miss_latency::total 72639.534884 # average ReadExReq miss latency +system.cpu.l2cache.ReadCleanReq_avg_miss_latency::cpu.inst 74352.459016 # average ReadCleanReq miss latency +system.cpu.l2cache.ReadCleanReq_avg_miss_latency::total 74352.459016 # average ReadCleanReq miss latency +system.cpu.l2cache.ReadSharedReq_avg_miss_latency::cpu.data 73135.802469 # average ReadSharedReq miss latency +system.cpu.l2cache.ReadSharedReq_avg_miss_latency::total 73135.802469 # average ReadSharedReq miss latency +system.cpu.l2cache.demand_avg_miss_latency::cpu.inst 74352.459016 # average overall miss latency +system.cpu.l2cache.demand_avg_miss_latency::cpu.data 72963.709677 # average overall miss latency +system.cpu.l2cache.demand_avg_miss_latency::total 73951.048951 # average overall miss latency +system.cpu.l2cache.overall_avg_miss_latency::cpu.inst 74352.459016 # average overall miss latency +system.cpu.l2cache.overall_avg_miss_latency::cpu.data 72963.709677 # average overall miss latency +system.cpu.l2cache.overall_avg_miss_latency::total 73951.048951 # average overall miss latency system.cpu.l2cache.blocked_cycles::no_mshrs 0 # number of cycles access was blocked system.cpu.l2cache.blocked_cycles::no_targets 0 # number of cycles access was blocked system.cpu.l2cache.blocked::no_mshrs 0 # number of cycles access was blocked @@ -755,80 +755,81 @@ system.cpu.l2cache.demand_mshr_misses::total 421 system.cpu.l2cache.overall_mshr_misses::cpu.inst 305 # number of overall MSHR misses system.cpu.l2cache.overall_mshr_misses::cpu.data 116 # number of overall MSHR misses system.cpu.l2cache.overall_mshr_misses::total 421 # number of overall MSHR misses -system.cpu.l2cache.ReadExReq_mshr_miss_latency::cpu.data 2699500 # number of ReadExReq MSHR miss cycles -system.cpu.l2cache.ReadExReq_mshr_miss_latency::total 2699500 # number of ReadExReq MSHR miss cycles -system.cpu.l2cache.ReadCleanReq_mshr_miss_latency::cpu.inst 19465500 # number of ReadCleanReq MSHR miss cycles -system.cpu.l2cache.ReadCleanReq_mshr_miss_latency::total 19465500 # number of ReadCleanReq MSHR miss cycles -system.cpu.l2cache.ReadSharedReq_mshr_miss_latency::cpu.data 4696000 # number of ReadSharedReq MSHR miss cycles -system.cpu.l2cache.ReadSharedReq_mshr_miss_latency::total 4696000 # number of ReadSharedReq MSHR miss cycles -system.cpu.l2cache.demand_mshr_miss_latency::cpu.inst 19465500 # number of demand (read+write) MSHR miss cycles -system.cpu.l2cache.demand_mshr_miss_latency::cpu.data 7395500 # number of demand (read+write) MSHR miss cycles -system.cpu.l2cache.demand_mshr_miss_latency::total 26861000 # number of demand (read+write) MSHR miss 
cycles -system.cpu.l2cache.overall_mshr_miss_latency::cpu.inst 19465500 # number of overall MSHR miss cycles -system.cpu.l2cache.overall_mshr_miss_latency::cpu.data 7395500 # number of overall MSHR miss cycles -system.cpu.l2cache.overall_mshr_miss_latency::total 26861000 # number of overall MSHR miss cycles +system.cpu.l2cache.ReadExReq_mshr_miss_latency::cpu.data 2693500 # number of ReadExReq MSHR miss cycles +system.cpu.l2cache.ReadExReq_mshr_miss_latency::total 2693500 # number of ReadExReq MSHR miss cycles +system.cpu.l2cache.ReadCleanReq_mshr_miss_latency::cpu.inst 19627500 # number of ReadCleanReq MSHR miss cycles +system.cpu.l2cache.ReadCleanReq_mshr_miss_latency::total 19627500 # number of ReadCleanReq MSHR miss cycles +system.cpu.l2cache.ReadSharedReq_mshr_miss_latency::cpu.data 4648000 # number of ReadSharedReq MSHR miss cycles +system.cpu.l2cache.ReadSharedReq_mshr_miss_latency::total 4648000 # number of ReadSharedReq MSHR miss cycles +system.cpu.l2cache.demand_mshr_miss_latency::cpu.inst 19627500 # number of demand (read+write) MSHR miss cycles +system.cpu.l2cache.demand_mshr_miss_latency::cpu.data 7341500 # number of demand (read+write) MSHR miss cycles +system.cpu.l2cache.demand_mshr_miss_latency::total 26969000 # number of demand (read+write) MSHR miss cycles +system.cpu.l2cache.overall_mshr_miss_latency::cpu.inst 19627500 # number of overall MSHR miss cycles +system.cpu.l2cache.overall_mshr_miss_latency::cpu.data 7341500 # number of overall MSHR miss cycles +system.cpu.l2cache.overall_mshr_miss_latency::total 26969000 # number of overall MSHR miss cycles system.cpu.l2cache.ReadExReq_mshr_miss_rate::cpu.data 1 # mshr miss rate for ReadExReq accesses system.cpu.l2cache.ReadExReq_mshr_miss_rate::total 1 # mshr miss rate for ReadExReq accesses -system.cpu.l2cache.ReadCleanReq_mshr_miss_rate::cpu.inst 0.944272 # mshr miss rate for ReadCleanReq accesses -system.cpu.l2cache.ReadCleanReq_mshr_miss_rate::total 0.944272 # mshr miss rate for ReadCleanReq accesses +system.cpu.l2cache.ReadCleanReq_mshr_miss_rate::cpu.inst 0.947205 # mshr miss rate for ReadCleanReq accesses +system.cpu.l2cache.ReadCleanReq_mshr_miss_rate::total 0.947205 # mshr miss rate for ReadCleanReq accesses system.cpu.l2cache.ReadSharedReq_mshr_miss_rate::cpu.data 0.708738 # mshr miss rate for ReadSharedReq accesses system.cpu.l2cache.ReadSharedReq_mshr_miss_rate::total 0.708738 # mshr miss rate for ReadSharedReq accesses -system.cpu.l2cache.demand_mshr_miss_rate::cpu.inst 0.944272 # mshr miss rate for demand accesses +system.cpu.l2cache.demand_mshr_miss_rate::cpu.inst 0.947205 # mshr miss rate for demand accesses system.cpu.l2cache.demand_mshr_miss_rate::cpu.data 0.794521 # mshr miss rate for demand accesses -system.cpu.l2cache.demand_mshr_miss_rate::total 0.897655 # mshr miss rate for demand accesses -system.cpu.l2cache.overall_mshr_miss_rate::cpu.inst 0.944272 # mshr miss rate for overall accesses +system.cpu.l2cache.demand_mshr_miss_rate::total 0.899573 # mshr miss rate for demand accesses +system.cpu.l2cache.overall_mshr_miss_rate::cpu.inst 0.947205 # mshr miss rate for overall accesses system.cpu.l2cache.overall_mshr_miss_rate::cpu.data 0.794521 # mshr miss rate for overall accesses -system.cpu.l2cache.overall_mshr_miss_rate::total 0.897655 # mshr miss rate for overall accesses -system.cpu.l2cache.ReadExReq_avg_mshr_miss_latency::cpu.data 62779.069767 # average ReadExReq mshr miss latency -system.cpu.l2cache.ReadExReq_avg_mshr_miss_latency::total 62779.069767 # average ReadExReq mshr miss latency 
-system.cpu.l2cache.ReadCleanReq_avg_mshr_miss_latency::cpu.inst 63821.311475 # average ReadCleanReq mshr miss latency -system.cpu.l2cache.ReadCleanReq_avg_mshr_miss_latency::total 63821.311475 # average ReadCleanReq mshr miss latency -system.cpu.l2cache.ReadSharedReq_avg_mshr_miss_latency::cpu.data 64328.767123 # average ReadSharedReq mshr miss latency -system.cpu.l2cache.ReadSharedReq_avg_mshr_miss_latency::total 64328.767123 # average ReadSharedReq mshr miss latency -system.cpu.l2cache.demand_avg_mshr_miss_latency::cpu.inst 63821.311475 # average overall mshr miss latency -system.cpu.l2cache.demand_avg_mshr_miss_latency::cpu.data 63754.310345 # average overall mshr miss latency -system.cpu.l2cache.demand_avg_mshr_miss_latency::total 63802.850356 # average overall mshr miss latency -system.cpu.l2cache.overall_avg_mshr_miss_latency::cpu.inst 63821.311475 # average overall mshr miss latency -system.cpu.l2cache.overall_avg_mshr_miss_latency::cpu.data 63754.310345 # average overall mshr miss latency -system.cpu.l2cache.overall_avg_mshr_miss_latency::total 63802.850356 # average overall mshr miss latency -system.cpu.toL2Bus.snoop_filter.tot_requests 473 # Total number of requests made to the snoop filter. -system.cpu.toL2Bus.snoop_filter.hit_single_requests 51 # Number of requests hitting in the snoop filter with a single holder of the requested data. +system.cpu.l2cache.overall_mshr_miss_rate::total 0.899573 # mshr miss rate for overall accesses +system.cpu.l2cache.ReadExReq_avg_mshr_miss_latency::cpu.data 62639.534884 # average ReadExReq mshr miss latency +system.cpu.l2cache.ReadExReq_avg_mshr_miss_latency::total 62639.534884 # average ReadExReq mshr miss latency +system.cpu.l2cache.ReadCleanReq_avg_mshr_miss_latency::cpu.inst 64352.459016 # average ReadCleanReq mshr miss latency +system.cpu.l2cache.ReadCleanReq_avg_mshr_miss_latency::total 64352.459016 # average ReadCleanReq mshr miss latency +system.cpu.l2cache.ReadSharedReq_avg_mshr_miss_latency::cpu.data 63671.232877 # average ReadSharedReq mshr miss latency +system.cpu.l2cache.ReadSharedReq_avg_mshr_miss_latency::total 63671.232877 # average ReadSharedReq mshr miss latency +system.cpu.l2cache.demand_avg_mshr_miss_latency::cpu.inst 64352.459016 # average overall mshr miss latency +system.cpu.l2cache.demand_avg_mshr_miss_latency::cpu.data 63288.793103 # average overall mshr miss latency +system.cpu.l2cache.demand_avg_mshr_miss_latency::total 64059.382423 # average overall mshr miss latency +system.cpu.l2cache.overall_avg_mshr_miss_latency::cpu.inst 64352.459016 # average overall mshr miss latency +system.cpu.l2cache.overall_avg_mshr_miss_latency::cpu.data 63288.793103 # average overall mshr miss latency +system.cpu.l2cache.overall_avg_mshr_miss_latency::total 64059.382423 # average overall mshr miss latency +system.cpu.toL2Bus.snoop_filter.tot_requests 472 # Total number of requests made to the snoop filter. +system.cpu.toL2Bus.snoop_filter.hit_single_requests 50 # Number of requests hitting in the snoop filter with a single holder of the requested data. system.cpu.toL2Bus.snoop_filter.hit_multi_requests 1 # Number of requests hitting in the snoop filter with multiple (>1) holders of the requested data. system.cpu.toL2Bus.snoop_filter.tot_snoops 0 # Total number of snoops made to the snoop filter. system.cpu.toL2Bus.snoop_filter.hit_single_snoops 0 # Number of snoops hitting in the snoop filter with a single holder of the requested data. 
system.cpu.toL2Bus.snoop_filter.hit_multi_snoops 0 # Number of snoops hitting in the snoop filter with multiple (>1) holders of the requested data. -system.cpu.toL2Bus.pwrStateResidencyTicks::UNDEFINED 29977500 # Cumulative time (in ticks) in various power states -system.cpu.toL2Bus.trans_dist::ReadResp 426 # Transaction distribution +system.cpu.toL2Bus.pwrStateResidencyTicks::UNDEFINED 30083500 # Cumulative time (in ticks) in various power states +system.cpu.toL2Bus.trans_dist::ReadResp 425 # Transaction distribution system.cpu.toL2Bus.trans_dist::WritebackClean 4 # Transaction distribution system.cpu.toL2Bus.trans_dist::ReadExReq 43 # Transaction distribution system.cpu.toL2Bus.trans_dist::ReadExResp 43 # Transaction distribution -system.cpu.toL2Bus.trans_dist::ReadCleanReq 323 # Transaction distribution +system.cpu.toL2Bus.trans_dist::ReadCleanReq 322 # Transaction distribution system.cpu.toL2Bus.trans_dist::ReadSharedReq 103 # Transaction distribution -system.cpu.toL2Bus.pkt_count_system.cpu.icache.mem_side::system.cpu.l2cache.cpu_side 650 # Packet count per connected master and slave (bytes) +system.cpu.toL2Bus.pkt_count_system.cpu.icache.mem_side::system.cpu.l2cache.cpu_side 648 # Packet count per connected master and slave (bytes) system.cpu.toL2Bus.pkt_count_system.cpu.dcache.mem_side::system.cpu.l2cache.cpu_side 292 # Packet count per connected master and slave (bytes) -system.cpu.toL2Bus.pkt_count::total 942 # Packet count per connected master and slave (bytes) -system.cpu.toL2Bus.pkt_size_system.cpu.icache.mem_side::system.cpu.l2cache.cpu_side 20928 # Cumulative packet size per connected master and slave (bytes) +system.cpu.toL2Bus.pkt_count::total 940 # Packet count per connected master and slave (bytes) +system.cpu.toL2Bus.pkt_size_system.cpu.icache.mem_side::system.cpu.l2cache.cpu_side 20864 # Cumulative packet size per connected master and slave (bytes) system.cpu.toL2Bus.pkt_size_system.cpu.dcache.mem_side::system.cpu.l2cache.cpu_side 9344 # Cumulative packet size per connected master and slave (bytes) -system.cpu.toL2Bus.pkt_size::total 30272 # Cumulative packet size per connected master and slave (bytes) +system.cpu.toL2Bus.pkt_size::total 30208 # Cumulative packet size per connected master and slave (bytes) system.cpu.toL2Bus.snoops 0 # Total snoops (count) -system.cpu.toL2Bus.snoop_fanout::samples 469 # Request fanout histogram -system.cpu.toL2Bus.snoop_fanout::mean 0.102345 # Request fanout histogram -system.cpu.toL2Bus.snoop_fanout::stdev 0.303426 # Request fanout histogram +system.cpu.toL2Bus.snoopTraffic 0 # Total snoop traffic (bytes) +system.cpu.toL2Bus.snoop_fanout::samples 468 # Request fanout histogram +system.cpu.toL2Bus.snoop_fanout::mean 0.100427 # Request fanout histogram +system.cpu.toL2Bus.snoop_fanout::stdev 0.300891 # Request fanout histogram system.cpu.toL2Bus.snoop_fanout::underflows 0 0.00% 0.00% # Request fanout histogram -system.cpu.toL2Bus.snoop_fanout::0 421 89.77% 89.77% # Request fanout histogram -system.cpu.toL2Bus.snoop_fanout::1 48 10.23% 100.00% # Request fanout histogram +system.cpu.toL2Bus.snoop_fanout::0 421 89.96% 89.96% # Request fanout histogram +system.cpu.toL2Bus.snoop_fanout::1 47 10.04% 100.00% # Request fanout histogram system.cpu.toL2Bus.snoop_fanout::2 0 0.00% 100.00% # Request fanout histogram system.cpu.toL2Bus.snoop_fanout::overflows 0 0.00% 100.00% # Request fanout histogram system.cpu.toL2Bus.snoop_fanout::min_value 0 # Request fanout histogram system.cpu.toL2Bus.snoop_fanout::max_value 1 # Request fanout histogram 
-system.cpu.toL2Bus.snoop_fanout::total 469 # Request fanout histogram -system.cpu.toL2Bus.reqLayer0.occupancy 240500 # Layer occupancy (ticks) +system.cpu.toL2Bus.snoop_fanout::total 468 # Request fanout histogram +system.cpu.toL2Bus.reqLayer0.occupancy 240000 # Layer occupancy (ticks) system.cpu.toL2Bus.reqLayer0.utilization 0.8 # Layer utilization (%) -system.cpu.toL2Bus.respLayer0.occupancy 484500 # Layer occupancy (ticks) +system.cpu.toL2Bus.respLayer0.occupancy 483000 # Layer occupancy (ticks) system.cpu.toL2Bus.respLayer0.utilization 1.6 # Layer utilization (%) system.cpu.toL2Bus.respLayer1.occupancy 222992 # Layer occupancy (ticks) system.cpu.toL2Bus.respLayer1.utilization 0.7 # Layer utilization (%) -system.membus.pwrStateResidencyTicks::UNDEFINED 29977500 # Cumulative time (in ticks) in various power states +system.membus.pwrStateResidencyTicks::UNDEFINED 30083500 # Cumulative time (in ticks) in various power states system.membus.trans_dist::ReadResp 378 # Transaction distribution system.membus.trans_dist::ReadExReq 43 # Transaction distribution system.membus.trans_dist::ReadExResp 43 # Transaction distribution @@ -838,6 +839,7 @@ system.membus.pkt_count::total 842 # Pa system.membus.pkt_size_system.cpu.l2cache.mem_side::system.physmem.port 26944 # Cumulative packet size per connected master and slave (bytes) system.membus.pkt_size::total 26944 # Cumulative packet size per connected master and slave (bytes) system.membus.snoops 0 # Total snoops (count) +system.membus.snoopTraffic 0 # Total snoop traffic (bytes) system.membus.snoop_fanout::samples 421 # Request fanout histogram system.membus.snoop_fanout::mean 0 # Request fanout histogram system.membus.snoop_fanout::stdev 0 # Request fanout histogram @@ -848,9 +850,9 @@ system.membus.snoop_fanout::overflows 0 0.00% 100.00% # Re system.membus.snoop_fanout::min_value 0 # Request fanout histogram system.membus.snoop_fanout::max_value 0 # Request fanout histogram system.membus.snoop_fanout::total 421 # Request fanout histogram -system.membus.reqLayer0.occupancy 489000 # Layer occupancy (ticks) +system.membus.reqLayer0.occupancy 490000 # Layer occupancy (ticks) system.membus.reqLayer0.utilization 1.6 # Layer utilization (%) -system.membus.respLayer1.occupancy 2236750 # Layer occupancy (ticks) -system.membus.respLayer1.utilization 7.5 # Layer utilization (%) +system.membus.respLayer1.occupancy 2237750 # Layer occupancy (ticks) +system.membus.respLayer1.utilization 7.4 # Layer utilization (%) ---------- End Simulation Statistics ---------- diff --git a/tests/quick/se/00.hello/ref/arm/linux/o3-timing-checker/config.ini b/tests/quick/se/00.hello/ref/arm/linux/o3-timing-checker/config.ini index db680b227..78e5f6bf3 100644 --- a/tests/quick/se/00.hello/ref/arm/linux/o3-timing-checker/config.ini +++ b/tests/quick/se/00.hello/ref/arm/linux/o3-timing-checker/config.ini @@ -14,6 +14,7 @@ children=clk_domain cpu cpu_clk_domain dvfs_handler membus physmem voltage_domai boot_osflags=a cache_line_size=64 clk_domain=system.clk_domain +default_p_state=UNDEFINED eventq_index=0 exit_on_work_items=false init_param=0 @@ -27,6 +28,10 @@ memories=system.physmem mmap_using_noreserve=false multi_thread=false num_work_ids=16 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null readfile= symbolfile= thermal_components= @@ -72,6 +77,7 @@ cpu_id=0 decodeToFetchDelay=1 decodeToRenameDelay=1 decodeWidth=8 +default_p_state=UNDEFINED dispatchWidth=8 do_checkpoint_insts=true do_quiesce=true @@ -110,6 +116,10 @@ 
numPhysIntRegs=256 numROBEntries=192 numRobs=1 numThreads=1 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null profile=0 progress_interval=0 renameToDecodeDelay=1 @@ -168,6 +178,7 @@ children=dstage2_mmu dtb isa istage2_mmu itb tracer checker=Null clk_domain=system.cpu_clk_domain cpu_id=0 +default_p_state=UNDEFINED do_checkpoint_insts=true do_quiesce=true do_statistics_insts=true @@ -186,6 +197,10 @@ max_insts_any_thread=0 max_loads_all_threads=0 max_loads_any_thread=0 numThreads=1 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null profile=0 progress_interval=0 simpoint_start_insts= @@ -216,9 +231,14 @@ walker=system.cpu.checker.dstage2_mmu.stage2_tlb.walker [system.cpu.checker.dstage2_mmu.stage2_tlb.walker] type=ArmTableWalker clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 is_stage2=true num_squash_per_cycle=2 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sys=system [system.cpu.checker.dtb] @@ -232,9 +252,14 @@ walker=system.cpu.checker.dtb.walker [system.cpu.checker.dtb.walker] type=ArmTableWalker clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 is_stage2=false num_squash_per_cycle=2 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sys=system port=system.cpu.toL2Bus.slave[5] @@ -288,9 +313,14 @@ walker=system.cpu.checker.istage2_mmu.stage2_tlb.walker [system.cpu.checker.istage2_mmu.stage2_tlb.walker] type=ArmTableWalker clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 is_stage2=true num_squash_per_cycle=2 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sys=system [system.cpu.checker.itb] @@ -304,9 +334,14 @@ walker=system.cpu.checker.itb.walker [system.cpu.checker.itb.walker] type=ArmTableWalker clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 is_stage2=false num_squash_per_cycle=2 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sys=system port=system.cpu.toL2Bus.slave[4] @@ -321,12 +356,17 @@ addr_ranges=0:18446744073709551615 assoc=2 clk_domain=system.cpu_clk_domain clusivity=mostly_incl +default_p_state=UNDEFINED demand_mshr_reserve=1 eventq_index=0 hit_latency=2 is_read_only=false max_miss_count=0 mshrs=4 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null prefetch_on_access=false prefetcher=Null response_latency=2 @@ -345,8 +385,13 @@ type=LRU assoc=2 block_size=64 clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 hit_latency=2 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sequential_access=false size=262144 @@ -369,9 +414,14 @@ walker=system.cpu.dstage2_mmu.stage2_tlb.walker [system.cpu.dstage2_mmu.stage2_tlb.walker] type=ArmTableWalker clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 is_stage2=true num_squash_per_cycle=2 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sys=system [system.cpu.dtb] @@ -385,9 +435,14 @@ walker=system.cpu.dtb.walker [system.cpu.dtb.walker] type=ArmTableWalker clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 is_stage2=false num_squash_per_cycle=2 
+p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sys=system port=system.cpu.toL2Bus.slave[3] @@ -705,12 +760,17 @@ addr_ranges=0:18446744073709551615 assoc=2 clk_domain=system.cpu_clk_domain clusivity=mostly_incl +default_p_state=UNDEFINED demand_mshr_reserve=1 eventq_index=0 hit_latency=2 is_read_only=true max_miss_count=0 mshrs=4 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null prefetch_on_access=false prefetcher=Null response_latency=2 @@ -729,8 +789,13 @@ type=LRU assoc=2 block_size=64 clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 hit_latency=2 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sequential_access=false size=131072 @@ -788,9 +853,14 @@ walker=system.cpu.istage2_mmu.stage2_tlb.walker [system.cpu.istage2_mmu.stage2_tlb.walker] type=ArmTableWalker clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 is_stage2=true num_squash_per_cycle=2 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sys=system [system.cpu.itb] @@ -804,9 +874,14 @@ walker=system.cpu.itb.walker [system.cpu.itb.walker] type=ArmTableWalker clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 is_stage2=false num_squash_per_cycle=2 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sys=system port=system.cpu.toL2Bus.slave[2] @@ -817,12 +892,17 @@ addr_ranges=0:18446744073709551615 assoc=8 clk_domain=system.cpu_clk_domain clusivity=mostly_incl +default_p_state=UNDEFINED demand_mshr_reserve=1 eventq_index=0 hit_latency=20 is_read_only=false max_miss_count=0 mshrs=20 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null prefetch_on_access=false prefetcher=Null response_latency=20 @@ -841,8 +921,13 @@ type=LRU assoc=8 block_size=64 clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 hit_latency=20 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sequential_access=false size=2097152 @@ -850,10 +935,15 @@ size=2097152 type=CoherentXBar children=snoop_filter clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 forward_latency=0 frontend_latency=1 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 point_of_coherency=false +power_model=Null response_latency=1 snoop_filter=system.cpu.toL2Bus.snoop_filter snoop_response_latency=1 @@ -884,7 +974,7 @@ env= errout=cerr euid=100 eventq_index=0 -executable=/home/stever/hg/m5sim.org/gem5/tests/test-progs/hello/bin/arm/linux/hello +executable=/arm/projectscratch/randd/systems/dist/test-progs/hello/bin/arm/linux/hello gid=100 input=cin kvmInSE=false @@ -916,10 +1006,15 @@ transition_latency=100000000 [system.membus] type=CoherentXBar clk_domain=system.clk_domain +default_p_state=UNDEFINED eventq_index=0 forward_latency=4 frontend_latency=3 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 point_of_coherency=true +power_model=Null response_latency=2 snoop_filter=Null snoop_response_latency=4 @@ -963,6 +1058,7 @@ burst_length=8 channels=1 clk_domain=system.clk_domain conf_table_reported=true +default_p_state=UNDEFINED device_bus_width=8 device_rowbuffer_size=1024 device_size=536870912 @@ -974,7 
+1070,11 @@ max_accesses_per_row=16 mem_sched_policy=frfcfs min_writes_per_switch=16 null=false +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 page_policy=open_adaptive +power_model=Null range=0:134217727 ranks_per_channel=2 read_buffer_size=32 diff --git a/tests/quick/se/00.hello/ref/arm/linux/o3-timing-checker/simerr b/tests/quick/se/00.hello/ref/arm/linux/o3-timing-checker/simerr index 341b479f7..57447a9b7 100755 --- a/tests/quick/se/00.hello/ref/arm/linux/o3-timing-checker/simerr +++ b/tests/quick/se/00.hello/ref/arm/linux/o3-timing-checker/simerr @@ -1,2 +1,4 @@ warn: DRAM device capacity (8192 Mbytes) does not match the address range assigned (128 Mbytes) warn: Sockets disabled, not accepting gdb connections +warn: ClockedObject: More than one power state change request encountered within the same simulation tick +warn: ClockedObject: More than one power state change request encountered within the same simulation tick diff --git a/tests/quick/se/00.hello/ref/arm/linux/o3-timing-checker/simout b/tests/quick/se/00.hello/ref/arm/linux/o3-timing-checker/simout index 97296d3da..8c38643eb 100755 --- a/tests/quick/se/00.hello/ref/arm/linux/o3-timing-checker/simout +++ b/tests/quick/se/00.hello/ref/arm/linux/o3-timing-checker/simout @@ -3,10 +3,10 @@ Redirecting stderr to build/ARM/tests/opt/quick/se/00.hello/arm/linux/o3-timing- gem5 Simulator System. http://gem5.org gem5 is copyrighted software; use the --copyright option for details. -gem5 compiled Mar 13 2016 22:35:59 -gem5 started Mar 13 2016 22:47:14 -gem5 executing on phenom, pid 19877 -command line: build/ARM/gem5.opt -d build/ARM/tests/opt/quick/se/00.hello/arm/linux/o3-timing-checker -re /home/stever/hg/m5sim.org/gem5/tests/run.py build/ARM/tests/opt/quick/se/00.hello/arm/linux/o3-timing-checker +gem5 compiled Jul 21 2016 14:37:41 +gem5 started Jul 21 2016 14:41:21 +gem5 executing on e108600-lin, pid 23122 +command line: /work/curdun01/gem5-external.hg/build/ARM/gem5.opt -d build/ARM/tests/opt/quick/se/00.hello/arm/linux/o3-timing-checker -re /work/curdun01/gem5-external.hg/tests/testing/../run.py quick/se/00.hello/arm/linux/o3-timing-checker Global frequency set at 1000000000000 ticks per second info: Entering event queue @ 0. Starting simulation... 
diff --git a/tests/quick/se/00.hello/ref/arm/linux/o3-timing-checker/stats.txt b/tests/quick/se/00.hello/ref/arm/linux/o3-timing-checker/stats.txt index e232e499c..f9ef4c918 100644 --- a/tests/quick/se/00.hello/ref/arm/linux/o3-timing-checker/stats.txt +++ b/tests/quick/se/00.hello/ref/arm/linux/o3-timing-checker/stats.txt @@ -4,11 +4,11 @@ sim_seconds 0.000017 # Nu sim_ticks 17232500 # Number of ticks simulated final_tick 17232500 # Number of ticks from beginning of simulation (restored from checkpoints and never reset) sim_freq 1000000000000 # Frequency of simulated ticks -host_inst_rate 78702 # Simulator instruction rate (inst/s) -host_op_rate 92158 # Simulator op (including micro ops) rate (op/s) -host_tick_rate 295258113 # Simulator tick rate (ticks/s) -host_mem_usage 310332 # Number of bytes of host memory used -host_seconds 0.06 # Real time elapsed on the host +host_inst_rate 43349 # Simulator instruction rate (inst/s) +host_op_rate 50759 # Simulator op (including micro ops) rate (op/s) +host_tick_rate 162616088 # Simulator tick rate (ticks/s) +host_mem_usage 265888 # Number of bytes of host memory used +host_seconds 0.11 # Real time elapsed on the host sim_insts 4592 # Number of instructions simulated sim_ops 5378 # Number of ops (including micro ops) simulated system.voltage_domain.voltage 1 # Voltage in Volts @@ -1198,6 +1198,7 @@ system.cpu.toL2Bus.pkt_size_system.cpu.icache.mem_side::system.cpu.l2cache.cpu_s system.cpu.toL2Bus.pkt_size_system.cpu.dcache.mem_side::system.cpu.l2cache.cpu_side 9408 # Cumulative packet size per connected master and slave (bytes) system.cpu.toL2Bus.pkt_size::total 28352 # Cumulative packet size per connected master and slave (bytes) system.cpu.toL2Bus.snoops 0 # Total snoops (count) +system.cpu.toL2Bus.snoopTraffic 0 # Total snoop traffic (bytes) system.cpu.toL2Bus.snoop_fanout::samples 441 # Request fanout histogram system.cpu.toL2Bus.snoop_fanout::mean 0.099773 # Request fanout histogram system.cpu.toL2Bus.snoop_fanout::stdev 0.300038 # Request fanout histogram @@ -1225,6 +1226,7 @@ system.membus.pkt_count::total 794 # Pa system.membus.pkt_size_system.cpu.l2cache.mem_side::system.physmem.port 25408 # Cumulative packet size per connected master and slave (bytes) system.membus.pkt_size::total 25408 # Cumulative packet size per connected master and slave (bytes) system.membus.snoops 0 # Total snoops (count) +system.membus.snoopTraffic 0 # Total snoop traffic (bytes) system.membus.snoop_fanout::samples 397 # Request fanout histogram system.membus.snoop_fanout::mean 0 # Request fanout histogram system.membus.snoop_fanout::stdev 0 # Request fanout histogram diff --git a/tests/quick/se/00.hello/ref/arm/linux/o3-timing/config.ini b/tests/quick/se/00.hello/ref/arm/linux/o3-timing/config.ini index 4403ba687..fc0ffed98 100644 --- a/tests/quick/se/00.hello/ref/arm/linux/o3-timing/config.ini +++ b/tests/quick/se/00.hello/ref/arm/linux/o3-timing/config.ini @@ -14,6 +14,7 @@ children=clk_domain cpu cpu_clk_domain dvfs_handler membus physmem voltage_domai boot_osflags=a cache_line_size=64 clk_domain=system.clk_domain +default_p_state=UNDEFINED eventq_index=0 exit_on_work_items=false init_param=0 @@ -27,6 +28,10 @@ memories=system.physmem mmap_using_noreserve=false multi_thread=false num_work_ids=16 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null readfile= symbolfile= thermal_components= @@ -72,6 +77,7 @@ cpu_id=0 decodeToFetchDelay=1 decodeToRenameDelay=2 decodeWidth=3 +default_p_state=UNDEFINED 
dispatchWidth=6 do_checkpoint_insts=true do_quiesce=true @@ -110,6 +116,10 @@ numPhysIntRegs=128 numROBEntries=40 numRobs=1 numThreads=1 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null profile=0 progress_interval=0 renameToDecodeDelay=1 @@ -166,12 +176,17 @@ addr_ranges=0:18446744073709551615 assoc=2 clk_domain=system.cpu_clk_domain clusivity=mostly_incl +default_p_state=UNDEFINED demand_mshr_reserve=1 eventq_index=0 hit_latency=2 is_read_only=false max_miss_count=0 mshrs=6 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null prefetch_on_access=false prefetcher=Null response_latency=2 @@ -190,8 +205,13 @@ type=LRU assoc=2 block_size=64 clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 hit_latency=2 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sequential_access=false size=32768 @@ -214,9 +234,14 @@ walker=system.cpu.dstage2_mmu.stage2_tlb.walker [system.cpu.dstage2_mmu.stage2_tlb.walker] type=ArmTableWalker clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 is_stage2=true num_squash_per_cycle=2 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sys=system [system.cpu.dtb] @@ -230,9 +255,14 @@ walker=system.cpu.dtb.walker [system.cpu.dtb.walker] type=ArmTableWalker clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 is_stage2=false num_squash_per_cycle=2 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sys=system port=system.cpu.toL2Bus.slave[3] @@ -508,12 +538,17 @@ addr_ranges=0:18446744073709551615 assoc=2 clk_domain=system.cpu_clk_domain clusivity=mostly_incl +default_p_state=UNDEFINED demand_mshr_reserve=1 eventq_index=0 hit_latency=1 is_read_only=true max_miss_count=0 mshrs=2 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null prefetch_on_access=false prefetcher=Null response_latency=1 @@ -532,8 +567,13 @@ type=LRU assoc=2 block_size=64 clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 hit_latency=1 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sequential_access=false size=32768 @@ -591,9 +631,14 @@ walker=system.cpu.istage2_mmu.stage2_tlb.walker [system.cpu.istage2_mmu.stage2_tlb.walker] type=ArmTableWalker clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 is_stage2=true num_squash_per_cycle=2 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sys=system [system.cpu.itb] @@ -607,9 +652,14 @@ walker=system.cpu.itb.walker [system.cpu.itb.walker] type=ArmTableWalker clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 is_stage2=false num_squash_per_cycle=2 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sys=system port=system.cpu.toL2Bus.slave[2] @@ -620,12 +670,17 @@ addr_ranges=0:18446744073709551615 assoc=16 clk_domain=system.cpu_clk_domain clusivity=mostly_excl +default_p_state=UNDEFINED demand_mshr_reserve=1 eventq_index=0 hit_latency=12 is_read_only=false max_miss_count=0 mshrs=16 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null prefetch_on_access=true 
prefetcher=system.cpu.l2cache.prefetcher response_latency=12 @@ -643,6 +698,7 @@ mem_side=system.membus.slave[1] type=StridePrefetcher cache_snoop=false clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED degree=8 eventq_index=0 latency=1 @@ -653,6 +709,10 @@ on_inst=true on_miss=false on_read=true on_write=true +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null queue_filter=true queue_size=32 queue_squash=true @@ -669,8 +729,13 @@ type=RandomRepl assoc=16 block_size=64 clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 hit_latency=12 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sequential_access=false size=1048576 @@ -678,10 +743,15 @@ size=1048576 type=CoherentXBar children=snoop_filter clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 forward_latency=0 frontend_latency=1 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 point_of_coherency=false +power_model=Null response_latency=1 snoop_filter=system.cpu.toL2Bus.snoop_filter snoop_response_latency=1 @@ -712,7 +782,7 @@ env= errout=cerr euid=100 eventq_index=0 -executable=/home/stever/hg/m5sim.org/gem5/tests/test-progs/hello/bin/arm/linux/hello +executable=/arm/projectscratch/randd/systems/dist/test-progs/hello/bin/arm/linux/hello gid=100 input=cin kvmInSE=false @@ -744,10 +814,15 @@ transition_latency=100000000 [system.membus] type=CoherentXBar clk_domain=system.clk_domain +default_p_state=UNDEFINED eventq_index=0 forward_latency=4 frontend_latency=3 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 point_of_coherency=true +power_model=Null response_latency=2 snoop_filter=Null snoop_response_latency=4 @@ -791,6 +866,7 @@ burst_length=8 channels=1 clk_domain=system.clk_domain conf_table_reported=true +default_p_state=UNDEFINED device_bus_width=8 device_rowbuffer_size=1024 device_size=536870912 @@ -802,7 +878,11 @@ max_accesses_per_row=16 mem_sched_policy=frfcfs min_writes_per_switch=16 null=false +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 page_policy=open_adaptive +power_model=Null range=0:134217727 ranks_per_channel=2 read_buffer_size=32 diff --git a/tests/quick/se/00.hello/ref/arm/linux/o3-timing/simerr b/tests/quick/se/00.hello/ref/arm/linux/o3-timing/simerr index 341b479f7..bbcd9d751 100755 --- a/tests/quick/se/00.hello/ref/arm/linux/o3-timing/simerr +++ b/tests/quick/se/00.hello/ref/arm/linux/o3-timing/simerr @@ -1,2 +1,3 @@ warn: DRAM device capacity (8192 Mbytes) does not match the address range assigned (128 Mbytes) warn: Sockets disabled, not accepting gdb connections +warn: ClockedObject: More than one power state change request encountered within the same simulation tick diff --git a/tests/quick/se/00.hello/ref/arm/linux/o3-timing/simout b/tests/quick/se/00.hello/ref/arm/linux/o3-timing/simout index e4ae04024..10d18de27 100755 --- a/tests/quick/se/00.hello/ref/arm/linux/o3-timing/simout +++ b/tests/quick/se/00.hello/ref/arm/linux/o3-timing/simout @@ -3,10 +3,10 @@ Redirecting stderr to build/ARM/tests/opt/quick/se/00.hello/arm/linux/o3-timing/ gem5 Simulator System. http://gem5.org gem5 is copyrighted software; use the --copyright option for details. 
-gem5 compiled Mar 13 2016 22:35:59 -gem5 started Mar 13 2016 22:47:14 -gem5 executing on phenom, pid 19874 -command line: build/ARM/gem5.opt -d build/ARM/tests/opt/quick/se/00.hello/arm/linux/o3-timing -re /home/stever/hg/m5sim.org/gem5/tests/run.py build/ARM/tests/opt/quick/se/00.hello/arm/linux/o3-timing +gem5 compiled Jul 21 2016 14:37:41 +gem5 started Jul 21 2016 15:07:36 +gem5 executing on e108600-lin, pid 24410 +command line: /work/curdun01/gem5-external.hg/build/ARM/gem5.opt -d build/ARM/tests/opt/quick/se/00.hello/arm/linux/o3-timing -re /work/curdun01/gem5-external.hg/tests/testing/../run.py quick/se/00.hello/arm/linux/o3-timing Global frequency set at 1000000000000 ticks per second info: Entering event queue @ 0. Starting simulation... diff --git a/tests/quick/se/00.hello/ref/arm/linux/o3-timing/stats.txt b/tests/quick/se/00.hello/ref/arm/linux/o3-timing/stats.txt index e81d385ba..1e33086fd 100644 --- a/tests/quick/se/00.hello/ref/arm/linux/o3-timing/stats.txt +++ b/tests/quick/se/00.hello/ref/arm/linux/o3-timing/stats.txt @@ -4,11 +4,11 @@ sim_seconds 0.000019 # Nu sim_ticks 18821000 # Number of ticks simulated final_tick 18821000 # Number of ticks from beginning of simulation (restored from checkpoints and never reset) sim_freq 1000000000000 # Frequency of simulated ticks -host_inst_rate 84019 # Simulator instruction rate (inst/s) -host_op_rate 98384 # Simulator op (including micro ops) rate (op/s) -host_tick_rate 344256847 # Simulator tick rate (ticks/s) -host_mem_usage 306884 # Number of bytes of host memory used -host_seconds 0.05 # Real time elapsed on the host +host_inst_rate 60061 # Simulator instruction rate (inst/s) +host_op_rate 70319 # Simulator op (including micro ops) rate (op/s) +host_tick_rate 246027411 # Simulator tick rate (ticks/s) +host_mem_usage 262688 # Number of bytes of host memory used +host_seconds 0.08 # Real time elapsed on the host sim_insts 4592 # Number of instructions simulated sim_ops 5378 # Number of ops (including micro ops) simulated system.voltage_domain.voltage 1 # Voltage in Volts @@ -1107,6 +1107,7 @@ system.cpu.toL2Bus.pkt_size_system.cpu.icache.mem_side::system.cpu.l2cache.cpu_s system.cpu.toL2Bus.pkt_size_system.cpu.dcache.mem_side::system.cpu.l2cache.cpu_side 9216 # Cumulative packet size per connected master and slave (bytes) system.cpu.toL2Bus.pkt_size::total 31168 # Cumulative packet size per connected master and slave (bytes) system.cpu.toL2Bus.snoops 454 # Total snoops (count) +system.cpu.toL2Bus.snoopTraffic 0 # Total snoop traffic (bytes) system.cpu.toL2Bus.snoop_fanout::samples 897 # Request fanout histogram system.cpu.toL2Bus.snoop_fanout::mean 0.549610 # Request fanout histogram system.cpu.toL2Bus.snoop_fanout::stdev 0.582523 # Request fanout histogram @@ -1134,6 +1135,7 @@ system.membus.pkt_count::total 885 # Pa system.membus.pkt_size_system.cpu.l2cache.mem_side::system.physmem.port 28288 # Cumulative packet size per connected master and slave (bytes) system.membus.pkt_size::total 28288 # Cumulative packet size per connected master and slave (bytes) system.membus.snoops 0 # Total snoops (count) +system.membus.snoopTraffic 0 # Total snoop traffic (bytes) system.membus.snoop_fanout::samples 443 # Request fanout histogram system.membus.snoop_fanout::mean 0 # Request fanout histogram system.membus.snoop_fanout::stdev 0 # Request fanout histogram diff --git a/tests/quick/se/00.hello/ref/arm/linux/simple-atomic-dummychecker/config.ini b/tests/quick/se/00.hello/ref/arm/linux/simple-atomic-dummychecker/config.ini index 
60fb7fd34..be532b0c0 100644 --- a/tests/quick/se/00.hello/ref/arm/linux/simple-atomic-dummychecker/config.ini +++ b/tests/quick/se/00.hello/ref/arm/linux/simple-atomic-dummychecker/config.ini @@ -14,6 +14,7 @@ children=clk_domain cpu cpu_clk_domain dvfs_handler membus physmem voltage_domai boot_osflags=a cache_line_size=64 clk_domain=system.clk_domain +default_p_state=UNDEFINED eventq_index=0 exit_on_work_items=false init_param=0 @@ -27,6 +28,10 @@ memories=system.physmem mmap_using_noreserve=false multi_thread=false num_work_ids=16 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null readfile= symbolfile= thermal_components= @@ -55,6 +60,7 @@ branchPred=Null checker=system.cpu.checker clk_domain=system.cpu_clk_domain cpu_id=0 +default_p_state=UNDEFINED do_checkpoint_insts=true do_quiesce=true do_statistics_insts=true @@ -73,6 +79,10 @@ max_insts_any_thread=0 max_loads_all_threads=0 max_loads_any_thread=0 numThreads=1 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null profile=0 progress_interval=0 simpoint_start_insts= @@ -93,6 +103,7 @@ children=dstage2_mmu dtb isa istage2_mmu itb tracer checker=Null clk_domain=system.cpu_clk_domain cpu_id=-1 +default_p_state=UNDEFINED do_checkpoint_insts=true do_quiesce=true do_statistics_insts=true @@ -111,6 +122,10 @@ max_insts_any_thread=0 max_loads_all_threads=0 max_loads_any_thread=0 numThreads=1 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null profile=0 progress_interval=0 simpoint_start_insts= @@ -141,9 +156,14 @@ walker=system.cpu.checker.dstage2_mmu.stage2_tlb.walker [system.cpu.checker.dstage2_mmu.stage2_tlb.walker] type=ArmTableWalker clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 is_stage2=true num_squash_per_cycle=2 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sys=system [system.cpu.checker.dtb] @@ -157,9 +177,14 @@ walker=system.cpu.checker.dtb.walker [system.cpu.checker.dtb.walker] type=ArmTableWalker clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 is_stage2=false num_squash_per_cycle=2 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sys=system [system.cpu.checker.isa] @@ -212,9 +237,14 @@ walker=system.cpu.checker.istage2_mmu.stage2_tlb.walker [system.cpu.checker.istage2_mmu.stage2_tlb.walker] type=ArmTableWalker clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 is_stage2=true num_squash_per_cycle=2 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sys=system [system.cpu.checker.itb] @@ -228,9 +258,14 @@ walker=system.cpu.checker.itb.walker [system.cpu.checker.itb.walker] type=ArmTableWalker clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 is_stage2=false num_squash_per_cycle=2 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sys=system [system.cpu.checker.tracer] @@ -256,9 +291,14 @@ walker=system.cpu.dstage2_mmu.stage2_tlb.walker [system.cpu.dstage2_mmu.stage2_tlb.walker] type=ArmTableWalker clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 is_stage2=true num_squash_per_cycle=2 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sys=system [system.cpu.dtb] 
@@ -272,9 +312,14 @@ walker=system.cpu.dtb.walker [system.cpu.dtb.walker] type=ArmTableWalker clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 is_stage2=false num_squash_per_cycle=2 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sys=system port=system.membus.slave[4] @@ -332,9 +377,14 @@ walker=system.cpu.istage2_mmu.stage2_tlb.walker [system.cpu.istage2_mmu.stage2_tlb.walker] type=ArmTableWalker clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 is_stage2=true num_squash_per_cycle=2 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sys=system [system.cpu.itb] @@ -348,9 +398,14 @@ walker=system.cpu.itb.walker [system.cpu.itb.walker] type=ArmTableWalker clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 is_stage2=false num_squash_per_cycle=2 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sys=system port=system.membus.slave[3] @@ -368,7 +423,7 @@ env= errout=cerr euid=100 eventq_index=0 -executable=/dist/m5/regression/test-progs/hello/bin/arm/linux/hello +executable=/arm/projectscratch/randd/systems/dist/test-progs/hello/bin/arm/linux/hello gid=100 input=cin kvmInSE=false @@ -400,10 +455,15 @@ transition_latency=100000000 [system.membus] type=CoherentXBar clk_domain=system.clk_domain +default_p_state=UNDEFINED eventq_index=0 forward_latency=4 frontend_latency=3 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 point_of_coherency=true +power_model=Null response_latency=2 snoop_filter=Null snoop_response_latency=4 @@ -418,11 +478,16 @@ type=SimpleMemory bandwidth=73.000000 clk_domain=system.clk_domain conf_table_reported=true +default_p_state=UNDEFINED eventq_index=0 in_addr_map=true latency=30000 latency_var=0 null=false +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null range=0:134217727 port=system.membus.master[0] diff --git a/tests/quick/se/00.hello/ref/arm/linux/simple-atomic-dummychecker/simerr b/tests/quick/se/00.hello/ref/arm/linux/simple-atomic-dummychecker/simerr index 1a4f96712..2b0e974b5 100755 --- a/tests/quick/se/00.hello/ref/arm/linux/simple-atomic-dummychecker/simerr +++ b/tests/quick/se/00.hello/ref/arm/linux/simple-atomic-dummychecker/simerr @@ -1 +1,3 @@ warn: Sockets disabled, not accepting gdb connections +warn: ClockedObject: More than one power state change request encountered within the same simulation tick +warn: ClockedObject: More than one power state change request encountered within the same simulation tick diff --git a/tests/quick/se/00.hello/ref/arm/linux/simple-atomic-dummychecker/simout b/tests/quick/se/00.hello/ref/arm/linux/simple-atomic-dummychecker/simout index dca8243ae..a4f08df89 100755 --- a/tests/quick/se/00.hello/ref/arm/linux/simple-atomic-dummychecker/simout +++ b/tests/quick/se/00.hello/ref/arm/linux/simple-atomic-dummychecker/simout @@ -1,10 +1,12 @@ +Redirecting stdout to build/ARM/tests/opt/quick/se/00.hello/arm/linux/simple-atomic-dummychecker/simout +Redirecting stderr to build/ARM/tests/opt/quick/se/00.hello/arm/linux/simple-atomic-dummychecker/simerr gem5 Simulator System. http://gem5.org gem5 is copyrighted software; use the --copyright option for details. 
-gem5 compiled Jan 21 2016 14:45:42 -gem5 started Jan 21 2016 14:47:11 -gem5 executing on zizzer, pid 20787 -command line: build/ARM/gem5.opt -d build/ARM/tests/opt/quick/se/00.hello/arm/linux/simple-atomic-dummychecker -re /z/atgutier/gem5/gem5-commit/tests/run.py build/ARM/tests/opt/quick/se/00.hello/arm/linux/simple-atomic-dummychecker +gem5 compiled Jul 21 2016 14:37:41 +gem5 started Jul 21 2016 14:49:47 +gem5 executing on e108600-lin, pid 23301 +command line: /work/curdun01/gem5-external.hg/build/ARM/gem5.opt -d build/ARM/tests/opt/quick/se/00.hello/arm/linux/simple-atomic-dummychecker -re /work/curdun01/gem5-external.hg/tests/testing/../run.py quick/se/00.hello/arm/linux/simple-atomic-dummychecker Global frequency set at 1000000000000 ticks per second info: Entering event queue @ 0. Starting simulation... diff --git a/tests/quick/se/00.hello/ref/arm/linux/simple-atomic-dummychecker/stats.txt b/tests/quick/se/00.hello/ref/arm/linux/simple-atomic-dummychecker/stats.txt index c4cb1f552..55d542711 100644 --- a/tests/quick/se/00.hello/ref/arm/linux/simple-atomic-dummychecker/stats.txt +++ b/tests/quick/se/00.hello/ref/arm/linux/simple-atomic-dummychecker/stats.txt @@ -4,10 +4,10 @@ sim_seconds 0.000003 # Nu sim_ticks 2695000 # Number of ticks simulated final_tick 2695000 # Number of ticks from beginning of simulation (restored from checkpoints and never reset) sim_freq 1000000000000 # Frequency of simulated ticks -host_inst_rate 569364 # Simulator instruction rate (inst/s) -host_op_rate 666035 # Simulator op (including micro ops) rate (op/s) -host_tick_rate 333433301 # Simulator tick rate (ticks/s) -host_mem_usage 299296 # Number of bytes of host memory used +host_inst_rate 427598 # Simulator instruction rate (inst/s) +host_op_rate 499586 # Simulator op (including micro ops) rate (op/s) +host_tick_rate 249859240 # Simulator tick rate (ticks/s) +host_mem_usage 254616 # Number of bytes of host memory used host_seconds 0.01 # Real time elapsed on the host sim_insts 4592 # Number of instructions simulated sim_ops 5378 # Number of ops (including micro ops) simulated @@ -359,6 +359,7 @@ system.membus.pkt_size_system.cpu.icache_port::system.physmem.port 18420 system.membus.pkt_size_system.cpu.dcache_port::system.physmem.port 8139 # Cumulative packet size per connected master and slave (bytes) system.membus.pkt_size::total 26559 # Cumulative packet size per connected master and slave (bytes) system.membus.snoops 0 # Total snoops (count) +system.membus.snoopTraffic 0 # Total snoop traffic (bytes) system.membus.snoop_fanout::samples 6532 # Request fanout histogram system.membus.snoop_fanout::mean 0.704991 # Request fanout histogram system.membus.snoop_fanout::stdev 0.456082 # Request fanout histogram diff --git a/tests/quick/se/00.hello/ref/arm/linux/simple-atomic/config.ini b/tests/quick/se/00.hello/ref/arm/linux/simple-atomic/config.ini index 40d4f88c7..8f8064fa0 100644 --- a/tests/quick/se/00.hello/ref/arm/linux/simple-atomic/config.ini +++ b/tests/quick/se/00.hello/ref/arm/linux/simple-atomic/config.ini @@ -14,6 +14,7 @@ children=clk_domain cpu cpu_clk_domain dvfs_handler membus physmem voltage_domai boot_osflags=a cache_line_size=64 clk_domain=system.clk_domain +default_p_state=UNDEFINED eventq_index=0 exit_on_work_items=false init_param=0 @@ -27,6 +28,10 @@ memories=system.physmem mmap_using_noreserve=false multi_thread=false num_work_ids=16 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null readfile= symbolfile= thermal_components= @@ 
-55,6 +60,7 @@ branchPred=Null checker=Null clk_domain=system.cpu_clk_domain cpu_id=0 +default_p_state=UNDEFINED do_checkpoint_insts=true do_quiesce=true do_statistics_insts=true @@ -73,6 +79,10 @@ max_insts_any_thread=0 max_loads_all_threads=0 max_loads_any_thread=0 numThreads=1 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null profile=0 progress_interval=0 simpoint_start_insts= @@ -106,9 +116,14 @@ walker=system.cpu.dstage2_mmu.stage2_tlb.walker [system.cpu.dstage2_mmu.stage2_tlb.walker] type=ArmTableWalker clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 is_stage2=true num_squash_per_cycle=2 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sys=system [system.cpu.dtb] @@ -122,9 +137,14 @@ walker=system.cpu.dtb.walker [system.cpu.dtb.walker] type=ArmTableWalker clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 is_stage2=false num_squash_per_cycle=2 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sys=system port=system.membus.slave[4] @@ -182,9 +202,14 @@ walker=system.cpu.istage2_mmu.stage2_tlb.walker [system.cpu.istage2_mmu.stage2_tlb.walker] type=ArmTableWalker clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 is_stage2=true num_squash_per_cycle=2 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sys=system [system.cpu.itb] @@ -198,9 +223,14 @@ walker=system.cpu.itb.walker [system.cpu.itb.walker] type=ArmTableWalker clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 is_stage2=false num_squash_per_cycle=2 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sys=system port=system.membus.slave[3] @@ -218,7 +248,7 @@ env= errout=cerr euid=100 eventq_index=0 -executable=/dist/m5/regression/test-progs/hello/bin/arm/linux/hello +executable=/arm/projectscratch/randd/systems/dist/test-progs/hello/bin/arm/linux/hello gid=100 input=cin kvmInSE=false @@ -250,10 +280,15 @@ transition_latency=100000000 [system.membus] type=CoherentXBar clk_domain=system.clk_domain +default_p_state=UNDEFINED eventq_index=0 forward_latency=4 frontend_latency=3 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 point_of_coherency=true +power_model=Null response_latency=2 snoop_filter=Null snoop_response_latency=4 @@ -268,11 +303,16 @@ type=SimpleMemory bandwidth=73.000000 clk_domain=system.clk_domain conf_table_reported=true +default_p_state=UNDEFINED eventq_index=0 in_addr_map=true latency=30000 latency_var=0 null=false +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null range=0:134217727 port=system.membus.master[0] diff --git a/tests/quick/se/00.hello/ref/arm/linux/simple-atomic/simerr b/tests/quick/se/00.hello/ref/arm/linux/simple-atomic/simerr index 1a4f96712..aadc3d011 100755 --- a/tests/quick/se/00.hello/ref/arm/linux/simple-atomic/simerr +++ b/tests/quick/se/00.hello/ref/arm/linux/simple-atomic/simerr @@ -1 +1,2 @@ warn: Sockets disabled, not accepting gdb connections +warn: ClockedObject: More than one power state change request encountered within the same simulation tick diff --git a/tests/quick/se/00.hello/ref/arm/linux/simple-atomic/simout b/tests/quick/se/00.hello/ref/arm/linux/simple-atomic/simout index 98eb95060..813c1fdca 100755 --- 
a/tests/quick/se/00.hello/ref/arm/linux/simple-atomic/simout +++ b/tests/quick/se/00.hello/ref/arm/linux/simple-atomic/simout @@ -3,10 +3,10 @@ Redirecting stderr to build/ARM/tests/opt/quick/se/00.hello/arm/linux/simple-ato gem5 Simulator System. http://gem5.org gem5 is copyrighted software; use the --copyright option for details. -gem5 compiled Jan 21 2016 14:45:42 -gem5 started Jan 21 2016 14:46:25 -gem5 executing on zizzer, pid 20745 -command line: build/ARM/gem5.opt -d build/ARM/tests/opt/quick/se/00.hello/arm/linux/simple-atomic -re /z/atgutier/gem5/gem5-commit/tests/run.py build/ARM/tests/opt/quick/se/00.hello/arm/linux/simple-atomic +gem5 compiled Jul 21 2016 14:37:41 +gem5 started Jul 21 2016 14:38:23 +gem5 executing on e108600-lin, pid 23087 +command line: /work/curdun01/gem5-external.hg/build/ARM/gem5.opt -d build/ARM/tests/opt/quick/se/00.hello/arm/linux/simple-atomic -re /work/curdun01/gem5-external.hg/tests/testing/../run.py quick/se/00.hello/arm/linux/simple-atomic Global frequency set at 1000000000000 ticks per second info: Entering event queue @ 0. Starting simulation... diff --git a/tests/quick/se/00.hello/ref/arm/linux/simple-atomic/stats.txt b/tests/quick/se/00.hello/ref/arm/linux/simple-atomic/stats.txt index a84dba320..43260b12f 100644 --- a/tests/quick/se/00.hello/ref/arm/linux/simple-atomic/stats.txt +++ b/tests/quick/se/00.hello/ref/arm/linux/simple-atomic/stats.txt @@ -4,10 +4,10 @@ sim_seconds 0.000003 # Nu sim_ticks 2695000 # Number of ticks simulated final_tick 2695000 # Number of ticks from beginning of simulation (restored from checkpoints and never reset) sim_freq 1000000000000 # Frequency of simulated ticks -host_inst_rate 625339 # Simulator instruction rate (inst/s) -host_op_rate 731490 # Simulator op (including micro ops) rate (op/s) -host_tick_rate 366164896 # Simulator tick rate (ticks/s) -host_mem_usage 298280 # Number of bytes of host memory used +host_inst_rate 433184 # Simulator instruction rate (inst/s) +host_op_rate 506134 # Simulator op (including micro ops) rate (op/s) +host_tick_rate 253162440 # Simulator tick rate (ticks/s) +host_mem_usage 254364 # Number of bytes of host memory used host_seconds 0.01 # Real time elapsed on the host sim_insts 4592 # Number of instructions simulated sim_ops 5378 # Number of ops (including micro ops) simulated @@ -235,6 +235,7 @@ system.membus.pkt_size_system.cpu.icache_port::system.physmem.port 18420 system.membus.pkt_size_system.cpu.dcache_port::system.physmem.port 8139 # Cumulative packet size per connected master and slave (bytes) system.membus.pkt_size::total 26559 # Cumulative packet size per connected master and slave (bytes) system.membus.snoops 0 # Total snoops (count) +system.membus.snoopTraffic 0 # Total snoop traffic (bytes) system.membus.snoop_fanout::samples 6532 # Request fanout histogram system.membus.snoop_fanout::mean 0.704991 # Request fanout histogram system.membus.snoop_fanout::stdev 0.456082 # Request fanout histogram diff --git a/tests/quick/se/00.hello/ref/arm/linux/simple-timing/config.ini b/tests/quick/se/00.hello/ref/arm/linux/simple-timing/config.ini index 3fd071b25..b1081da03 100644 --- a/tests/quick/se/00.hello/ref/arm/linux/simple-timing/config.ini +++ b/tests/quick/se/00.hello/ref/arm/linux/simple-timing/config.ini @@ -14,6 +14,7 @@ children=clk_domain cpu cpu_clk_domain dvfs_handler membus physmem voltage_domai boot_osflags=a cache_line_size=64 clk_domain=system.clk_domain +default_p_state=UNDEFINED eventq_index=0 exit_on_work_items=false init_param=0 @@ -27,6 +28,10 @@ 
memories=system.physmem mmap_using_noreserve=false multi_thread=false num_work_ids=16 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null readfile= symbolfile= thermal_components= @@ -55,6 +60,7 @@ branchPred=Null checker=Null clk_domain=system.cpu_clk_domain cpu_id=0 +default_p_state=UNDEFINED do_checkpoint_insts=true do_quiesce=true do_statistics_insts=true @@ -72,6 +78,10 @@ max_insts_any_thread=0 max_loads_all_threads=0 max_loads_any_thread=0 numThreads=1 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null profile=0 progress_interval=0 simpoint_start_insts= @@ -90,12 +100,17 @@ addr_ranges=0:18446744073709551615 assoc=2 clk_domain=system.cpu_clk_domain clusivity=mostly_incl +default_p_state=UNDEFINED demand_mshr_reserve=1 eventq_index=0 hit_latency=2 is_read_only=false max_miss_count=0 mshrs=4 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null prefetch_on_access=false prefetcher=Null response_latency=2 @@ -114,8 +129,13 @@ type=LRU assoc=2 block_size=64 clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 hit_latency=2 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sequential_access=false size=262144 @@ -138,9 +158,14 @@ walker=system.cpu.dstage2_mmu.stage2_tlb.walker [system.cpu.dstage2_mmu.stage2_tlb.walker] type=ArmTableWalker clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 is_stage2=true num_squash_per_cycle=2 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sys=system [system.cpu.dtb] @@ -154,9 +179,14 @@ walker=system.cpu.dtb.walker [system.cpu.dtb.walker] type=ArmTableWalker clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 is_stage2=false num_squash_per_cycle=2 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sys=system port=system.cpu.toL2Bus.slave[3] @@ -167,12 +197,17 @@ addr_ranges=0:18446744073709551615 assoc=2 clk_domain=system.cpu_clk_domain clusivity=mostly_incl +default_p_state=UNDEFINED demand_mshr_reserve=1 eventq_index=0 hit_latency=2 is_read_only=true max_miss_count=0 mshrs=4 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null prefetch_on_access=false prefetcher=Null response_latency=2 @@ -191,8 +226,13 @@ type=LRU assoc=2 block_size=64 clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 hit_latency=2 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sequential_access=false size=131072 @@ -250,9 +290,14 @@ walker=system.cpu.istage2_mmu.stage2_tlb.walker [system.cpu.istage2_mmu.stage2_tlb.walker] type=ArmTableWalker clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 is_stage2=true num_squash_per_cycle=2 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sys=system [system.cpu.itb] @@ -266,9 +311,14 @@ walker=system.cpu.itb.walker [system.cpu.itb.walker] type=ArmTableWalker clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 is_stage2=false num_squash_per_cycle=2 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sys=system port=system.cpu.toL2Bus.slave[2] @@ -279,12 
+329,17 @@ addr_ranges=0:18446744073709551615 assoc=8 clk_domain=system.cpu_clk_domain clusivity=mostly_incl +default_p_state=UNDEFINED demand_mshr_reserve=1 eventq_index=0 hit_latency=20 is_read_only=false max_miss_count=0 mshrs=20 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null prefetch_on_access=false prefetcher=Null response_latency=20 @@ -303,8 +358,13 @@ type=LRU assoc=8 block_size=64 clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 hit_latency=20 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sequential_access=false size=2097152 @@ -312,10 +372,15 @@ size=2097152 type=CoherentXBar children=snoop_filter clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 forward_latency=0 frontend_latency=1 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 point_of_coherency=false +power_model=Null response_latency=1 snoop_filter=system.cpu.toL2Bus.snoop_filter snoop_response_latency=1 @@ -346,7 +411,7 @@ env= errout=cerr euid=100 eventq_index=0 -executable=/dist/m5/regression/test-progs/hello/bin/arm/linux/hello +executable=/arm/projectscratch/randd/systems/dist/test-progs/hello/bin/arm/linux/hello gid=100 input=cin kvmInSE=false @@ -378,10 +443,15 @@ transition_latency=100000000 [system.membus] type=CoherentXBar clk_domain=system.clk_domain +default_p_state=UNDEFINED eventq_index=0 forward_latency=4 frontend_latency=3 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 point_of_coherency=true +power_model=Null response_latency=2 snoop_filter=Null snoop_response_latency=4 @@ -396,11 +466,16 @@ type=SimpleMemory bandwidth=73.000000 clk_domain=system.clk_domain conf_table_reported=true +default_p_state=UNDEFINED eventq_index=0 in_addr_map=true latency=30000 latency_var=0 null=false +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null range=0:134217727 port=system.membus.master[0] diff --git a/tests/quick/se/00.hello/ref/arm/linux/simple-timing/simerr b/tests/quick/se/00.hello/ref/arm/linux/simple-timing/simerr index 1a4f96712..aadc3d011 100755 --- a/tests/quick/se/00.hello/ref/arm/linux/simple-timing/simerr +++ b/tests/quick/se/00.hello/ref/arm/linux/simple-timing/simerr @@ -1 +1,2 @@ warn: Sockets disabled, not accepting gdb connections +warn: ClockedObject: More than one power state change request encountered within the same simulation tick diff --git a/tests/quick/se/00.hello/ref/arm/linux/simple-timing/simout b/tests/quick/se/00.hello/ref/arm/linux/simple-timing/simout index daa769407..4f7f76cdc 100755 --- a/tests/quick/se/00.hello/ref/arm/linux/simple-timing/simout +++ b/tests/quick/se/00.hello/ref/arm/linux/simple-timing/simout @@ -3,10 +3,10 @@ Redirecting stderr to build/ARM/tests/opt/quick/se/00.hello/arm/linux/simple-tim gem5 Simulator System. http://gem5.org gem5 is copyrighted software; use the --copyright option for details. 
-gem5 compiled Jan 21 2016 14:45:42 -gem5 started Jan 21 2016 14:46:20 -gem5 executing on zizzer, pid 20726 -command line: build/ARM/gem5.opt -d build/ARM/tests/opt/quick/se/00.hello/arm/linux/simple-timing -re /z/atgutier/gem5/gem5-commit/tests/run.py build/ARM/tests/opt/quick/se/00.hello/arm/linux/simple-timing +gem5 compiled Jul 21 2016 14:37:41 +gem5 started Jul 21 2016 14:38:23 +gem5 executing on e108600-lin, pid 23085 +command line: /work/curdun01/gem5-external.hg/build/ARM/gem5.opt -d build/ARM/tests/opt/quick/se/00.hello/arm/linux/simple-timing -re /work/curdun01/gem5-external.hg/tests/testing/../run.py quick/se/00.hello/arm/linux/simple-timing Global frequency set at 1000000000000 ticks per second info: Entering event queue @ 0. Starting simulation... diff --git a/tests/quick/se/00.hello/ref/arm/linux/simple-timing/stats.txt b/tests/quick/se/00.hello/ref/arm/linux/simple-timing/stats.txt index 92414aab2..40170ff2c 100644 --- a/tests/quick/se/00.hello/ref/arm/linux/simple-timing/stats.txt +++ b/tests/quick/se/00.hello/ref/arm/linux/simple-timing/stats.txt @@ -4,11 +4,11 @@ sim_seconds 0.000028 # Nu sim_ticks 28298500 # Number of ticks simulated final_tick 28298500 # Number of ticks from beginning of simulation (restored from checkpoints and never reset) sim_freq 1000000000000 # Frequency of simulated ticks -host_inst_rate 377704 # Simulator instruction rate (inst/s) -host_op_rate 440559 # Simulator op (including micro ops) rate (op/s) -host_tick_rate 2337429945 # Simulator tick rate (ticks/s) -host_mem_usage 308268 # Number of bytes of host memory used -host_seconds 0.01 # Real time elapsed on the host +host_inst_rate 246555 # Simulator instruction rate (inst/s) +host_op_rate 287459 # Simulator op (including micro ops) rate (op/s) +host_tick_rate 1524533550 # Simulator tick rate (ticks/s) +host_mem_usage 264352 # Number of bytes of host memory used +host_seconds 0.02 # Real time elapsed on the host sim_insts 4566 # Number of instructions simulated sim_ops 5330 # Number of ops (including micro ops) simulated system.voltage_domain.voltage 1 # Voltage in Volts @@ -573,6 +573,7 @@ system.cpu.toL2Bus.pkt_size_system.cpu.icache.mem_side::system.cpu.l2cache.cpu_s system.cpu.toL2Bus.pkt_size_system.cpu.dcache.mem_side::system.cpu.l2cache.cpu_side 9024 # Cumulative packet size per connected master and slave (bytes) system.cpu.toL2Bus.pkt_size::total 24512 # Cumulative packet size per connected master and slave (bytes) system.cpu.toL2Bus.snoops 0 # Total snoops (count) +system.cpu.toL2Bus.snoopTraffic 0 # Total snoop traffic (bytes) system.cpu.toL2Bus.snoop_fanout::samples 382 # Request fanout histogram system.cpu.toL2Bus.snoop_fanout::mean 0.083770 # Request fanout histogram system.cpu.toL2Bus.snoop_fanout::stdev 0.277405 # Request fanout histogram @@ -600,6 +601,7 @@ system.membus.pkt_count::total 700 # Pa system.membus.pkt_size_system.cpu.l2cache.mem_side::system.physmem.port 22400 # Cumulative packet size per connected master and slave (bytes) system.membus.pkt_size::total 22400 # Cumulative packet size per connected master and slave (bytes) system.membus.snoops 0 # Total snoops (count) +system.membus.snoopTraffic 0 # Total snoop traffic (bytes) system.membus.snoop_fanout::samples 350 # Request fanout histogram system.membus.snoop_fanout::mean 0 # Request fanout histogram system.membus.snoop_fanout::stdev 0 # Request fanout histogram diff --git a/tests/quick/se/00.hello/ref/mips/linux/o3-timing/config.ini b/tests/quick/se/00.hello/ref/mips/linux/o3-timing/config.ini index 
900efeb02..fb58e2bf8 100644 --- a/tests/quick/se/00.hello/ref/mips/linux/o3-timing/config.ini +++ b/tests/quick/se/00.hello/ref/mips/linux/o3-timing/config.ini @@ -14,6 +14,7 @@ children=clk_domain cpu cpu_clk_domain dvfs_handler membus physmem voltage_domai boot_osflags=a cache_line_size=64 clk_domain=system.clk_domain +default_p_state=UNDEFINED eventq_index=0 exit_on_work_items=false init_param=0 @@ -27,6 +28,10 @@ memories=system.physmem mmap_using_noreserve=false multi_thread=false num_work_ids=16 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null readfile= symbolfile= thermal_components= @@ -72,6 +77,7 @@ cpu_id=0 decodeToFetchDelay=1 decodeToRenameDelay=1 decodeWidth=8 +default_p_state=UNDEFINED dispatchWidth=8 do_checkpoint_insts=true do_quiesce=true @@ -108,6 +114,10 @@ numPhysIntRegs=256 numROBEntries=192 numRobs=1 numThreads=1 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null profile=0 progress_interval=0 renameToDecodeDelay=1 @@ -167,12 +177,17 @@ addr_ranges=0:18446744073709551615 assoc=2 clk_domain=system.cpu_clk_domain clusivity=mostly_incl +default_p_state=UNDEFINED demand_mshr_reserve=1 eventq_index=0 hit_latency=2 is_read_only=false max_miss_count=0 mshrs=4 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null prefetch_on_access=false prefetcher=Null response_latency=2 @@ -191,8 +206,13 @@ type=LRU assoc=2 block_size=64 clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 hit_latency=2 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sequential_access=false size=262144 @@ -515,12 +535,17 @@ addr_ranges=0:18446744073709551615 assoc=2 clk_domain=system.cpu_clk_domain clusivity=mostly_incl +default_p_state=UNDEFINED demand_mshr_reserve=1 eventq_index=0 hit_latency=2 is_read_only=true max_miss_count=0 mshrs=4 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null prefetch_on_access=false prefetcher=Null response_latency=2 @@ -539,8 +564,13 @@ type=LRU assoc=2 block_size=64 clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 hit_latency=2 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sequential_access=false size=131072 @@ -567,12 +597,17 @@ addr_ranges=0:18446744073709551615 assoc=8 clk_domain=system.cpu_clk_domain clusivity=mostly_incl +default_p_state=UNDEFINED demand_mshr_reserve=1 eventq_index=0 hit_latency=20 is_read_only=false max_miss_count=0 mshrs=20 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null prefetch_on_access=false prefetcher=Null response_latency=20 @@ -591,8 +626,13 @@ type=LRU assoc=8 block_size=64 clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 hit_latency=20 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sequential_access=false size=2097152 @@ -600,10 +640,15 @@ size=2097152 type=CoherentXBar children=snoop_filter clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 forward_latency=0 frontend_latency=1 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 point_of_coherency=false +power_model=Null response_latency=1 snoop_filter=system.cpu.toL2Bus.snoop_filter snoop_response_latency=1 
@@ -634,7 +679,7 @@ env= errout=cerr euid=100 eventq_index=0 -executable=/home/stever/hg/m5sim.org/gem5/tests/test-progs/hello/bin/mips/linux/hello +executable=/arm/projectscratch/randd/systems/dist/test-progs/hello/bin/mips/linux/hello gid=100 input=cin kvmInSE=false @@ -666,10 +711,15 @@ transition_latency=100000000 [system.membus] type=CoherentXBar clk_domain=system.clk_domain +default_p_state=UNDEFINED eventq_index=0 forward_latency=4 frontend_latency=3 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 point_of_coherency=true +power_model=Null response_latency=2 snoop_filter=Null snoop_response_latency=4 @@ -713,6 +763,7 @@ burst_length=8 channels=1 clk_domain=system.clk_domain conf_table_reported=true +default_p_state=UNDEFINED device_bus_width=8 device_rowbuffer_size=1024 device_size=536870912 @@ -724,7 +775,11 @@ max_accesses_per_row=16 mem_sched_policy=frfcfs min_writes_per_switch=16 null=false +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 page_policy=open_adaptive +power_model=Null range=0:134217727 ranks_per_channel=2 read_buffer_size=32 diff --git a/tests/quick/se/00.hello/ref/mips/linux/o3-timing/simerr b/tests/quick/se/00.hello/ref/mips/linux/o3-timing/simerr index 341b479f7..bbcd9d751 100755 --- a/tests/quick/se/00.hello/ref/mips/linux/o3-timing/simerr +++ b/tests/quick/se/00.hello/ref/mips/linux/o3-timing/simerr @@ -1,2 +1,3 @@ warn: DRAM device capacity (8192 Mbytes) does not match the address range assigned (128 Mbytes) warn: Sockets disabled, not accepting gdb connections +warn: ClockedObject: More than one power state change request encountered within the same simulation tick diff --git a/tests/quick/se/00.hello/ref/mips/linux/o3-timing/simout b/tests/quick/se/00.hello/ref/mips/linux/o3-timing/simout index fea443199..8c26880d3 100755 --- a/tests/quick/se/00.hello/ref/mips/linux/o3-timing/simout +++ b/tests/quick/se/00.hello/ref/mips/linux/o3-timing/simout @@ -3,13 +3,13 @@ Redirecting stderr to build/MIPS/tests/opt/quick/se/00.hello/mips/linux/o3-timin gem5 Simulator System. http://gem5.org gem5 is copyrighted software; use the --copyright option for details. -gem5 compiled Mar 14 2016 22:04:10 -gem5 started Mar 14 2016 22:06:34 -gem5 executing on phenom, pid 29859 -command line: build/MIPS/gem5.opt -d build/MIPS/tests/opt/quick/se/00.hello/mips/linux/o3-timing -re /home/stever/hg/m5sim.org/gem5/tests/run.py build/MIPS/tests/opt/quick/se/00.hello/mips/linux/o3-timing +gem5 compiled Jul 21 2016 14:23:13 +gem5 started Jul 21 2016 14:23:47 +gem5 executing on e108600-lin, pid 13281 +command line: /work/curdun01/gem5-external.hg/build/MIPS/gem5.opt -d build/MIPS/tests/opt/quick/se/00.hello/mips/linux/o3-timing -re /work/curdun01/gem5-external.hg/tests/testing/../run.py quick/se/00.hello/mips/linux/o3-timing Global frequency set at 1000000000000 ticks per second info: Entering event queue @ 0. Starting simulation... info: Increasing stack size by one page. Hello World! 
-Exiting @ tick 22454000 because target called exit() +Exiting @ tick 22532000 because target called exit() diff --git a/tests/quick/se/00.hello/ref/mips/linux/o3-timing/stats.txt b/tests/quick/se/00.hello/ref/mips/linux/o3-timing/stats.txt index 1d63b6535..ba0daa415 100644 --- a/tests/quick/se/00.hello/ref/mips/linux/o3-timing/stats.txt +++ b/tests/quick/se/00.hello/ref/mips/linux/o3-timing/stats.txt @@ -4,10 +4,10 @@ sim_seconds 0.000023 # Nu sim_ticks 22532000 # Number of ticks simulated final_tick 22532000 # Number of ticks from beginning of simulation (restored from checkpoints and never reset) sim_freq 1000000000000 # Frequency of simulated ticks -host_inst_rate 107418 # Simulator instruction rate (inst/s) -host_op_rate 107396 # Simulator op (including micro ops) rate (op/s) -host_tick_rate 483974405 # Simulator tick rate (ticks/s) -host_mem_usage 292720 # Number of bytes of host memory used +host_inst_rate 94024 # Simulator instruction rate (inst/s) +host_op_rate 93989 # Simulator op (including micro ops) rate (op/s) +host_tick_rate 423490323 # Simulator tick rate (ticks/s) +host_mem_usage 248172 # Number of bytes of host memory used host_seconds 0.05 # Real time elapsed on the host sim_insts 4999 # Number of instructions simulated sim_ops 4999 # Number of ops (including micro ops) simulated @@ -939,6 +939,7 @@ system.cpu.toL2Bus.pkt_size_system.cpu.icache.mem_side::system.cpu.l2cache.cpu_s system.cpu.toL2Bus.pkt_size_system.cpu.dcache.mem_side::system.cpu.l2cache.cpu_side 8960 # Cumulative packet size per connected master and slave (bytes) system.cpu.toL2Bus.pkt_size::total 31296 # Cumulative packet size per connected master and slave (bytes) system.cpu.toL2Bus.snoops 0 # Total snoops (count) +system.cpu.toL2Bus.snoopTraffic 0 # Total snoop traffic (bytes) system.cpu.toL2Bus.snoop_fanout::samples 472 # Request fanout histogram system.cpu.toL2Bus.snoop_fanout::mean 0 # Request fanout histogram system.cpu.toL2Bus.snoop_fanout::stdev 0 # Request fanout histogram @@ -966,6 +967,7 @@ system.membus.pkt_count::total 938 # Pa system.membus.pkt_size_system.cpu.l2cache.mem_side::system.physmem.port 30016 # Cumulative packet size per connected master and slave (bytes) system.membus.pkt_size::total 30016 # Cumulative packet size per connected master and slave (bytes) system.membus.snoops 0 # Total snoops (count) +system.membus.snoopTraffic 0 # Total snoop traffic (bytes) system.membus.snoop_fanout::samples 469 # Request fanout histogram system.membus.snoop_fanout::mean 0 # Request fanout histogram system.membus.snoop_fanout::stdev 0 # Request fanout histogram diff --git a/tests/quick/se/00.hello/ref/mips/linux/simple-atomic/config.ini b/tests/quick/se/00.hello/ref/mips/linux/simple-atomic/config.ini index 45a38b492..9bfe34f71 100644 --- a/tests/quick/se/00.hello/ref/mips/linux/simple-atomic/config.ini +++ b/tests/quick/se/00.hello/ref/mips/linux/simple-atomic/config.ini @@ -14,6 +14,7 @@ children=clk_domain cpu cpu_clk_domain dvfs_handler membus physmem voltage_domai boot_osflags=a cache_line_size=64 clk_domain=system.clk_domain +default_p_state=UNDEFINED eventq_index=0 exit_on_work_items=false init_param=0 @@ -27,6 +28,10 @@ memories=system.physmem mmap_using_noreserve=false multi_thread=false num_work_ids=16 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null readfile= symbolfile= thermal_components= @@ -55,6 +60,7 @@ branchPred=Null checker=Null clk_domain=system.cpu_clk_domain cpu_id=0 +default_p_state=UNDEFINED 
do_checkpoint_insts=true do_quiesce=true do_statistics_insts=true @@ -71,6 +77,10 @@ max_insts_any_thread=0 max_loads_all_threads=0 max_loads_any_thread=0 numThreads=1 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null profile=0 progress_interval=0 simpoint_start_insts= @@ -120,7 +130,7 @@ env= errout=cerr euid=100 eventq_index=0 -executable=/home/stever/hg/m5sim.org/gem5/tests/test-progs/hello/bin/mips/linux/hello +executable=/arm/projectscratch/randd/systems/dist/test-progs/hello/bin/mips/linux/hello gid=100 input=cin kvmInSE=false @@ -152,10 +162,15 @@ transition_latency=100000000 [system.membus] type=CoherentXBar clk_domain=system.clk_domain +default_p_state=UNDEFINED eventq_index=0 forward_latency=4 frontend_latency=3 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 point_of_coherency=true +power_model=Null response_latency=2 snoop_filter=Null snoop_response_latency=4 @@ -170,11 +185,16 @@ type=SimpleMemory bandwidth=73.000000 clk_domain=system.clk_domain conf_table_reported=true +default_p_state=UNDEFINED eventq_index=0 in_addr_map=true latency=30000 latency_var=0 null=false +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null range=0:134217727 port=system.membus.master[0] diff --git a/tests/quick/se/00.hello/ref/mips/linux/simple-atomic/simerr b/tests/quick/se/00.hello/ref/mips/linux/simple-atomic/simerr index 1a4f96712..aadc3d011 100755 --- a/tests/quick/se/00.hello/ref/mips/linux/simple-atomic/simerr +++ b/tests/quick/se/00.hello/ref/mips/linux/simple-atomic/simerr @@ -1 +1,2 @@ warn: Sockets disabled, not accepting gdb connections +warn: ClockedObject: More than one power state change request encountered within the same simulation tick diff --git a/tests/quick/se/00.hello/ref/mips/linux/simple-atomic/simout b/tests/quick/se/00.hello/ref/mips/linux/simple-atomic/simout index 3810aff86..8185b0f75 100755 --- a/tests/quick/se/00.hello/ref/mips/linux/simple-atomic/simout +++ b/tests/quick/se/00.hello/ref/mips/linux/simple-atomic/simout @@ -3,10 +3,10 @@ Redirecting stderr to build/MIPS/tests/opt/quick/se/00.hello/mips/linux/simple-a gem5 Simulator System. http://gem5.org gem5 is copyrighted software; use the --copyright option for details. -gem5 compiled Mar 14 2016 22:04:10 -gem5 started Mar 14 2016 22:06:34 -gem5 executing on phenom, pid 29858 -command line: build/MIPS/gem5.opt -d build/MIPS/tests/opt/quick/se/00.hello/mips/linux/simple-atomic -re /home/stever/hg/m5sim.org/gem5/tests/run.py build/MIPS/tests/opt/quick/se/00.hello/mips/linux/simple-atomic +gem5 compiled Jul 21 2016 14:23:13 +gem5 started Jul 21 2016 14:23:47 +gem5 executing on e108600-lin, pid 13283 +command line: /work/curdun01/gem5-external.hg/build/MIPS/gem5.opt -d build/MIPS/tests/opt/quick/se/00.hello/mips/linux/simple-atomic -re /work/curdun01/gem5-external.hg/tests/testing/../run.py quick/se/00.hello/mips/linux/simple-atomic Global frequency set at 1000000000000 ticks per second info: Entering event queue @ 0. Starting simulation... 
diff --git a/tests/quick/se/00.hello/ref/mips/linux/simple-atomic/stats.txt b/tests/quick/se/00.hello/ref/mips/linux/simple-atomic/stats.txt index 873eb6862..619b5f6ac 100644 --- a/tests/quick/se/00.hello/ref/mips/linux/simple-atomic/stats.txt +++ b/tests/quick/se/00.hello/ref/mips/linux/simple-atomic/stats.txt @@ -4,10 +4,10 @@ sim_seconds 0.000003 # Nu sim_ticks 2820500 # Number of ticks simulated final_tick 2820500 # Number of ticks from beginning of simulation (restored from checkpoints and never reset) sim_freq 1000000000000 # Frequency of simulated ticks -host_inst_rate 743339 # Simulator instruction rate (inst/s) -host_op_rate 742439 # Simulator op (including micro ops) rate (op/s) -host_tick_rate 370817863 # Simulator tick rate (ticks/s) -host_mem_usage 279644 # Number of bytes of host memory used +host_inst_rate 890532 # Simulator instruction rate (inst/s) +host_op_rate 888973 # Simulator op (including micro ops) rate (op/s) +host_tick_rate 443763917 # Simulator tick rate (ticks/s) +host_mem_usage 236392 # Number of bytes of host memory used host_seconds 0.01 # Real time elapsed on the host sim_insts 5641 # Number of instructions simulated sim_ops 5641 # Number of ops (including micro ops) simulated @@ -128,6 +128,7 @@ system.membus.pkt_size_system.cpu.icache_port::system.physmem.port 22568 system.membus.pkt_size_system.cpu.dcache_port::system.physmem.port 7902 # Cumulative packet size per connected master and slave (bytes) system.membus.pkt_size::total 30470 # Cumulative packet size per connected master and slave (bytes) system.membus.snoops 0 # Total snoops (count) +system.membus.snoopTraffic 0 # Total snoop traffic (bytes) system.membus.snoop_fanout::samples 7678 # Request fanout histogram system.membus.snoop_fanout::mean 0.734827 # Request fanout histogram system.membus.snoop_fanout::stdev 0.441454 # Request fanout histogram diff --git a/tests/quick/se/00.hello/ref/mips/linux/simple-timing/config.ini b/tests/quick/se/00.hello/ref/mips/linux/simple-timing/config.ini index 86ced7654..2eeeec54d 100644 --- a/tests/quick/se/00.hello/ref/mips/linux/simple-timing/config.ini +++ b/tests/quick/se/00.hello/ref/mips/linux/simple-timing/config.ini @@ -14,6 +14,7 @@ children=clk_domain cpu cpu_clk_domain dvfs_handler membus physmem voltage_domai boot_osflags=a cache_line_size=64 clk_domain=system.clk_domain +default_p_state=UNDEFINED eventq_index=0 exit_on_work_items=false init_param=0 @@ -27,6 +28,10 @@ memories=system.physmem mmap_using_noreserve=false multi_thread=false num_work_ids=16 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null readfile= symbolfile= thermal_components= @@ -55,6 +60,7 @@ branchPred=Null checker=Null clk_domain=system.cpu_clk_domain cpu_id=0 +default_p_state=UNDEFINED do_checkpoint_insts=true do_quiesce=true do_statistics_insts=true @@ -70,6 +76,10 @@ max_insts_any_thread=0 max_loads_all_threads=0 max_loads_any_thread=0 numThreads=1 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null profile=0 progress_interval=0 simpoint_start_insts= @@ -88,12 +98,17 @@ addr_ranges=0:18446744073709551615 assoc=2 clk_domain=system.cpu_clk_domain clusivity=mostly_incl +default_p_state=UNDEFINED demand_mshr_reserve=1 eventq_index=0 hit_latency=2 is_read_only=false max_miss_count=0 mshrs=4 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null prefetch_on_access=false prefetcher=Null response_latency=2 @@ -112,8 +127,13 @@ 
type=LRU assoc=2 block_size=64 clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 hit_latency=2 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sequential_access=false size=262144 @@ -129,12 +149,17 @@ addr_ranges=0:18446744073709551615 assoc=2 clk_domain=system.cpu_clk_domain clusivity=mostly_incl +default_p_state=UNDEFINED demand_mshr_reserve=1 eventq_index=0 hit_latency=2 is_read_only=true max_miss_count=0 mshrs=4 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null prefetch_on_access=false prefetcher=Null response_latency=2 @@ -153,8 +178,13 @@ type=LRU assoc=2 block_size=64 clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 hit_latency=2 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sequential_access=false size=131072 @@ -181,12 +211,17 @@ addr_ranges=0:18446744073709551615 assoc=8 clk_domain=system.cpu_clk_domain clusivity=mostly_incl +default_p_state=UNDEFINED demand_mshr_reserve=1 eventq_index=0 hit_latency=20 is_read_only=false max_miss_count=0 mshrs=20 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null prefetch_on_access=false prefetcher=Null response_latency=20 @@ -205,8 +240,13 @@ type=LRU assoc=8 block_size=64 clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 hit_latency=20 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sequential_access=false size=2097152 @@ -214,10 +254,15 @@ size=2097152 type=CoherentXBar children=snoop_filter clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 forward_latency=0 frontend_latency=1 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 point_of_coherency=false +power_model=Null response_latency=1 snoop_filter=system.cpu.toL2Bus.snoop_filter snoop_response_latency=1 @@ -248,7 +293,7 @@ env= errout=cerr euid=100 eventq_index=0 -executable=/home/stever/hg/m5sim.org/gem5/tests/test-progs/hello/bin/mips/linux/hello +executable=/arm/projectscratch/randd/systems/dist/test-progs/hello/bin/mips/linux/hello gid=100 input=cin kvmInSE=false @@ -280,10 +325,15 @@ transition_latency=100000000 [system.membus] type=CoherentXBar clk_domain=system.clk_domain +default_p_state=UNDEFINED eventq_index=0 forward_latency=4 frontend_latency=3 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 point_of_coherency=true +power_model=Null response_latency=2 snoop_filter=Null snoop_response_latency=4 @@ -298,11 +348,16 @@ type=SimpleMemory bandwidth=73.000000 clk_domain=system.clk_domain conf_table_reported=true +default_p_state=UNDEFINED eventq_index=0 in_addr_map=true latency=30000 latency_var=0 null=false +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null range=0:134217727 port=system.membus.master[0] diff --git a/tests/quick/se/00.hello/ref/mips/linux/simple-timing/simerr b/tests/quick/se/00.hello/ref/mips/linux/simple-timing/simerr index 1a4f96712..aadc3d011 100755 --- a/tests/quick/se/00.hello/ref/mips/linux/simple-timing/simerr +++ b/tests/quick/se/00.hello/ref/mips/linux/simple-timing/simerr @@ -1 +1,2 @@ warn: Sockets disabled, not accepting gdb connections +warn: ClockedObject: More than one power state change request encountered within the same simulation 
tick diff --git a/tests/quick/se/00.hello/ref/mips/linux/simple-timing/simout b/tests/quick/se/00.hello/ref/mips/linux/simple-timing/simout index c38df8b63..9b7b607dc 100755 --- a/tests/quick/se/00.hello/ref/mips/linux/simple-timing/simout +++ b/tests/quick/se/00.hello/ref/mips/linux/simple-timing/simout @@ -3,10 +3,10 @@ Redirecting stderr to build/MIPS/tests/opt/quick/se/00.hello/mips/linux/simple-t gem5 Simulator System. http://gem5.org gem5 is copyrighted software; use the --copyright option for details. -gem5 compiled Mar 14 2016 22:04:10 -gem5 started Mar 14 2016 22:06:34 -gem5 executing on phenom, pid 29861 -command line: build/MIPS/gem5.opt -d build/MIPS/tests/opt/quick/se/00.hello/mips/linux/simple-timing -re /home/stever/hg/m5sim.org/gem5/tests/run.py build/MIPS/tests/opt/quick/se/00.hello/mips/linux/simple-timing +gem5 compiled Jul 21 2016 14:23:13 +gem5 started Jul 21 2016 14:23:47 +gem5 executing on e108600-lin, pid 13258 +command line: /work/curdun01/gem5-external.hg/build/MIPS/gem5.opt -d build/MIPS/tests/opt/quick/se/00.hello/mips/linux/simple-timing -re /work/curdun01/gem5-external.hg/tests/testing/../run.py quick/se/00.hello/mips/linux/simple-timing Global frequency set at 1000000000000 ticks per second info: Entering event queue @ 0. Starting simulation... diff --git a/tests/quick/se/00.hello/ref/mips/linux/simple-timing/stats.txt b/tests/quick/se/00.hello/ref/mips/linux/simple-timing/stats.txt index 5a06a8f5e..29abc2b26 100644 --- a/tests/quick/se/00.hello/ref/mips/linux/simple-timing/stats.txt +++ b/tests/quick/se/00.hello/ref/mips/linux/simple-timing/stats.txt @@ -4,11 +4,11 @@ sim_seconds 0.000034 # Nu sim_ticks 33932500 # Number of ticks simulated final_tick 33932500 # Number of ticks from beginning of simulation (restored from checkpoints and never reset) sim_freq 1000000000000 # Frequency of simulated ticks -host_inst_rate 497160 # Simulator instruction rate (inst/s) -host_op_rate 496749 # Simulator op (including micro ops) rate (op/s) -host_tick_rate 2985875640 # Simulator tick rate (ticks/s) -host_mem_usage 289632 # Number of bytes of host memory used -host_seconds 0.01 # Real time elapsed on the host +host_inst_rate 18620 # Simulator instruction rate (inst/s) +host_op_rate 18619 # Simulator op (including micro ops) rate (op/s) +host_tick_rate 111991731 # Simulator tick rate (ticks/s) +host_mem_usage 246380 # Number of bytes of host memory used +host_seconds 0.30 # Real time elapsed on the host sim_insts 5641 # Number of instructions simulated sim_ops 5641 # Number of ops (including micro ops) simulated system.voltage_domain.voltage 1 # Voltage in Volts @@ -461,6 +461,7 @@ system.cpu.toL2Bus.pkt_size_system.cpu.icache.mem_side::system.cpu.l2cache.cpu_s system.cpu.toL2Bus.pkt_size_system.cpu.dcache.mem_side::system.cpu.l2cache.cpu_side 8768 # Cumulative packet size per connected master and slave (bytes) system.cpu.toL2Bus.pkt_size::total 28480 # Cumulative packet size per connected master and slave (bytes) system.cpu.toL2Bus.snoops 0 # Total snoops (count) +system.cpu.toL2Bus.snoopTraffic 0 # Total snoop traffic (bytes) system.cpu.toL2Bus.snoop_fanout::samples 432 # Request fanout histogram system.cpu.toL2Bus.snoop_fanout::mean 0 # Request fanout histogram system.cpu.toL2Bus.snoop_fanout::stdev 0 # Request fanout histogram @@ -488,6 +489,7 @@ system.membus.pkt_count::total 860 # Pa system.membus.pkt_size_system.cpu.l2cache.mem_side::system.physmem.port 27520 # Cumulative packet size per connected master and slave (bytes) system.membus.pkt_size::total 27520 # 
Cumulative packet size per connected master and slave (bytes) system.membus.snoops 0 # Total snoops (count) +system.membus.snoopTraffic 0 # Total snoop traffic (bytes) system.membus.snoop_fanout::samples 430 # Request fanout histogram system.membus.snoop_fanout::mean 0 # Request fanout histogram system.membus.snoop_fanout::stdev 0 # Request fanout histogram diff --git a/tests/quick/se/00.hello/ref/power/linux/o3-timing/config.ini b/tests/quick/se/00.hello/ref/power/linux/o3-timing/config.ini index 0500fc3ab..11c8c38c9 100644 --- a/tests/quick/se/00.hello/ref/power/linux/o3-timing/config.ini +++ b/tests/quick/se/00.hello/ref/power/linux/o3-timing/config.ini @@ -14,6 +14,7 @@ children=clk_domain cpu cpu_clk_domain dvfs_handler membus physmem voltage_domai boot_osflags=a cache_line_size=64 clk_domain=system.clk_domain +default_p_state=UNDEFINED eventq_index=0 exit_on_work_items=false init_param=0 @@ -27,6 +28,10 @@ memories=system.physmem mmap_using_noreserve=false multi_thread=false num_work_ids=16 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null readfile= symbolfile= thermal_components= @@ -73,6 +78,7 @@ cpu_id=0 decodeToFetchDelay=1 decodeToRenameDelay=1 decodeWidth=8 +default_p_state=UNDEFINED dispatchWidth=8 do_checkpoint_insts=true do_quiesce=true @@ -109,6 +115,10 @@ numPhysIntRegs=256 numROBEntries=192 numRobs=1 numThreads=1 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null profile=0 progress_interval=0 renameToDecodeDelay=1 @@ -168,12 +178,17 @@ addr_ranges=0:18446744073709551615 assoc=2 clk_domain=system.cpu_clk_domain clusivity=mostly_incl +default_p_state=UNDEFINED demand_mshr_reserve=1 eventq_index=0 hit_latency=2 is_read_only=false max_miss_count=0 mshrs=4 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null prefetch_on_access=false prefetcher=Null response_latency=2 @@ -192,8 +207,13 @@ type=LRU assoc=2 block_size=64 clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 hit_latency=2 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sequential_access=false size=262144 @@ -516,12 +536,17 @@ addr_ranges=0:18446744073709551615 assoc=2 clk_domain=system.cpu_clk_domain clusivity=mostly_incl +default_p_state=UNDEFINED demand_mshr_reserve=1 eventq_index=0 hit_latency=2 is_read_only=true max_miss_count=0 mshrs=4 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null prefetch_on_access=false prefetcher=Null response_latency=2 @@ -540,8 +565,13 @@ type=LRU assoc=2 block_size=64 clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 hit_latency=2 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sequential_access=false size=131072 @@ -565,12 +595,17 @@ addr_ranges=0:18446744073709551615 assoc=8 clk_domain=system.cpu_clk_domain clusivity=mostly_incl +default_p_state=UNDEFINED demand_mshr_reserve=1 eventq_index=0 hit_latency=20 is_read_only=false max_miss_count=0 mshrs=20 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null prefetch_on_access=false prefetcher=Null response_latency=20 @@ -589,8 +624,13 @@ type=LRU assoc=8 block_size=64 clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 hit_latency=20 +p_state_clk_gate_bins=20 
+p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sequential_access=false size=2097152 @@ -598,10 +638,15 @@ size=2097152 type=CoherentXBar children=snoop_filter clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 forward_latency=0 frontend_latency=1 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 point_of_coherency=false +power_model=Null response_latency=1 snoop_filter=system.cpu.toL2Bus.snoop_filter snoop_response_latency=1 @@ -632,7 +677,7 @@ env= errout=cerr euid=100 eventq_index=0 -executable=/home/stever/hg/m5sim.org/gem5/tests/test-progs/hello/bin/power/linux/hello +executable=/arm/projectscratch/randd/systems/dist/test-progs/hello/bin/power/linux/hello gid=100 input=cin kvmInSE=false @@ -664,10 +709,15 @@ transition_latency=100000000 [system.membus] type=CoherentXBar clk_domain=system.clk_domain +default_p_state=UNDEFINED eventq_index=0 forward_latency=4 frontend_latency=3 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 point_of_coherency=true +power_model=Null response_latency=2 snoop_filter=Null snoop_response_latency=4 @@ -711,6 +761,7 @@ burst_length=8 channels=1 clk_domain=system.clk_domain conf_table_reported=true +default_p_state=UNDEFINED device_bus_width=8 device_rowbuffer_size=1024 device_size=536870912 @@ -722,7 +773,11 @@ max_accesses_per_row=16 mem_sched_policy=frfcfs min_writes_per_switch=16 null=false +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 page_policy=open_adaptive +power_model=Null range=0:134217727 ranks_per_channel=2 read_buffer_size=32 diff --git a/tests/quick/se/00.hello/ref/power/linux/o3-timing/simerr b/tests/quick/se/00.hello/ref/power/linux/o3-timing/simerr index 341b479f7..bbcd9d751 100755 --- a/tests/quick/se/00.hello/ref/power/linux/o3-timing/simerr +++ b/tests/quick/se/00.hello/ref/power/linux/o3-timing/simerr @@ -1,2 +1,3 @@ warn: DRAM device capacity (8192 Mbytes) does not match the address range assigned (128 Mbytes) warn: Sockets disabled, not accepting gdb connections +warn: ClockedObject: More than one power state change request encountered within the same simulation tick diff --git a/tests/quick/se/00.hello/ref/power/linux/o3-timing/simout b/tests/quick/se/00.hello/ref/power/linux/o3-timing/simout index 4c7495180..bd0101e05 100755 --- a/tests/quick/se/00.hello/ref/power/linux/o3-timing/simout +++ b/tests/quick/se/00.hello/ref/power/linux/o3-timing/simout @@ -1,12 +1,14 @@ +Redirecting stdout to build/POWER/tests/opt/quick/se/00.hello/power/linux/o3-timing/simout +Redirecting stderr to build/POWER/tests/opt/quick/se/00.hello/power/linux/o3-timing/simerr gem5 Simulator System. http://gem5.org gem5 is copyrighted software; use the --copyright option for details. 
-gem5 compiled Mar 13 2016 22:44:19 -gem5 started Mar 13 2016 22:49:48 -gem5 executing on phenom, pid 19921 -command line: build/POWER/gem5.opt -d build/POWER/tests/opt/quick/se/00.hello/power/linux/o3-timing -re /home/stever/hg/m5sim.org/gem5/tests/run.py build/POWER/tests/opt/quick/se/00.hello/power/linux/o3-timing +gem5 compiled Jul 21 2016 14:27:08 +gem5 started Jul 21 2016 14:27:33 +gem5 executing on e108600-lin, pid 27995 +command line: /work/curdun01/gem5-external.hg/build/POWER/gem5.opt -d build/POWER/tests/opt/quick/se/00.hello/power/linux/o3-timing -re /work/curdun01/gem5-external.hg/tests/testing/../run.py quick/se/00.hello/power/linux/o3-timing Global frequency set at 1000000000000 ticks per second info: Entering event queue @ 0. Starting simulation... Hello world! -Exiting @ tick 19923000 because target called exit() +Exiting @ tick 19908000 because target called exit() diff --git a/tests/quick/se/00.hello/ref/power/linux/o3-timing/stats.txt b/tests/quick/se/00.hello/ref/power/linux/o3-timing/stats.txt index c26ae805a..a1b1af10d 100644 --- a/tests/quick/se/00.hello/ref/power/linux/o3-timing/stats.txt +++ b/tests/quick/se/00.hello/ref/power/linux/o3-timing/stats.txt @@ -4,11 +4,11 @@ sim_seconds 0.000020 # Nu sim_ticks 19908000 # Number of ticks simulated final_tick 19908000 # Number of ticks from beginning of simulation (restored from checkpoints and never reset) sim_freq 1000000000000 # Frequency of simulated ticks -host_inst_rate 67828 # Simulator instruction rate (inst/s) -host_op_rate 67820 # Simulator op (including micro ops) rate (op/s) -host_tick_rate 233087583 # Simulator tick rate (ticks/s) -host_mem_usage 290888 # Number of bytes of host memory used -host_seconds 0.09 # Real time elapsed on the host +host_inst_rate 79311 # Simulator instruction rate (inst/s) +host_op_rate 79299 # Simulator op (including micro ops) rate (op/s) +host_tick_rate 272523705 # Simulator tick rate (ticks/s) +host_mem_usage 246096 # Number of bytes of host memory used +host_seconds 0.07 # Real time elapsed on the host sim_insts 5792 # Number of instructions simulated sim_ops 5792 # Number of ops (including micro ops) simulated system.voltage_domain.voltage 1 # Voltage in Volts @@ -937,6 +937,7 @@ system.cpu.toL2Bus.pkt_size_system.cpu.icache.mem_side::system.cpu.l2cache.cpu_s system.cpu.toL2Bus.pkt_size_system.cpu.dcache.mem_side::system.cpu.l2cache.cpu_side 6528 # Cumulative packet size per connected master and slave (bytes) system.cpu.toL2Bus.pkt_size::total 28864 # Cumulative packet size per connected master and slave (bytes) system.cpu.toL2Bus.snoops 0 # Total snoops (count) +system.cpu.toL2Bus.snoopTraffic 0 # Total snoop traffic (bytes) system.cpu.toL2Bus.snoop_fanout::samples 454 # Request fanout histogram system.cpu.toL2Bus.snoop_fanout::mean 0.017621 # Request fanout histogram system.cpu.toL2Bus.snoop_fanout::stdev 0.131715 # Request fanout histogram @@ -964,6 +965,7 @@ system.membus.pkt_count::total 888 # Pa system.membus.pkt_size_system.cpu.l2cache.mem_side::system.physmem.port 28352 # Cumulative packet size per connected master and slave (bytes) system.membus.pkt_size::total 28352 # Cumulative packet size per connected master and slave (bytes) system.membus.snoops 0 # Total snoops (count) +system.membus.snoopTraffic 0 # Total snoop traffic (bytes) system.membus.snoop_fanout::samples 445 # Request fanout histogram system.membus.snoop_fanout::mean 0 # Request fanout histogram system.membus.snoop_fanout::stdev 0 # Request fanout histogram diff --git 
a/tests/quick/se/00.hello/ref/power/linux/simple-atomic/config.ini b/tests/quick/se/00.hello/ref/power/linux/simple-atomic/config.ini index ae31c4fe2..b654cdd15 100644 --- a/tests/quick/se/00.hello/ref/power/linux/simple-atomic/config.ini +++ b/tests/quick/se/00.hello/ref/power/linux/simple-atomic/config.ini @@ -14,6 +14,7 @@ children=clk_domain cpu cpu_clk_domain dvfs_handler membus physmem voltage_domai boot_osflags=a cache_line_size=64 clk_domain=system.clk_domain +default_p_state=UNDEFINED eventq_index=0 exit_on_work_items=false init_param=0 @@ -27,6 +28,10 @@ memories=system.physmem mmap_using_noreserve=false multi_thread=false num_work_ids=16 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null readfile= symbolfile= thermal_components= @@ -56,6 +61,7 @@ branchPred=Null checker=Null clk_domain=system.cpu_clk_domain cpu_id=0 +default_p_state=UNDEFINED do_checkpoint_insts=true do_quiesce=true do_statistics_insts=true @@ -72,6 +78,10 @@ max_insts_any_thread=0 max_loads_all_threads=0 max_loads_any_thread=0 numThreads=1 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null profile=0 progress_interval=0 simpoint_start_insts= @@ -118,7 +128,7 @@ env= errout=cerr euid=100 eventq_index=0 -executable=/dist/m5/regression/test-progs/hello/bin/power/linux/hello +executable=/arm/projectscratch/randd/systems/dist/test-progs/hello/bin/power/linux/hello gid=100 input=cin kvmInSE=false @@ -150,10 +160,15 @@ transition_latency=100000000 [system.membus] type=CoherentXBar clk_domain=system.clk_domain +default_p_state=UNDEFINED eventq_index=0 forward_latency=4 frontend_latency=3 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 point_of_coherency=true +power_model=Null response_latency=2 snoop_filter=Null snoop_response_latency=4 @@ -168,11 +183,16 @@ type=SimpleMemory bandwidth=73.000000 clk_domain=system.clk_domain conf_table_reported=true +default_p_state=UNDEFINED eventq_index=0 in_addr_map=true latency=30000 latency_var=0 null=false +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null range=0:134217727 port=system.membus.master[0] diff --git a/tests/quick/se/00.hello/ref/power/linux/simple-atomic/simerr b/tests/quick/se/00.hello/ref/power/linux/simple-atomic/simerr index 1a4f96712..aadc3d011 100755 --- a/tests/quick/se/00.hello/ref/power/linux/simple-atomic/simerr +++ b/tests/quick/se/00.hello/ref/power/linux/simple-atomic/simerr @@ -1 +1,2 @@ warn: Sockets disabled, not accepting gdb connections +warn: ClockedObject: More than one power state change request encountered within the same simulation tick diff --git a/tests/quick/se/00.hello/ref/power/linux/simple-atomic/simout b/tests/quick/se/00.hello/ref/power/linux/simple-atomic/simout index 0c347fd1b..cbf63eeba 100755 --- a/tests/quick/se/00.hello/ref/power/linux/simple-atomic/simout +++ b/tests/quick/se/00.hello/ref/power/linux/simple-atomic/simout @@ -1,10 +1,12 @@ +Redirecting stdout to build/POWER/tests/opt/quick/se/00.hello/power/linux/simple-atomic/simout +Redirecting stderr to build/POWER/tests/opt/quick/se/00.hello/power/linux/simple-atomic/simerr gem5 Simulator System. http://gem5.org gem5 is copyrighted software; use the --copyright option for details. 
-gem5 compiled Jan 21 2016 14:25:19 -gem5 started Jan 21 2016 14:25:54 -gem5 executing on zizzer, pid 3347 -command line: build/POWER/gem5.opt -d build/POWER/tests/opt/quick/se/00.hello/power/linux/simple-atomic -re /z/atgutier/gem5/gem5-commit/tests/run.py build/POWER/tests/opt/quick/se/00.hello/power/linux/simple-atomic +gem5 compiled Jul 21 2016 14:27:08 +gem5 started Jul 21 2016 14:27:33 +gem5 executing on e108600-lin, pid 28000 +command line: /work/curdun01/gem5-external.hg/build/POWER/gem5.opt -d build/POWER/tests/opt/quick/se/00.hello/power/linux/simple-atomic -re /work/curdun01/gem5-external.hg/tests/testing/../run.py quick/se/00.hello/power/linux/simple-atomic Global frequency set at 1000000000000 ticks per second info: Entering event queue @ 0. Starting simulation... diff --git a/tests/quick/se/00.hello/ref/power/linux/simple-atomic/stats.txt b/tests/quick/se/00.hello/ref/power/linux/simple-atomic/stats.txt index 7771c9798..c2dda4058 100644 --- a/tests/quick/se/00.hello/ref/power/linux/simple-atomic/stats.txt +++ b/tests/quick/se/00.hello/ref/power/linux/simple-atomic/stats.txt @@ -4,10 +4,10 @@ sim_seconds 0.000003 # Nu sim_ticks 2896000 # Number of ticks simulated final_tick 2896000 # Number of ticks from beginning of simulation (restored from checkpoints and never reset) sim_freq 1000000000000 # Frequency of simulated ticks -host_inst_rate 806520 # Simulator instruction rate (inst/s) -host_op_rate 805428 # Simulator op (including micro ops) rate (op/s) -host_tick_rate 402167874 # Simulator tick rate (ticks/s) -host_mem_usage 277804 # Number of bytes of host memory used +host_inst_rate 667625 # Simulator instruction rate (inst/s) +host_op_rate 666766 # Simulator op (including micro ops) rate (op/s) +host_tick_rate 332924076 # Simulator tick rate (ticks/s) +host_mem_usage 235336 # Number of bytes of host memory used host_seconds 0.01 # Real time elapsed on the host sim_insts 5793 # Number of instructions simulated sim_ops 5793 # Number of ops (including micro ops) simulated @@ -128,6 +128,7 @@ system.membus.pkt_size_system.cpu.icache_port::system.physmem.port 23172 system.membus.pkt_size_system.cpu.dcache_port::system.physmem.port 7929 # Cumulative packet size per connected master and slave (bytes) system.membus.pkt_size::total 31101 # Cumulative packet size per connected master and slave (bytes) system.membus.snoops 0 # Total snoops (count) +system.membus.snoopTraffic 0 # Total snoop traffic (bytes) system.membus.snoop_fanout::samples 7800 # Request fanout histogram system.membus.snoop_fanout::mean 0.742692 # Request fanout histogram system.membus.snoop_fanout::stdev 0.437178 # Request fanout histogram diff --git a/tests/quick/se/00.hello/ref/sparc/linux/simple-atomic/config.ini b/tests/quick/se/00.hello/ref/sparc/linux/simple-atomic/config.ini index bf044682b..c79232133 100644 --- a/tests/quick/se/00.hello/ref/sparc/linux/simple-atomic/config.ini +++ b/tests/quick/se/00.hello/ref/sparc/linux/simple-atomic/config.ini @@ -14,6 +14,7 @@ children=clk_domain cpu cpu_clk_domain dvfs_handler membus physmem voltage_domai boot_osflags=a cache_line_size=64 clk_domain=system.clk_domain +default_p_state=UNDEFINED eventq_index=0 exit_on_work_items=false init_param=0 @@ -27,6 +28,10 @@ memories=system.physmem mmap_using_noreserve=false multi_thread=false num_work_ids=16 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null readfile= symbolfile= thermal_components= @@ -55,6 +60,7 @@ branchPred=Null checker=Null 
clk_domain=system.cpu_clk_domain cpu_id=0 +default_p_state=UNDEFINED do_checkpoint_insts=true do_quiesce=true do_statistics_insts=true @@ -71,6 +77,10 @@ max_insts_any_thread=0 max_loads_all_threads=0 max_loads_any_thread=0 numThreads=1 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null profile=0 progress_interval=0 simpoint_start_insts= @@ -117,7 +127,7 @@ env= errout=cerr euid=100 eventq_index=0 -executable=/dist/m5/regression/test-progs/hello/bin/sparc/linux/hello +executable=/arm/projectscratch/randd/systems/dist/test-progs/hello/bin/sparc/linux/hello gid=100 input=cin kvmInSE=false @@ -149,10 +159,15 @@ transition_latency=100000000 [system.membus] type=CoherentXBar clk_domain=system.clk_domain +default_p_state=UNDEFINED eventq_index=0 forward_latency=4 frontend_latency=3 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 point_of_coherency=true +power_model=Null response_latency=2 snoop_filter=Null snoop_response_latency=4 @@ -167,11 +182,16 @@ type=SimpleMemory bandwidth=73.000000 clk_domain=system.clk_domain conf_table_reported=true +default_p_state=UNDEFINED eventq_index=0 in_addr_map=true latency=30000 latency_var=0 null=false +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null range=0:134217727 port=system.membus.master[0] diff --git a/tests/quick/se/00.hello/ref/sparc/linux/simple-atomic/simerr b/tests/quick/se/00.hello/ref/sparc/linux/simple-atomic/simerr index 1a4f96712..aadc3d011 100755 --- a/tests/quick/se/00.hello/ref/sparc/linux/simple-atomic/simerr +++ b/tests/quick/se/00.hello/ref/sparc/linux/simple-atomic/simerr @@ -1 +1,2 @@ warn: Sockets disabled, not accepting gdb connections +warn: ClockedObject: More than one power state change request encountered within the same simulation tick diff --git a/tests/quick/se/00.hello/ref/sparc/linux/simple-atomic/simout b/tests/quick/se/00.hello/ref/sparc/linux/simple-atomic/simout index 4be93416d..0783a6d90 100755 --- a/tests/quick/se/00.hello/ref/sparc/linux/simple-atomic/simout +++ b/tests/quick/se/00.hello/ref/sparc/linux/simple-atomic/simout @@ -1,10 +1,12 @@ +Redirecting stdout to build/SPARC/tests/opt/quick/se/00.hello/sparc/linux/simple-atomic/simout +Redirecting stderr to build/SPARC/tests/opt/quick/se/00.hello/sparc/linux/simple-atomic/simerr gem5 Simulator System. http://gem5.org gem5 is copyrighted software; use the --copyright option for details. -gem5 compiled Jan 21 2016 14:30:54 -gem5 started Jan 21 2016 14:31:25 -gem5 executing on zizzer, pid 8711 -command line: build/SPARC/gem5.opt -d build/SPARC/tests/opt/quick/se/00.hello/sparc/linux/simple-atomic -re /z/atgutier/gem5/gem5-commit/tests/run.py build/SPARC/tests/opt/quick/se/00.hello/sparc/linux/simple-atomic +gem5 compiled Jul 21 2016 14:30:06 +gem5 started Jul 21 2016 14:30:36 +gem5 executing on e108600-lin, pid 38676 +command line: /work/curdun01/gem5-external.hg/build/SPARC/gem5.opt -d build/SPARC/tests/opt/quick/se/00.hello/sparc/linux/simple-atomic -re /work/curdun01/gem5-external.hg/tests/testing/../run.py quick/se/00.hello/sparc/linux/simple-atomic Global frequency set at 1000000000000 ticks per second info: Entering event queue @ 0. Starting simulation... 
diff --git a/tests/quick/se/00.hello/ref/sparc/linux/simple-atomic/stats.txt b/tests/quick/se/00.hello/ref/sparc/linux/simple-atomic/stats.txt index 5963e613d..c6c8dc595 100644 --- a/tests/quick/se/00.hello/ref/sparc/linux/simple-atomic/stats.txt +++ b/tests/quick/se/00.hello/ref/sparc/linux/simple-atomic/stats.txt @@ -4,10 +4,10 @@ sim_seconds 0.000003 # Nu sim_ticks 2694500 # Number of ticks simulated final_tick 2694500 # Number of ticks from beginning of simulation (restored from checkpoints and never reset) sim_freq 1000000000000 # Frequency of simulated ticks -host_inst_rate 770174 # Simulator instruction rate (inst/s) -host_op_rate 769174 # Simulator op (including micro ops) rate (op/s) -host_tick_rate 388618840 # Simulator tick rate (ticks/s) -host_mem_usage 281084 # Number of bytes of host memory used +host_inst_rate 588885 # Simulator instruction rate (inst/s) +host_op_rate 587162 # Simulator op (including micro ops) rate (op/s) +host_tick_rate 296343205 # Simulator tick rate (ticks/s) +host_mem_usage 237088 # Number of bytes of host memory used host_seconds 0.01 # Real time elapsed on the host sim_insts 5327 # Number of instructions simulated sim_ops 5327 # Number of ops (including micro ops) simulated @@ -110,6 +110,7 @@ system.membus.pkt_size_system.cpu.icache_port::system.physmem.port 21480 system.membus.pkt_size_system.cpu.dcache_port::system.physmem.port 9667 # Cumulative packet size per connected master and slave (bytes) system.membus.pkt_size::total 31147 # Cumulative packet size per connected master and slave (bytes) system.membus.snoops 0 # Total snoops (count) +system.membus.snoopTraffic 0 # Total snoop traffic (bytes) system.membus.snoop_fanout::samples 6758 # Request fanout histogram system.membus.snoop_fanout::mean 0.794614 # Request fanout histogram system.membus.snoop_fanout::stdev 0.404013 # Request fanout histogram diff --git a/tests/quick/se/00.hello/ref/sparc/linux/simple-timing/config.ini b/tests/quick/se/00.hello/ref/sparc/linux/simple-timing/config.ini index 75416425d..467bc0996 100644 --- a/tests/quick/se/00.hello/ref/sparc/linux/simple-timing/config.ini +++ b/tests/quick/se/00.hello/ref/sparc/linux/simple-timing/config.ini @@ -14,6 +14,7 @@ children=clk_domain cpu cpu_clk_domain dvfs_handler membus physmem voltage_domai boot_osflags=a cache_line_size=64 clk_domain=system.clk_domain +default_p_state=UNDEFINED eventq_index=0 exit_on_work_items=false init_param=0 @@ -27,6 +28,10 @@ memories=system.physmem mmap_using_noreserve=false multi_thread=false num_work_ids=16 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null readfile= symbolfile= thermal_components= @@ -55,6 +60,7 @@ branchPred=Null checker=Null clk_domain=system.cpu_clk_domain cpu_id=0 +default_p_state=UNDEFINED do_checkpoint_insts=true do_quiesce=true do_statistics_insts=true @@ -70,6 +76,10 @@ max_insts_any_thread=0 max_loads_all_threads=0 max_loads_any_thread=0 numThreads=1 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null profile=0 progress_interval=0 simpoint_start_insts= @@ -88,12 +98,17 @@ addr_ranges=0:18446744073709551615 assoc=2 clk_domain=system.cpu_clk_domain clusivity=mostly_incl +default_p_state=UNDEFINED demand_mshr_reserve=1 eventq_index=0 hit_latency=2 is_read_only=false max_miss_count=0 mshrs=4 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null prefetch_on_access=false prefetcher=Null response_latency=2 @@ -112,8 
+127,13 @@ type=LRU assoc=2 block_size=64 clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 hit_latency=2 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sequential_access=false size=262144 @@ -129,12 +149,17 @@ addr_ranges=0:18446744073709551615 assoc=2 clk_domain=system.cpu_clk_domain clusivity=mostly_incl +default_p_state=UNDEFINED demand_mshr_reserve=1 eventq_index=0 hit_latency=2 is_read_only=true max_miss_count=0 mshrs=4 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null prefetch_on_access=false prefetcher=Null response_latency=2 @@ -153,8 +178,13 @@ type=LRU assoc=2 block_size=64 clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 hit_latency=2 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sequential_access=false size=131072 @@ -178,12 +208,17 @@ addr_ranges=0:18446744073709551615 assoc=8 clk_domain=system.cpu_clk_domain clusivity=mostly_incl +default_p_state=UNDEFINED demand_mshr_reserve=1 eventq_index=0 hit_latency=20 is_read_only=false max_miss_count=0 mshrs=20 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null prefetch_on_access=false prefetcher=Null response_latency=20 @@ -202,8 +237,13 @@ type=LRU assoc=8 block_size=64 clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 hit_latency=20 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sequential_access=false size=2097152 @@ -211,10 +251,15 @@ size=2097152 type=CoherentXBar children=snoop_filter clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 forward_latency=0 frontend_latency=1 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 point_of_coherency=false +power_model=Null response_latency=1 snoop_filter=system.cpu.toL2Bus.snoop_filter snoop_response_latency=1 @@ -245,7 +290,7 @@ env= errout=cerr euid=100 eventq_index=0 -executable=/dist/m5/regression/test-progs/hello/bin/sparc/linux/hello +executable=/arm/projectscratch/randd/systems/dist/test-progs/hello/bin/sparc/linux/hello gid=100 input=cin kvmInSE=false @@ -277,10 +322,15 @@ transition_latency=100000000 [system.membus] type=CoherentXBar clk_domain=system.clk_domain +default_p_state=UNDEFINED eventq_index=0 forward_latency=4 frontend_latency=3 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 point_of_coherency=true +power_model=Null response_latency=2 snoop_filter=Null snoop_response_latency=4 @@ -295,11 +345,16 @@ type=SimpleMemory bandwidth=73.000000 clk_domain=system.clk_domain conf_table_reported=true +default_p_state=UNDEFINED eventq_index=0 in_addr_map=true latency=30000 latency_var=0 null=false +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null range=0:134217727 port=system.membus.master[0] diff --git a/tests/quick/se/00.hello/ref/sparc/linux/simple-timing/simerr b/tests/quick/se/00.hello/ref/sparc/linux/simple-timing/simerr index 1a4f96712..aadc3d011 100755 --- a/tests/quick/se/00.hello/ref/sparc/linux/simple-timing/simerr +++ b/tests/quick/se/00.hello/ref/sparc/linux/simple-timing/simerr @@ -1 +1,2 @@ warn: Sockets disabled, not accepting gdb connections +warn: ClockedObject: More than one power state change request encountered within the same simulation 
tick diff --git a/tests/quick/se/00.hello/ref/sparc/linux/simple-timing/simout b/tests/quick/se/00.hello/ref/sparc/linux/simple-timing/simout index 0694c2e26..a65457027 100755 --- a/tests/quick/se/00.hello/ref/sparc/linux/simple-timing/simout +++ b/tests/quick/se/00.hello/ref/sparc/linux/simple-timing/simout @@ -1,10 +1,12 @@ +Redirecting stdout to build/SPARC/tests/opt/quick/se/00.hello/sparc/linux/simple-timing/simout +Redirecting stderr to build/SPARC/tests/opt/quick/se/00.hello/sparc/linux/simple-timing/simerr gem5 Simulator System. http://gem5.org gem5 is copyrighted software; use the --copyright option for details. -gem5 compiled Jan 21 2016 14:30:54 -gem5 started Jan 21 2016 14:31:24 -gem5 executing on zizzer, pid 8705 -command line: build/SPARC/gem5.opt -d build/SPARC/tests/opt/quick/se/00.hello/sparc/linux/simple-timing -re /z/atgutier/gem5/gem5-commit/tests/run.py build/SPARC/tests/opt/quick/se/00.hello/sparc/linux/simple-timing +gem5 compiled Jul 21 2016 14:30:06 +gem5 started Jul 21 2016 14:30:36 +gem5 executing on e108600-lin, pid 38670 +command line: /work/curdun01/gem5-external.hg/build/SPARC/gem5.opt -d build/SPARC/tests/opt/quick/se/00.hello/sparc/linux/simple-timing -re /work/curdun01/gem5-external.hg/tests/testing/../run.py quick/se/00.hello/sparc/linux/simple-timing Global frequency set at 1000000000000 ticks per second info: Entering event queue @ 0. Starting simulation... diff --git a/tests/quick/se/00.hello/ref/sparc/linux/simple-timing/stats.txt b/tests/quick/se/00.hello/ref/sparc/linux/simple-timing/stats.txt index 77136ce08..61bfb723b 100644 --- a/tests/quick/se/00.hello/ref/sparc/linux/simple-timing/stats.txt +++ b/tests/quick/se/00.hello/ref/sparc/linux/simple-timing/stats.txt @@ -4,11 +4,11 @@ sim_seconds 0.000031 # Nu sim_ticks 30526500 # Number of ticks simulated final_tick 30526500 # Number of ticks from beginning of simulation (restored from checkpoints and never reset) sim_freq 1000000000000 # Frequency of simulated ticks -host_inst_rate 412582 # Simulator instruction rate (inst/s) -host_op_rate 412293 # Simulator op (including micro ops) rate (op/s) -host_tick_rate 2361216779 # Simulator tick rate (ticks/s) -host_mem_usage 290052 # Number of bytes of host memory used -host_seconds 0.01 # Real time elapsed on the host +host_inst_rate 232577 # Simulator instruction rate (inst/s) +host_op_rate 232336 # Simulator op (including micro ops) rate (op/s) +host_tick_rate 1330299281 # Simulator tick rate (ticks/s) +host_mem_usage 247080 # Number of bytes of host memory used +host_seconds 0.02 # Real time elapsed on the host sim_insts 5327 # Number of instructions simulated sim_ops 5327 # Number of ops (including micro ops) simulated system.voltage_domain.voltage 1 # Voltage in Volts @@ -440,6 +440,7 @@ system.cpu.toL2Bus.pkt_size_system.cpu.icache.mem_side::system.cpu.l2cache.cpu_s system.cpu.toL2Bus.pkt_size_system.cpu.dcache.mem_side::system.cpu.l2cache.cpu_side 8640 # Cumulative packet size per connected master and slave (bytes) system.cpu.toL2Bus.pkt_size::total 25088 # Cumulative packet size per connected master and slave (bytes) system.cpu.toL2Bus.snoops 0 # Total snoops (count) +system.cpu.toL2Bus.snoopTraffic 0 # Total snoop traffic (bytes) system.cpu.toL2Bus.snoop_fanout::samples 392 # Request fanout histogram system.cpu.toL2Bus.snoop_fanout::mean 0.007653 # Request fanout histogram system.cpu.toL2Bus.snoop_fanout::stdev 0.087258 # Request fanout histogram @@ -467,6 +468,7 @@ system.membus.pkt_count::total 778 # Pa 
system.membus.pkt_size_system.cpu.l2cache.mem_side::system.physmem.port 24896 # Cumulative packet size per connected master and slave (bytes) system.membus.pkt_size::total 24896 # Cumulative packet size per connected master and slave (bytes) system.membus.snoops 0 # Total snoops (count) +system.membus.snoopTraffic 0 # Total snoop traffic (bytes) system.membus.snoop_fanout::samples 389 # Request fanout histogram system.membus.snoop_fanout::mean 0 # Request fanout histogram system.membus.snoop_fanout::stdev 0 # Request fanout histogram diff --git a/tests/quick/se/00.hello/ref/x86/linux/o3-timing/config.ini b/tests/quick/se/00.hello/ref/x86/linux/o3-timing/config.ini index 81a7b3677..8fda1a50c 100644 --- a/tests/quick/se/00.hello/ref/x86/linux/o3-timing/config.ini +++ b/tests/quick/se/00.hello/ref/x86/linux/o3-timing/config.ini @@ -14,6 +14,7 @@ children=clk_domain cpu cpu_clk_domain dvfs_handler membus physmem voltage_domai boot_osflags=a cache_line_size=64 clk_domain=system.clk_domain +default_p_state=UNDEFINED eventq_index=0 exit_on_work_items=false init_param=0 @@ -27,6 +28,10 @@ memories=system.physmem mmap_using_noreserve=false multi_thread=false num_work_ids=16 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null readfile= symbolfile= thermal_components= @@ -72,6 +77,7 @@ cpu_id=0 decodeToFetchDelay=1 decodeToRenameDelay=1 decodeWidth=8 +default_p_state=UNDEFINED dispatchWidth=8 do_checkpoint_insts=true do_quiesce=true @@ -108,6 +114,10 @@ numPhysIntRegs=256 numROBEntries=192 numRobs=1 numThreads=1 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null profile=0 progress_interval=0 renameToDecodeDelay=1 @@ -173,12 +183,17 @@ addr_ranges=0:18446744073709551615 assoc=2 clk_domain=system.cpu_clk_domain clusivity=mostly_incl +default_p_state=UNDEFINED demand_mshr_reserve=1 eventq_index=0 hit_latency=2 is_read_only=false max_miss_count=0 mshrs=4 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null prefetch_on_access=false prefetcher=Null response_latency=2 @@ -197,8 +212,13 @@ type=LRU assoc=2 block_size=64 clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 hit_latency=2 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sequential_access=false size=262144 @@ -212,8 +232,13 @@ walker=system.cpu.dtb.walker [system.cpu.dtb.walker] type=X86PagetableWalker clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 num_squash_per_cycle=4 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null system=system port=system.cpu.toL2Bus.slave[3] @@ -531,12 +556,17 @@ addr_ranges=0:18446744073709551615 assoc=2 clk_domain=system.cpu_clk_domain clusivity=mostly_incl +default_p_state=UNDEFINED demand_mshr_reserve=1 eventq_index=0 hit_latency=2 is_read_only=true max_miss_count=0 mshrs=4 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null prefetch_on_access=false prefetcher=Null response_latency=2 @@ -555,18 +585,28 @@ type=LRU assoc=2 block_size=64 clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 hit_latency=2 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sequential_access=false size=131072 [system.cpu.interrupts] type=X86LocalApic 
clk_domain=system.cpu.apic_clk_domain +default_p_state=UNDEFINED eventq_index=0 int_latency=1000 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 pio_addr=2305843009213693952 pio_latency=100000 +power_model=Null system=system int_master=system.membus.slave[2] int_slave=system.membus.master[2] @@ -586,8 +626,13 @@ walker=system.cpu.itb.walker [system.cpu.itb.walker] type=X86PagetableWalker clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 num_squash_per_cycle=4 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null system=system port=system.cpu.toL2Bus.slave[2] @@ -598,12 +643,17 @@ addr_ranges=0:18446744073709551615 assoc=8 clk_domain=system.cpu_clk_domain clusivity=mostly_incl +default_p_state=UNDEFINED demand_mshr_reserve=1 eventq_index=0 hit_latency=20 is_read_only=false max_miss_count=0 mshrs=20 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null prefetch_on_access=false prefetcher=Null response_latency=20 @@ -622,8 +672,13 @@ type=LRU assoc=8 block_size=64 clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 hit_latency=20 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sequential_access=false size=2097152 @@ -631,10 +686,15 @@ size=2097152 type=CoherentXBar children=snoop_filter clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 forward_latency=0 frontend_latency=1 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 point_of_coherency=false +power_model=Null response_latency=1 snoop_filter=system.cpu.toL2Bus.snoop_filter snoop_response_latency=1 @@ -665,7 +725,7 @@ env= errout=cerr euid=100 eventq_index=0 -executable=/home/stever/hg/m5sim.org/gem5/tests/test-progs/hello/bin/x86/linux/hello +executable=/arm/projectscratch/randd/systems/dist/test-progs/hello/bin/x86/linux/hello gid=100 input=cin kvmInSE=false @@ -697,10 +757,15 @@ transition_latency=100000000 [system.membus] type=CoherentXBar clk_domain=system.clk_domain +default_p_state=UNDEFINED eventq_index=0 forward_latency=4 frontend_latency=3 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 point_of_coherency=true +power_model=Null response_latency=2 snoop_filter=Null snoop_response_latency=4 @@ -744,6 +809,7 @@ burst_length=8 channels=1 clk_domain=system.clk_domain conf_table_reported=true +default_p_state=UNDEFINED device_bus_width=8 device_rowbuffer_size=1024 device_size=536870912 @@ -755,7 +821,11 @@ max_accesses_per_row=16 mem_sched_policy=frfcfs min_writes_per_switch=16 null=false +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 page_policy=open_adaptive +power_model=Null range=0:134217727 ranks_per_channel=2 read_buffer_size=32 diff --git a/tests/quick/se/00.hello/ref/x86/linux/o3-timing/simerr b/tests/quick/se/00.hello/ref/x86/linux/o3-timing/simerr index 341b479f7..bbcd9d751 100755 --- a/tests/quick/se/00.hello/ref/x86/linux/o3-timing/simerr +++ b/tests/quick/se/00.hello/ref/x86/linux/o3-timing/simerr @@ -1,2 +1,3 @@ warn: DRAM device capacity (8192 Mbytes) does not match the address range assigned (128 Mbytes) warn: Sockets disabled, not accepting gdb connections +warn: ClockedObject: More than one power state change request encountered within the same simulation tick diff --git 
a/tests/quick/se/00.hello/ref/x86/linux/o3-timing/simout b/tests/quick/se/00.hello/ref/x86/linux/o3-timing/simout index 2ebb8dfe8..8cf3e8140 100755 --- a/tests/quick/se/00.hello/ref/x86/linux/o3-timing/simout +++ b/tests/quick/se/00.hello/ref/x86/linux/o3-timing/simout @@ -1,12 +1,14 @@ +Redirecting stdout to build/X86/tests/opt/quick/se/00.hello/x86/linux/o3-timing/simout +Redirecting stderr to build/X86/tests/opt/quick/se/00.hello/x86/linux/o3-timing/simerr gem5 Simulator System. http://gem5.org gem5 is copyrighted software; use the --copyright option for details. -gem5 compiled Mar 13 2016 22:47:14 -gem5 started Mar 13 2016 22:50:36 -gem5 executing on phenom, pid 19928 -command line: build/X86/gem5.opt -d build/X86/tests/opt/quick/se/00.hello/x86/linux/o3-timing -re /home/stever/hg/m5sim.org/gem5/tests/run.py build/X86/tests/opt/quick/se/00.hello/x86/linux/o3-timing +gem5 compiled Jul 21 2016 14:35:23 +gem5 started Jul 21 2016 14:36:18 +gem5 executing on e108600-lin, pid 18560 +command line: /work/curdun01/gem5-external.hg/build/X86/gem5.opt -d build/X86/tests/opt/quick/se/00.hello/x86/linux/o3-timing -re /work/curdun01/gem5-external.hg/tests/testing/../run.py quick/se/00.hello/x86/linux/o3-timing Global frequency set at 1000000000000 ticks per second info: Entering event queue @ 0. Starting simulation... Hello world! -Exiting @ tick 20818000 because target called exit() +Exiting @ tick 21273500 because target called exit() diff --git a/tests/quick/se/00.hello/ref/x86/linux/o3-timing/stats.txt b/tests/quick/se/00.hello/ref/x86/linux/o3-timing/stats.txt index 5bbab77d0..07049f339 100644 --- a/tests/quick/se/00.hello/ref/x86/linux/o3-timing/stats.txt +++ b/tests/quick/se/00.hello/ref/x86/linux/o3-timing/stats.txt @@ -4,11 +4,11 @@ sim_seconds 0.000021 # Nu sim_ticks 21273500 # Number of ticks simulated final_tick 21273500 # Number of ticks from beginning of simulation (restored from checkpoints and never reset) sim_freq 1000000000000 # Frequency of simulated ticks -host_inst_rate 60676 # Simulator instruction rate (inst/s) -host_op_rate 109916 # Simulator op (including micro ops) rate (op/s) -host_tick_rate 239878032 # Simulator tick rate (ticks/s) -host_mem_usage 312536 # Number of bytes of host memory used -host_seconds 0.09 # Real time elapsed on the host +host_inst_rate 32077 # Simulator instruction rate (inst/s) +host_op_rate 58109 # Simulator op (including micro ops) rate (op/s) +host_tick_rate 126812951 # Simulator tick rate (ticks/s) +host_mem_usage 266996 # Number of bytes of host memory used +host_seconds 0.17 # Real time elapsed on the host sim_insts 5380 # Number of instructions simulated sim_ops 9747 # Number of ops (including micro ops) simulated system.voltage_domain.voltage 1 # Voltage in Volts @@ -918,6 +918,7 @@ system.cpu.toL2Bus.pkt_size_system.cpu.icache.mem_side::system.cpu.l2cache.cpu_s system.cpu.toL2Bus.pkt_size_system.cpu.dcache.mem_side::system.cpu.l2cache.cpu_side 8896 # Cumulative packet size per connected master and slave (bytes) system.cpu.toL2Bus.pkt_size::total 26688 # Cumulative packet size per connected master and slave (bytes) system.cpu.toL2Bus.snoops 0 # Total snoops (count) +system.cpu.toL2Bus.snoopTraffic 0 # Total snoop traffic (bytes) system.cpu.toL2Bus.snoop_fanout::samples 417 # Request fanout histogram system.cpu.toL2Bus.snoop_fanout::mean 0.002398 # Request fanout histogram system.cpu.toL2Bus.snoop_fanout::stdev 0.048970 # Request fanout histogram @@ -947,6 +948,7 @@ system.membus.pkt_size_system.cpu.l2cache.mem_side::system.physmem.port 2 
system.membus.pkt_size_system.cpu.l2cache.mem_side::total 26624 # Cumulative packet size per connected master and slave (bytes) system.membus.pkt_size::total 26624 # Cumulative packet size per connected master and slave (bytes) system.membus.snoops 0 # Total snoops (count) +system.membus.snoopTraffic 0 # Total snoop traffic (bytes) system.membus.snoop_fanout::samples 416 # Request fanout histogram system.membus.snoop_fanout::mean 0 # Request fanout histogram system.membus.snoop_fanout::stdev 0 # Request fanout histogram diff --git a/tests/quick/se/00.hello/ref/x86/linux/simple-atomic/config.ini b/tests/quick/se/00.hello/ref/x86/linux/simple-atomic/config.ini index 4a2074e15..62043a3c5 100644 --- a/tests/quick/se/00.hello/ref/x86/linux/simple-atomic/config.ini +++ b/tests/quick/se/00.hello/ref/x86/linux/simple-atomic/config.ini @@ -14,6 +14,7 @@ children=clk_domain cpu cpu_clk_domain dvfs_handler membus physmem voltage_domai boot_osflags=a cache_line_size=64 clk_domain=system.clk_domain +default_p_state=UNDEFINED eventq_index=0 exit_on_work_items=false init_param=0 @@ -27,6 +28,10 @@ memories=system.physmem mmap_using_noreserve=false multi_thread=false num_work_ids=16 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null readfile= symbolfile= thermal_components= @@ -55,6 +60,7 @@ branchPred=Null checker=Null clk_domain=system.cpu_clk_domain cpu_id=0 +default_p_state=UNDEFINED do_checkpoint_insts=true do_quiesce=true do_statistics_insts=true @@ -71,6 +77,10 @@ max_insts_any_thread=0 max_loads_all_threads=0 max_loads_any_thread=0 numThreads=1 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null profile=0 progress_interval=0 simpoint_start_insts= @@ -101,18 +111,28 @@ walker=system.cpu.dtb.walker [system.cpu.dtb.walker] type=X86PagetableWalker clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 num_squash_per_cycle=4 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null system=system port=system.membus.slave[4] [system.cpu.interrupts] type=X86LocalApic clk_domain=system.cpu.apic_clk_domain +default_p_state=UNDEFINED eventq_index=0 int_latency=1000 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 pio_addr=2305843009213693952 pio_latency=100000 +power_model=Null system=system int_master=system.membus.slave[5] int_slave=system.membus.master[2] @@ -132,8 +152,13 @@ walker=system.cpu.itb.walker [system.cpu.itb.walker] type=X86PagetableWalker clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 num_squash_per_cycle=4 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null system=system port=system.membus.slave[3] @@ -151,7 +176,7 @@ env= errout=cerr euid=100 eventq_index=0 -executable=/dist/m5/regression/test-progs/hello/bin/x86/linux/hello +executable=/arm/projectscratch/randd/systems/dist/test-progs/hello/bin/x86/linux/hello gid=100 input=cin kvmInSE=false @@ -183,10 +208,15 @@ transition_latency=100000000 [system.membus] type=CoherentXBar clk_domain=system.clk_domain +default_p_state=UNDEFINED eventq_index=0 forward_latency=4 frontend_latency=3 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 point_of_coherency=true +power_model=Null response_latency=2 snoop_filter=Null snoop_response_latency=4 @@ -201,11 +231,16 @@ type=SimpleMemory bandwidth=73.000000 
clk_domain=system.clk_domain conf_table_reported=true +default_p_state=UNDEFINED eventq_index=0 in_addr_map=true latency=30000 latency_var=0 null=false +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null range=0:134217727 port=system.membus.master[0] diff --git a/tests/quick/se/00.hello/ref/x86/linux/simple-atomic/simerr b/tests/quick/se/00.hello/ref/x86/linux/simple-atomic/simerr index 1a4f96712..aadc3d011 100755 --- a/tests/quick/se/00.hello/ref/x86/linux/simple-atomic/simerr +++ b/tests/quick/se/00.hello/ref/x86/linux/simple-atomic/simerr @@ -1 +1,2 @@ warn: Sockets disabled, not accepting gdb connections +warn: ClockedObject: More than one power state change request encountered within the same simulation tick diff --git a/tests/quick/se/00.hello/ref/x86/linux/simple-atomic/simout b/tests/quick/se/00.hello/ref/x86/linux/simple-atomic/simout index 87f2a402c..bd2d6df6a 100755 --- a/tests/quick/se/00.hello/ref/x86/linux/simple-atomic/simout +++ b/tests/quick/se/00.hello/ref/x86/linux/simple-atomic/simout @@ -1,10 +1,12 @@ +Redirecting stdout to build/X86/tests/opt/quick/se/00.hello/x86/linux/simple-atomic/simout +Redirecting stderr to build/X86/tests/opt/quick/se/00.hello/x86/linux/simple-atomic/simerr gem5 Simulator System. http://gem5.org gem5 is copyrighted software; use the --copyright option for details. -gem5 compiled Jan 21 2016 14:41:03 -gem5 started Jan 21 2016 14:41:54 -gem5 executing on zizzer, pid 17917 -command line: build/X86/gem5.opt -d build/X86/tests/opt/quick/se/00.hello/x86/linux/simple-atomic -re /z/atgutier/gem5/gem5-commit/tests/run.py build/X86/tests/opt/quick/se/00.hello/x86/linux/simple-atomic +gem5 compiled Jul 21 2016 14:35:23 +gem5 started Jul 21 2016 14:36:19 +gem5 executing on e108600-lin, pid 18561 +command line: /work/curdun01/gem5-external.hg/build/X86/gem5.opt -d build/X86/tests/opt/quick/se/00.hello/x86/linux/simple-atomic -re /work/curdun01/gem5-external.hg/tests/testing/../run.py quick/se/00.hello/x86/linux/simple-atomic Global frequency set at 1000000000000 ticks per second info: Entering event queue @ 0. Starting simulation... 
diff --git a/tests/quick/se/00.hello/ref/x86/linux/simple-atomic/stats.txt b/tests/quick/se/00.hello/ref/x86/linux/simple-atomic/stats.txt
index da4043b17..563e9e0f5 100644
--- a/tests/quick/se/00.hello/ref/x86/linux/simple-atomic/stats.txt
+++ b/tests/quick/se/00.hello/ref/x86/linux/simple-atomic/stats.txt
@@ -4,11 +4,11 @@ sim_seconds 0.000006 # Nu
sim_ticks 5615000 # Number of ticks simulated
final_tick 5615000 # Number of ticks from beginning of simulation (restored from checkpoints and never reset)
sim_freq 1000000000000 # Frequency of simulated ticks
-host_inst_rate 383826 # Simulator instruction rate (inst/s)
-host_op_rate 694898 # Simulator op (including micro ops) rate (op/s)
-host_tick_rate 400042776 # Simulator tick rate (ticks/s)
-host_mem_usage 299464 # Number of bytes of host memory used
-host_seconds 0.01 # Real time elapsed on the host
+host_inst_rate 290053 # Simulator instruction rate (inst/s)
+host_op_rate 524918 # Simulator op (including micro ops) rate (op/s)
+host_tick_rate 302085986 # Simulator tick rate (ticks/s)
+host_mem_usage 255208 # Number of bytes of host memory used
+host_seconds 0.02 # Real time elapsed on the host
sim_insts 5381 # Number of instructions simulated
sim_ops 9748 # Number of ops (including micro ops) simulated
system.voltage_domain.voltage 1 # Voltage in Volts
@@ -120,6 +120,7 @@ system.membus.pkt_size_system.cpu.dcache_port::system.physmem.port 14178
system.membus.pkt_size_system.cpu.dcache_port::total 14178 # Cumulative packet size per connected master and slave (bytes)
system.membus.pkt_size::total 69090 # Cumulative packet size per connected master and slave (bytes)
system.membus.snoops 0 # Total snoops (count)
+system.membus.snoopTraffic 0 # Total snoop traffic (bytes)
system.membus.snoop_fanout::samples 8852 # Request fanout histogram
system.membus.snoop_fanout::mean 0.775418 # Request fanout histogram
system.membus.snoop_fanout::stdev 0.417330 # Request fanout histogram
diff --git a/tests/quick/se/00.hello/ref/x86/linux/simple-timing/config.ini b/tests/quick/se/00.hello/ref/x86/linux/simple-timing/config.ini
index ca3cb5481..bc04df7fd 100644
--- a/tests/quick/se/00.hello/ref/x86/linux/simple-timing/config.ini
+++ b/tests/quick/se/00.hello/ref/x86/linux/simple-timing/config.ini
@@ -14,6 +14,7 @@ children=clk_domain cpu cpu_clk_domain dvfs_handler membus physmem voltage_domai
boot_osflags=a
cache_line_size=64
clk_domain=system.clk_domain
+default_p_state=UNDEFINED
eventq_index=0
exit_on_work_items=false
init_param=0
@@ -27,6 +28,10 @@ memories=system.physmem
mmap_using_noreserve=false
multi_thread=false
num_work_ids=16
+p_state_clk_gate_bins=20
+p_state_clk_gate_max=1000000000000
+p_state_clk_gate_min=1000
+power_model=Null
readfile=
symbolfile=
thermal_components=
@@ -55,6 +60,7 @@ branchPred=Null
checker=Null
clk_domain=system.cpu_clk_domain
cpu_id=0
+default_p_state=UNDEFINED
do_checkpoint_insts=true
do_quiesce=true
do_statistics_insts=true
@@ -70,6 +76,10 @@ max_insts_any_thread=0
max_loads_all_threads=0
max_loads_any_thread=0
numThreads=1
+p_state_clk_gate_bins=20
+p_state_clk_gate_max=1000000000000
+p_state_clk_gate_min=1000
+power_model=Null
profile=0
progress_interval=0
simpoint_start_insts=
@@ -94,12 +104,17 @@ addr_ranges=0:18446744073709551615
assoc=2
clk_domain=system.cpu_clk_domain
clusivity=mostly_incl
+default_p_state=UNDEFINED
demand_mshr_reserve=1
eventq_index=0
hit_latency=2
is_read_only=false
max_miss_count=0
mshrs=4
+p_state_clk_gate_bins=20
+p_state_clk_gate_max=1000000000000
+p_state_clk_gate_min=1000
+power_model=Null
prefetch_on_access=false
prefetcher=Null
response_latency=2
@@ -118,8 +133,13 @@ type=LRU
assoc=2
block_size=64
clk_domain=system.cpu_clk_domain
+default_p_state=UNDEFINED
eventq_index=0
hit_latency=2
+p_state_clk_gate_bins=20
+p_state_clk_gate_max=1000000000000
+p_state_clk_gate_min=1000
+power_model=Null
sequential_access=false
size=262144
@@ -133,8 +153,13 @@ walker=system.cpu.dtb.walker
[system.cpu.dtb.walker]
type=X86PagetableWalker
clk_domain=system.cpu_clk_domain
+default_p_state=UNDEFINED
eventq_index=0
num_squash_per_cycle=4
+p_state_clk_gate_bins=20
+p_state_clk_gate_max=1000000000000
+p_state_clk_gate_min=1000
+power_model=Null
system=system
port=system.cpu.toL2Bus.slave[3]
@@ -145,12 +170,17 @@ addr_ranges=0:18446744073709551615
assoc=2
clk_domain=system.cpu_clk_domain
clusivity=mostly_incl
+default_p_state=UNDEFINED
demand_mshr_reserve=1
eventq_index=0
hit_latency=2
is_read_only=true
max_miss_count=0
mshrs=4
+p_state_clk_gate_bins=20
+p_state_clk_gate_max=1000000000000
+p_state_clk_gate_min=1000
+power_model=Null
prefetch_on_access=false
prefetcher=Null
response_latency=2
@@ -169,18 +199,28 @@ type=LRU
assoc=2
block_size=64
clk_domain=system.cpu_clk_domain
+default_p_state=UNDEFINED
eventq_index=0
hit_latency=2
+p_state_clk_gate_bins=20
+p_state_clk_gate_max=1000000000000
+p_state_clk_gate_min=1000
+power_model=Null
sequential_access=false
size=131072
[system.cpu.interrupts]
type=X86LocalApic
clk_domain=system.cpu.apic_clk_domain
+default_p_state=UNDEFINED
eventq_index=0
int_latency=1000
+p_state_clk_gate_bins=20
+p_state_clk_gate_max=1000000000000
+p_state_clk_gate_min=1000
pio_addr=2305843009213693952
pio_latency=100000
+power_model=Null
system=system
int_master=system.membus.slave[2]
int_slave=system.membus.master[2]
@@ -200,8 +240,13 @@ walker=system.cpu.itb.walker
[system.cpu.itb.walker]
type=X86PagetableWalker
clk_domain=system.cpu_clk_domain
+default_p_state=UNDEFINED
eventq_index=0
num_squash_per_cycle=4
+p_state_clk_gate_bins=20
+p_state_clk_gate_max=1000000000000
+p_state_clk_gate_min=1000
+power_model=Null
system=system
port=system.cpu.toL2Bus.slave[2]
@@ -212,12 +257,17 @@ addr_ranges=0:18446744073709551615
assoc=8
clk_domain=system.cpu_clk_domain
clusivity=mostly_incl
+default_p_state=UNDEFINED
demand_mshr_reserve=1
eventq_index=0
hit_latency=20
is_read_only=false
max_miss_count=0
mshrs=20
+p_state_clk_gate_bins=20
+p_state_clk_gate_max=1000000000000
+p_state_clk_gate_min=1000
+power_model=Null
prefetch_on_access=false
prefetcher=Null
response_latency=20
@@ -236,8 +286,13 @@ type=LRU
assoc=8
block_size=64
clk_domain=system.cpu_clk_domain
+default_p_state=UNDEFINED
eventq_index=0
hit_latency=20
+p_state_clk_gate_bins=20
+p_state_clk_gate_max=1000000000000
+p_state_clk_gate_min=1000
+power_model=Null
sequential_access=false
size=2097152
@@ -245,10 +300,15 @@ size=2097152
type=CoherentXBar
children=snoop_filter
clk_domain=system.cpu_clk_domain
+default_p_state=UNDEFINED
eventq_index=0
forward_latency=0
frontend_latency=1
+p_state_clk_gate_bins=20
+p_state_clk_gate_max=1000000000000
+p_state_clk_gate_min=1000
point_of_coherency=false
+power_model=Null
response_latency=1
snoop_filter=system.cpu.toL2Bus.snoop_filter
snoop_response_latency=1
@@ -279,7 +339,7 @@ env=
errout=cerr
euid=100
eventq_index=0
-executable=/dist/m5/regression/test-progs/hello/bin/x86/linux/hello
+executable=/arm/projectscratch/randd/systems/dist/test-progs/hello/bin/x86/linux/hello
gid=100
input=cin
kvmInSE=false
@@ -311,10 +371,15 @@ transition_latency=100000000
[system.membus]
type=CoherentXBar
clk_domain=system.clk_domain
+default_p_state=UNDEFINED
eventq_index=0
forward_latency=4
frontend_latency=3
+p_state_clk_gate_bins=20
+p_state_clk_gate_max=1000000000000
+p_state_clk_gate_min=1000
point_of_coherency=true
+power_model=Null
response_latency=2
snoop_filter=Null
snoop_response_latency=4
@@ -329,11 +394,16 @@ type=SimpleMemory
bandwidth=73.000000
clk_domain=system.clk_domain
conf_table_reported=true
+default_p_state=UNDEFINED
eventq_index=0
in_addr_map=true
latency=30000
latency_var=0
null=false
+p_state_clk_gate_bins=20
+p_state_clk_gate_max=1000000000000
+p_state_clk_gate_min=1000
+power_model=Null
range=0:134217727
port=system.membus.master[0]
diff --git a/tests/quick/se/00.hello/ref/x86/linux/simple-timing/simerr b/tests/quick/se/00.hello/ref/x86/linux/simple-timing/simerr
index 1a4f96712..aadc3d011 100755
--- a/tests/quick/se/00.hello/ref/x86/linux/simple-timing/simerr
+++ b/tests/quick/se/00.hello/ref/x86/linux/simple-timing/simerr
@@ -1 +1,2 @@
warn: Sockets disabled, not accepting gdb connections
+warn: ClockedObject: More than one power state change request encountered within the same simulation tick
diff --git a/tests/quick/se/00.hello/ref/x86/linux/simple-timing/simout b/tests/quick/se/00.hello/ref/x86/linux/simple-timing/simout
index b229084a6..17523a325 100755
--- a/tests/quick/se/00.hello/ref/x86/linux/simple-timing/simout
+++ b/tests/quick/se/00.hello/ref/x86/linux/simple-timing/simout
@@ -1,10 +1,12 @@
+Redirecting stdout to build/X86/tests/opt/quick/se/00.hello/x86/linux/simple-timing/simout
+Redirecting stderr to build/X86/tests/opt/quick/se/00.hello/x86/linux/simple-timing/simerr
gem5 Simulator System. http://gem5.org
gem5 is copyrighted software; use the --copyright option for details.
-gem5 compiled Jan 21 2016 14:41:03
-gem5 started Jan 21 2016 14:41:52
-gem5 executing on zizzer, pid 17886
-command line: build/X86/gem5.opt -d build/X86/tests/opt/quick/se/00.hello/x86/linux/simple-timing -re /z/atgutier/gem5/gem5-commit/tests/run.py build/X86/tests/opt/quick/se/00.hello/x86/linux/simple-timing
+gem5 compiled Jul 21 2016 14:35:23
+gem5 started Jul 21 2016 14:36:20
+gem5 executing on e108600-lin, pid 18567
+command line: /work/curdun01/gem5-external.hg/build/X86/gem5.opt -d build/X86/tests/opt/quick/se/00.hello/x86/linux/simple-timing -re /work/curdun01/gem5-external.hg/tests/testing/../run.py quick/se/00.hello/x86/linux/simple-timing
Global frequency set at 1000000000000 ticks per second
info: Entering event queue @ 0. Starting simulation...
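Both simerr references now also expect the new "ClockedObject: More than one power state change request encountered within the same simulation tick" warning, and the simout references gain the "Redirecting stdout/stderr" preamble while the compile date, host name, pid, and absolute command-line paths change as they normally do between runs. A hedged sketch (not a gem5 tool; helper names and paths are made up for illustration) of comparing a regenerated simout against a reference while ignoring those host-dependent lines:

# Illustrative helper, not part of the gem5 test harness.
import re

VOLATILE = re.compile(r'^(Redirecting |gem5 compiled |gem5 started |'
                      r'gem5 executing on |command line: )')

def stable_lines(path):
    """Return simout lines expected to match across hosts."""
    with open(path) as f:
        return [line.rstrip('\n') for line in f if not VOLATILE.match(line)]

def simout_matches(new_simout, ref_simout):
    return stable_lines(new_simout) == stable_lines(ref_simout)

# Example:
# simout_matches('m5out/simout',
#                'tests/quick/se/00.hello/ref/x86/linux/simple-timing/simout')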
diff --git a/tests/quick/se/00.hello/ref/x86/linux/simple-timing/stats.txt b/tests/quick/se/00.hello/ref/x86/linux/simple-timing/stats.txt
index be586bcab..9047321d1 100644
--- a/tests/quick/se/00.hello/ref/x86/linux/simple-timing/stats.txt
+++ b/tests/quick/se/00.hello/ref/x86/linux/simple-timing/stats.txt
@@ -4,11 +4,11 @@ sim_seconds 0.000031 # Nu
sim_ticks 30886500 # Number of ticks simulated
final_tick 30886500 # Number of ticks from beginning of simulation (restored from checkpoints and never reset)
sim_freq 1000000000000 # Frequency of simulated ticks
-host_inst_rate 223066 # Simulator instruction rate (inst/s)
-host_op_rate 403939 # Simulator op (including micro ops) rate (op/s)
-host_tick_rate 1279464733 # Simulator tick rate (ticks/s)
-host_mem_usage 309460 # Number of bytes of host memory used
-host_seconds 0.02 # Real time elapsed on the host
+host_inst_rate 211795 # Simulator instruction rate (inst/s)
+host_op_rate 383429 # Simulator op (including micro ops) rate (op/s)
+host_tick_rate 1214135841 # Simulator tick rate (ticks/s)
+host_mem_usage 263924 # Number of bytes of host memory used
+host_seconds 0.03 # Real time elapsed on the host
sim_insts 5381 # Number of instructions simulated
sim_ops 9748 # Number of ops (including micro ops) simulated
system.voltage_domain.voltage 1 # Voltage in Volts
@@ -442,6 +442,7 @@ system.cpu.toL2Bus.pkt_size_system.cpu.icache.mem_side::system.cpu.l2cache.cpu_s
system.cpu.toL2Bus.pkt_size_system.cpu.dcache.mem_side::system.cpu.l2cache.cpu_side 8576 # Cumulative packet size per connected master and slave (bytes)
system.cpu.toL2Bus.pkt_size::total 23168 # Cumulative packet size per connected master and slave (bytes)
system.cpu.toL2Bus.snoops 0 # Total snoops (count)
+system.cpu.toL2Bus.snoopTraffic 0 # Total snoop traffic (bytes)
system.cpu.toL2Bus.snoop_fanout::samples 362 # Request fanout histogram
system.cpu.toL2Bus.snoop_fanout::mean 0.002762 # Request fanout histogram
system.cpu.toL2Bus.snoop_fanout::stdev 0.052559 # Request fanout histogram
@@ -471,6 +472,7 @@ system.membus.pkt_size_system.cpu.l2cache.mem_side::system.physmem.port 2
system.membus.pkt_size_system.cpu.l2cache.mem_side::total 23104 # Cumulative packet size per connected master and slave (bytes)
system.membus.pkt_size::total 23104 # Cumulative packet size per connected master and slave (bytes)
system.membus.snoops 0 # Total snoops (count)
+system.membus.snoopTraffic 0 # Total snoop traffic (bytes)
system.membus.snoop_fanout::samples 361 # Request fanout histogram
system.membus.snoop_fanout::mean 0 # Request fanout histogram
system.membus.snoop_fanout::stdev 0 # Request fanout histogram
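As in the simple-atomic reference above, only the host_* statistics move in this stats.txt (host_inst_rate, host_op_rate, host_tick_rate, host_mem_usage, host_seconds), while the simulated-behaviour numbers (sim_ticks 30886500, sim_insts 5381, sim_ops 9748) are unchanged apart from the newly added snoopTraffic entries. A small hypothetical sketch (not shipped with gem5; function name is an assumption) for diffing reference stats while skipping the host-dependent keys:

# Illustrative only: load a gem5 stats.txt into a dict, dropping host_* keys
# and the "Begin/End Simulation Statistics" banner lines.
def load_sim_stats(path):
    stats = {}
    with open(path) as f:
        for raw in f:
            line = raw.split('#', 1)[0].strip()   # strip the description column
            if not line or line.startswith('----'):
                continue
            parts = line.split(None, 1)
            if len(parts) != 2:
                continue
            name, value = parts
            if not name.startswith('host_'):
                stats[name] = value.strip()
    return stats

# Example usage:
# old = load_sim_stats('ref/stats.txt')
# new = load_sim_stats('m5out/stats.txt')
# changed = {k: (old.get(k), new.get(k))
#            for k in set(old) | set(new) if old.get(k) != new.get(k)}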