author | Curtis Dunham <Curtis.Dunham@arm.com> | 2016-07-21 17:19:18 +0100
---|---|---
committer | Curtis Dunham <Curtis.Dunham@arm.com> | 2016-07-21 17:19:18 +0100
commit | 84f138ba96201431513eb2ae5f847389ac731aa2 (patch) |
tree | 3aee721699295c85e4e0c2d3d4a6bb27595bfabd /tests/long/se |
parent | a288c94387b110112461ff5686fa727a43ddbe9c (diff) |
download | gem5-84f138ba96201431513eb2ae5f847389ac731aa2.tar.xz |
stats: update references
Diffstat (limited to 'tests/long/se')
188 files changed, 20474 insertions, 7062 deletions
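Since the change touches hundreds of machine-generated reference statistics, it is often easier to summarise which stats actually moved than to read the raw diff line by line. The sketch below is a hypothetical, stand-alone helper (not part of the gem5 tree; the script name, `load_stats`, and the 1% tolerance are illustrative only). It assumes the usual gem5 stats.txt layout visible in the diff below, i.e. `name value # description` on each line.

```python
#!/usr/bin/env python3
"""Hypothetical helper (not part of gem5) for reviewing reference-stat updates.

Parses two gem5 stats.txt files, whose lines look like
    sim_ticks    61234797500    # Number of ticks simulated
and reports scalar stats whose values moved by more than a relative tolerance.
"""
import re
import sys

# Matches "<stat name> <numeric value>" at the start of a stats.txt line.
STAT_LINE = re.compile(r"^(\S+)\s+(-?[\d.]+|nan|inf)\s")


def load_stats(path):
    """Return {stat_name: float_value} for every scalar line in a stats.txt."""
    stats = {}
    with open(path) as f:
        for line in f:
            m = STAT_LINE.match(line)
            if m:
                try:
                    stats[m.group(1)] = float(m.group(2))
                except ValueError:
                    pass  # skip malformed values
    return stats


def main(old_path, new_path, rel_tol=0.01):
    old, new = load_stats(old_path), load_stats(new_path)
    for name in sorted(set(old) | set(new)):
        a, b = old.get(name), new.get(name)
        if a is None or b is None:
            print(f"{name}: only in {'new' if a is None else 'old'} file")
        elif a != b and (a == 0 or abs(b - a) / abs(a) > rel_tol):
            print(f"{name}: {a} -> {b}")


if __name__ == "__main__":
    # e.g. python compare_stats.py old/stats.txt new/stats.txt
    main(sys.argv[1], sys.argv[2])
```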
diff --git a/tests/long/se/10.mcf/ref/arm/linux/minor-timing/config.ini b/tests/long/se/10.mcf/ref/arm/linux/minor-timing/config.ini
index 8b738959d..20272ec5e 100644
--- a/tests/long/se/10.mcf/ref/arm/linux/minor-timing/config.ini
+++ b/tests/long/se/10.mcf/ref/arm/linux/minor-timing/config.ini
@@ -14,7 +14,9 @@ children=clk_domain cpu cpu_clk_domain dvfs_handler membus physmem voltage_domai
 boot_osflags=a
 cache_line_size=64
 clk_domain=system.clk_domain
+default_p_state=UNDEFINED
 eventq_index=0
+exit_on_work_items=false
 init_param=0
 kernel=
 kernel_addr_check=true
@@ -24,9 +26,16 @@ mem_mode=timing
 mem_ranges=
 memories=system.physmem
 mmap_using_noreserve=false
+multi_thread=false
 num_work_ids=16
+p_state_clk_gate_bins=20
+p_state_clk_gate_max=1000000000000
+p_state_clk_gate_min=1000
+power_model=Null
 readfile=
 symbolfile=
+thermal_components=
+thermal_model=Null
 work_begin_ckpt_count=0
 work_begin_cpu_id_exit=-1
 work_begin_exit_count=0
@@ -55,6 +64,7 @@ decodeCycleInput=true
 decodeInputBufferSize=3
 decodeInputWidth=2
 decodeToExecuteForwardDelay=1
+default_p_state=UNDEFINED
 do_checkpoint_insts=true
 do_quiesce=true
 do_statistics_insts=true
@@ -99,12 +109,17 @@ max_insts_any_thread=0
 max_loads_all_threads=0
 max_loads_any_thread=0
 numThreads=1
+p_state_clk_gate_bins=20
+p_state_clk_gate_max=1000000000000
+p_state_clk_gate_min=1000
+power_model=Null
 profile=0
 progress_interval=0
 simpoint_start_insts=
 socket_id=0
 switched_out=false
 system=system
+threadPolicy=RoundRobin
 tracer=system.cpu.tracer
 workload=system.cpu.workload
 dcache_port=system.cpu.dcache.cpu_side
@@ -120,11 +135,18 @@ choicePredictorSize=8192
 eventq_index=0
 globalCtrBits=2
 globalPredictorSize=8192
+indirectHashGHR=true
+indirectHashTargets=true
+indirectPathLength=3
+indirectSets=256
+indirectTagSize=16
+indirectWays=2
 instShiftAmt=2
 localCtrBits=2
 localHistoryTableSize=2048
 localPredictorSize=2048
 numThreads=1
+useIndirect=true

 [system.cpu.dcache]
 type=Cache
@@ -132,13 +154,18 @@ children=tags
 addr_ranges=0:18446744073709551615
 assoc=2
 clk_domain=system.cpu_clk_domain
+clusivity=mostly_incl
+default_p_state=UNDEFINED
 demand_mshr_reserve=1
 eventq_index=0
-forward_snoops=true
 hit_latency=2
 is_read_only=false
 max_miss_count=0
 mshrs=4
+p_state_clk_gate_bins=20
+p_state_clk_gate_max=1000000000000
+p_state_clk_gate_min=1000
+power_model=Null
 prefetch_on_access=false
 prefetcher=Null
 response_latency=2
@@ -148,6 +175,7 @@ system=system
 tags=system.cpu.dcache.tags
 tgts_per_mshr=20
 write_buffers=8
+writeback_clean=false
 cpu_side=system.cpu.dcache_port
 mem_side=system.cpu.toL2Bus.slave[1]

@@ -156,8 +184,13 @@ type=LRU
 assoc=2
 block_size=64
 clk_domain=system.cpu_clk_domain
+default_p_state=UNDEFINED
 eventq_index=0
 hit_latency=2
+p_state_clk_gate_bins=20
+p_state_clk_gate_max=1000000000000
+p_state_clk_gate_min=1000
+power_model=Null
 sequential_access=false
 size=262144

@@ -180,9 +213,14 @@ walker=system.cpu.dstage2_mmu.stage2_tlb.walker
 [system.cpu.dstage2_mmu.stage2_tlb.walker]
 type=ArmTableWalker
 clk_domain=system.cpu_clk_domain
+default_p_state=UNDEFINED
 eventq_index=0
 is_stage2=true
 num_squash_per_cycle=2
+p_state_clk_gate_bins=20
+p_state_clk_gate_max=1000000000000
+p_state_clk_gate_min=1000
+power_model=Null
 sys=system

 [system.cpu.dtb]
@@ -196,9 +234,14 @@ walker=system.cpu.dtb.walker
 [system.cpu.dtb.walker]
 type=ArmTableWalker
 clk_domain=system.cpu_clk_domain
+default_p_state=UNDEFINED
 eventq_index=0
 is_stage2=false
 num_squash_per_cycle=2
+p_state_clk_gate_bins=20
+p_state_clk_gate_max=1000000000000
+p_state_clk_gate_min=1000
+power_model=Null
 sys=system
 port=system.cpu.toL2Bus.slave[3]

@@ -591,13 +634,18 @@ children=tags
 addr_ranges=0:18446744073709551615
 assoc=2
 clk_domain=system.cpu_clk_domain
+clusivity=mostly_incl
+default_p_state=UNDEFINED
 demand_mshr_reserve=1
 eventq_index=0
-forward_snoops=true
 hit_latency=2
 is_read_only=true
 max_miss_count=0
 mshrs=4
+p_state_clk_gate_bins=20
+p_state_clk_gate_max=1000000000000
+p_state_clk_gate_min=1000
+power_model=Null
 prefetch_on_access=false
 prefetcher=Null
 response_latency=2
@@ -607,6 +655,7 @@ system=system
 tags=system.cpu.icache.tags
 tgts_per_mshr=20
 write_buffers=8
+writeback_clean=true
 cpu_side=system.cpu.icache_port
 mem_side=system.cpu.toL2Bus.slave[0]

@@ -615,8 +664,13 @@ type=LRU
 assoc=2
 block_size=64
 clk_domain=system.cpu_clk_domain
+default_p_state=UNDEFINED
 eventq_index=0
 hit_latency=2
+p_state_clk_gate_bins=20
+p_state_clk_gate_max=1000000000000
+p_state_clk_gate_min=1000
+power_model=Null
 sequential_access=false
 size=131072

@@ -626,6 +680,7 @@ eventq_index=0

 [system.cpu.isa]
 type=ArmISA
+decoderFlavour=Generic
 eventq_index=0
 fpsid=1090793632
 id_aa64afr0_el1=0
@@ -673,9 +728,14 @@ walker=system.cpu.istage2_mmu.stage2_tlb.walker
 [system.cpu.istage2_mmu.stage2_tlb.walker]
 type=ArmTableWalker
 clk_domain=system.cpu_clk_domain
+default_p_state=UNDEFINED
 eventq_index=0
 is_stage2=true
 num_squash_per_cycle=2
+p_state_clk_gate_bins=20
+p_state_clk_gate_max=1000000000000
+p_state_clk_gate_min=1000
+power_model=Null
 sys=system

 [system.cpu.itb]
@@ -689,9 +749,14 @@ walker=system.cpu.itb.walker
 [system.cpu.itb.walker]
 type=ArmTableWalker
 clk_domain=system.cpu_clk_domain
+default_p_state=UNDEFINED
 eventq_index=0
 is_stage2=false
 num_squash_per_cycle=2
+p_state_clk_gate_bins=20
+p_state_clk_gate_max=1000000000000
+p_state_clk_gate_min=1000
+power_model=Null
 sys=system
 port=system.cpu.toL2Bus.slave[2]

@@ -701,13 +766,18 @@ children=tags
 addr_ranges=0:18446744073709551615
 assoc=8
 clk_domain=system.cpu_clk_domain
+clusivity=mostly_incl
+default_p_state=UNDEFINED
 demand_mshr_reserve=1
 eventq_index=0
-forward_snoops=true
 hit_latency=20
 is_read_only=false
 max_miss_count=0
 mshrs=20
+p_state_clk_gate_bins=20
+p_state_clk_gate_max=1000000000000
+p_state_clk_gate_min=1000
+power_model=Null
 prefetch_on_access=false
 prefetcher=Null
 response_latency=20
@@ -717,6 +787,7 @@ system=system
 tags=system.cpu.l2cache.tags
 tgts_per_mshr=12
 write_buffers=8
+writeback_clean=false
 cpu_side=system.cpu.toL2Bus.master[0]
 mem_side=system.membus.slave[1]

@@ -725,19 +796,31 @@ type=LRU
 assoc=8
 block_size=64
 clk_domain=system.cpu_clk_domain
+default_p_state=UNDEFINED
 eventq_index=0
 hit_latency=20
+p_state_clk_gate_bins=20
+p_state_clk_gate_max=1000000000000
+p_state_clk_gate_min=1000
+power_model=Null
 sequential_access=false
 size=2097152

 [system.cpu.toL2Bus]
 type=CoherentXBar
+children=snoop_filter
 clk_domain=system.cpu_clk_domain
+default_p_state=UNDEFINED
 eventq_index=0
 forward_latency=0
 frontend_latency=1
+p_state_clk_gate_bins=20
+p_state_clk_gate_max=1000000000000
+p_state_clk_gate_min=1000
+point_of_coherency=false
+power_model=Null
 response_latency=1
-snoop_filter=Null
+snoop_filter=system.cpu.toL2Bus.snoop_filter
 snoop_response_latency=1
 system=system
 use_default_range=false
@@ -745,6 +828,13 @@ width=32
 master=system.cpu.l2cache.cpu_side
 slave=system.cpu.icache.mem_side system.cpu.dcache.mem_side system.cpu.itb.walker.port system.cpu.dtb.walker.port

+[system.cpu.toL2Bus.snoop_filter]
+type=SnoopFilter
+eventq_index=0
+lookup_latency=0
+max_capacity=8388608
+system=system
+
 [system.cpu.tracer]
 type=ExeTracer
 eventq_index=0
@@ -759,9 +849,9 @@ env=
 errout=cerr
 euid=100
 eventq_index=0
-executable=/scratch/nilay/GEM5/dist/m5/cpu2000/binaries/arm/linux/mcf
+executable=/arm/projectscratch/randd/systems/dist/cpu2000/binaries/arm/linux/mcf
 gid=100
-input=/scratch/nilay/GEM5/dist/m5/cpu2000/data/mcf/smred/input/mcf.in
+input=/arm/projectscratch/randd/systems/dist/cpu2000/data/mcf/smred/input/mcf.in
 kvmInSE=false
 max_stack_size=67108864
 output=cout
@@ -791,9 +881,15 @@ transition_latency=100000000
 [system.membus]
 type=CoherentXBar
 clk_domain=system.clk_domain
+default_p_state=UNDEFINED
 eventq_index=0
 forward_latency=4
 frontend_latency=3
+p_state_clk_gate_bins=20
+p_state_clk_gate_max=1000000000000
+p_state_clk_gate_min=1000
+point_of_coherency=true
+power_model=Null
 response_latency=2
 snoop_filter=Null
 snoop_response_latency=4
@@ -837,6 +933,7 @@ burst_length=8
 channels=1
 clk_domain=system.clk_domain
 conf_table_reported=true
+default_p_state=UNDEFINED
 device_bus_width=8
 device_rowbuffer_size=1024
 device_size=536870912
@@ -848,7 +945,11 @@ max_accesses_per_row=16
 mem_sched_policy=frfcfs
 min_writes_per_switch=16
 null=false
+p_state_clk_gate_bins=20
+p_state_clk_gate_max=1000000000000
+p_state_clk_gate_min=1000
 page_policy=open_adaptive
+power_model=Null
 range=0:268435455
 ranks_per_channel=2
 read_buffer_size=32
diff --git a/tests/long/se/10.mcf/ref/arm/linux/minor-timing/simerr b/tests/long/se/10.mcf/ref/arm/linux/minor-timing/simerr
index e9c9539d6..36f24465c 100755
--- a/tests/long/se/10.mcf/ref/arm/linux/minor-timing/simerr
+++ b/tests/long/se/10.mcf/ref/arm/linux/minor-timing/simerr
@@ -1,2 +1,3 @@
 warn: DRAM device capacity (8192 Mbytes) does not match the address range assigned (256 Mbytes)
 warn: Sockets disabled, not accepting gdb connections
+warn: ClockedObject: More than one power state change request encountered within the same simulation tick
diff --git a/tests/long/se/10.mcf/ref/arm/linux/minor-timing/simout b/tests/long/se/10.mcf/ref/arm/linux/minor-timing/simout
index d0ca2b5a8..1a3679afb 100755
--- a/tests/long/se/10.mcf/ref/arm/linux/minor-timing/simout
+++ b/tests/long/se/10.mcf/ref/arm/linux/minor-timing/simout
@@ -3,10 +3,10 @@ Redirecting stderr to build/ARM/tests/opt/long/se/10.mcf/arm/linux/minor-timing/
 gem5 Simulator System. http://gem5.org
 gem5 is copyrighted software; use the --copyright option for details.

-gem5 compiled Sep 14 2015 23:29:19
-gem5 started Sep 15 2015 02:29:01
-gem5 executing on ribera.cs.wisc.edu
-command line: build/ARM/gem5.opt -d build/ARM/tests/opt/long/se/10.mcf/arm/linux/minor-timing -re /scratch/nilay/GEM5/gem5/tests/run.py build/ARM/tests/opt/long/se/10.mcf/arm/linux/minor-timing
+gem5 compiled Jul 21 2016 14:37:41
+gem5 started Jul 21 2016 15:03:02
+gem5 executing on e108600-lin, pid 24162
+command line: /work/curdun01/gem5-external.hg/build/ARM/gem5.opt -d build/ARM/tests/opt/long/se/10.mcf/arm/linux/minor-timing -re /work/curdun01/gem5-external.hg/tests/testing/../run.py long/se/10.mcf/arm/linux/minor-timing

 Global frequency set at 1000000000000 ticks per second
 info: Entering event queue @ 0. Starting simulation...
@@ -26,4 +26,4 @@ simplex iterations : 2663 flow value : 3080014995 checksum : 68389 optimal -Exiting @ tick 61240850500 because target called exit() +Exiting @ tick 62408957500 because target called exit() diff --git a/tests/long/se/10.mcf/ref/arm/linux/minor-timing/stats.txt b/tests/long/se/10.mcf/ref/arm/linux/minor-timing/stats.txt index c8a3d5425..ef2534218 100644 --- a/tests/long/se/10.mcf/ref/arm/linux/minor-timing/stats.txt +++ b/tests/long/se/10.mcf/ref/arm/linux/minor-timing/stats.txt @@ -1,49 +1,49 @@ ---------- Begin Simulation Statistics ---------- -sim_seconds 0.061235 # Number of seconds simulated -sim_ticks 61234797500 # Number of ticks simulated -final_tick 61234797500 # Number of ticks from beginning of simulation (restored from checkpoints and never reset) +sim_seconds 0.062409 # Number of seconds simulated +sim_ticks 62408957500 # Number of ticks simulated +final_tick 62408957500 # Number of ticks from beginning of simulation (restored from checkpoints and never reset) sim_freq 1000000000000 # Frequency of simulated ticks -host_inst_rate 433531 # Simulator instruction rate (inst/s) -host_op_rate 435690 # Simulator op (including micro ops) rate (op/s) -host_tick_rate 293005809 # Simulator tick rate (ticks/s) -host_mem_usage 447448 # Number of bytes of host memory used -host_seconds 208.99 # Real time elapsed on the host +host_inst_rate 176281 # Simulator instruction rate (inst/s) +host_op_rate 177159 # Simulator op (including micro ops) rate (op/s) +host_tick_rate 121425676 # Simulator tick rate (ticks/s) +host_mem_usage 399932 # Number of bytes of host memory used +host_seconds 513.97 # Real time elapsed on the host sim_insts 90602850 # Number of instructions simulated sim_ops 91054081 # Number of ops (including micro ops) simulated system.voltage_domain.voltage 1 # Voltage in Volts system.clk_domain.clock 1000 # Clock period in ticks -system.physmem.pwrStateResidencyTicks::UNDEFINED 61234797500 # Cumulative time (in ticks) in various power states +system.physmem.pwrStateResidencyTicks::UNDEFINED 62408957500 # Cumulative time (in ticks) in various power states system.physmem.bytes_read::cpu.inst 49472 # Number of bytes read from this memory -system.physmem.bytes_read::cpu.data 947200 # Number of bytes read from this memory -system.physmem.bytes_read::total 996672 # Number of bytes read from this memory +system.physmem.bytes_read::cpu.data 947264 # Number of bytes read from this memory +system.physmem.bytes_read::total 996736 # Number of bytes read from this memory system.physmem.bytes_inst_read::cpu.inst 49472 # Number of instructions bytes read from this memory system.physmem.bytes_inst_read::total 49472 # Number of instructions bytes read from this memory system.physmem.num_reads::cpu.inst 773 # Number of read requests responded to by this memory -system.physmem.num_reads::cpu.data 14800 # Number of read requests responded to by this memory -system.physmem.num_reads::total 15573 # Number of read requests responded to by this memory -system.physmem.bw_read::cpu.inst 807907 # Total read bandwidth from this memory (bytes/s) -system.physmem.bw_read::cpu.data 15468329 # Total read bandwidth from this memory (bytes/s) -system.physmem.bw_read::total 16276236 # Total read bandwidth from this memory (bytes/s) -system.physmem.bw_inst_read::cpu.inst 807907 # Instruction read bandwidth from this memory (bytes/s) -system.physmem.bw_inst_read::total 807907 # Instruction read bandwidth from this memory (bytes/s) -system.physmem.bw_total::cpu.inst 807907 # Total bandwidth to/from 
this memory (bytes/s) -system.physmem.bw_total::cpu.data 15468329 # Total bandwidth to/from this memory (bytes/s) -system.physmem.bw_total::total 16276236 # Total bandwidth to/from this memory (bytes/s) -system.physmem.readReqs 15573 # Number of read requests accepted +system.physmem.num_reads::cpu.data 14801 # Number of read requests responded to by this memory +system.physmem.num_reads::total 15574 # Number of read requests responded to by this memory +system.physmem.bw_read::cpu.inst 792707 # Total read bandwidth from this memory (bytes/s) +system.physmem.bw_read::cpu.data 15178334 # Total read bandwidth from this memory (bytes/s) +system.physmem.bw_read::total 15971041 # Total read bandwidth from this memory (bytes/s) +system.physmem.bw_inst_read::cpu.inst 792707 # Instruction read bandwidth from this memory (bytes/s) +system.physmem.bw_inst_read::total 792707 # Instruction read bandwidth from this memory (bytes/s) +system.physmem.bw_total::cpu.inst 792707 # Total bandwidth to/from this memory (bytes/s) +system.physmem.bw_total::cpu.data 15178334 # Total bandwidth to/from this memory (bytes/s) +system.physmem.bw_total::total 15971041 # Total bandwidth to/from this memory (bytes/s) +system.physmem.readReqs 15574 # Number of read requests accepted system.physmem.writeReqs 0 # Number of write requests accepted -system.physmem.readBursts 15573 # Number of DRAM read bursts, including those serviced by the write queue +system.physmem.readBursts 15574 # Number of DRAM read bursts, including those serviced by the write queue system.physmem.writeBursts 0 # Number of DRAM write bursts, including those merged in the write queue -system.physmem.bytesReadDRAM 996672 # Total number of bytes read from DRAM +system.physmem.bytesReadDRAM 996736 # Total number of bytes read from DRAM system.physmem.bytesReadWrQ 0 # Total number of bytes read from write queue system.physmem.bytesWritten 0 # Total number of bytes written to DRAM -system.physmem.bytesReadSys 996672 # Total read bytes from the system interface side +system.physmem.bytesReadSys 996736 # Total read bytes from the system interface side system.physmem.bytesWrittenSys 0 # Total written bytes from the system interface side system.physmem.servicedByWrQ 0 # Number of DRAM read bursts serviced by the write queue system.physmem.mergedWrBursts 0 # Number of DRAM write bursts merged with an existing one system.physmem.neitherReadNorWriteReqs 0 # Number of requests that are neither read nor write system.physmem.perBankRdBursts::0 993 # Per bank write bursts -system.physmem.perBankRdBursts::1 890 # Per bank write bursts +system.physmem.perBankRdBursts::1 891 # Per bank write bursts system.physmem.perBankRdBursts::2 949 # Per bank write bursts system.physmem.perBankRdBursts::3 1027 # Per bank write bursts system.physmem.perBankRdBursts::4 1050 # Per bank write bursts @@ -76,14 +76,14 @@ system.physmem.perBankWrBursts::14 0 # Pe system.physmem.perBankWrBursts::15 0 # Per bank write bursts system.physmem.numRdRetry 0 # Number of times read queue was full causing retry system.physmem.numWrRetry 0 # Number of times write queue was full causing retry -system.physmem.totGap 61234703000 # Total gap between requests +system.physmem.totGap 62408863500 # Total gap between requests system.physmem.readPktSize::0 0 # Read request sizes (log2) system.physmem.readPktSize::1 0 # Read request sizes (log2) system.physmem.readPktSize::2 0 # Read request sizes (log2) system.physmem.readPktSize::3 0 # Read request sizes (log2) system.physmem.readPktSize::4 0 # Read request 
sizes (log2) system.physmem.readPktSize::5 0 # Read request sizes (log2) -system.physmem.readPktSize::6 15573 # Read request sizes (log2) +system.physmem.readPktSize::6 15574 # Read request sizes (log2) system.physmem.writePktSize::0 0 # Write request sizes (log2) system.physmem.writePktSize::1 0 # Write request sizes (log2) system.physmem.writePktSize::2 0 # Write request sizes (log2) @@ -91,9 +91,9 @@ system.physmem.writePktSize::3 0 # Wr system.physmem.writePktSize::4 0 # Write request sizes (log2) system.physmem.writePktSize::5 0 # Write request sizes (log2) system.physmem.writePktSize::6 0 # Write request sizes (log2) -system.physmem.rdQLenPdf::0 15454 # What read queue length does an incoming req see +system.physmem.rdQLenPdf::0 15459 # What read queue length does an incoming req see system.physmem.rdQLenPdf::1 109 # What read queue length does an incoming req see -system.physmem.rdQLenPdf::2 10 # What read queue length does an incoming req see +system.physmem.rdQLenPdf::2 6 # What read queue length does an incoming req see system.physmem.rdQLenPdf::3 0 # What read queue length does an incoming req see system.physmem.rdQLenPdf::4 0 # What read queue length does an incoming req see system.physmem.rdQLenPdf::5 0 # What read queue length does an incoming req see @@ -187,86 +187,86 @@ system.physmem.wrQLenPdf::60 0 # Wh system.physmem.wrQLenPdf::61 0 # What write queue length does an incoming req see system.physmem.wrQLenPdf::62 0 # What write queue length does an incoming req see system.physmem.wrQLenPdf::63 0 # What write queue length does an incoming req see -system.physmem.bytesPerActivate::samples 1535 # Bytes accessed per row activation -system.physmem.bytesPerActivate::mean 648.213681 # Bytes accessed per row activation -system.physmem.bytesPerActivate::gmean 443.714701 # Bytes accessed per row activation -system.physmem.bytesPerActivate::stdev 401.012846 # Bytes accessed per row activation -system.physmem.bytesPerActivate::0-127 241 15.70% 15.70% # Bytes accessed per row activation -system.physmem.bytesPerActivate::128-255 186 12.12% 27.82% # Bytes accessed per row activation -system.physmem.bytesPerActivate::256-383 88 5.73% 33.55% # Bytes accessed per row activation -system.physmem.bytesPerActivate::384-511 73 4.76% 38.31% # Bytes accessed per row activation -system.physmem.bytesPerActivate::512-639 71 4.63% 42.93% # Bytes accessed per row activation -system.physmem.bytesPerActivate::640-767 84 5.47% 48.40% # Bytes accessed per row activation -system.physmem.bytesPerActivate::768-895 36 2.35% 50.75% # Bytes accessed per row activation -system.physmem.bytesPerActivate::896-1023 51 3.32% 54.07% # Bytes accessed per row activation -system.physmem.bytesPerActivate::1024-1151 705 45.93% 100.00% # Bytes accessed per row activation -system.physmem.bytesPerActivate::total 1535 # Bytes accessed per row activation -system.physmem.totQLat 72594750 # Total ticks spent queuing -system.physmem.totMemAccLat 364588500 # Total ticks spent from burst creation until serviced by the DRAM -system.physmem.totBusLat 77865000 # Total ticks spent in databus transfers -system.physmem.avgQLat 4661.58 # Average queueing delay per DRAM burst +system.physmem.bytesPerActivate::samples 1549 # Bytes accessed per row activation +system.physmem.bytesPerActivate::mean 642.437702 # Bytes accessed per row activation +system.physmem.bytesPerActivate::gmean 437.017774 # Bytes accessed per row activation +system.physmem.bytesPerActivate::stdev 401.182344 # Bytes accessed per row activation 
+system.physmem.bytesPerActivate::0-127 251 16.20% 16.20% # Bytes accessed per row activation +system.physmem.bytesPerActivate::128-255 185 11.94% 28.15% # Bytes accessed per row activation +system.physmem.bytesPerActivate::256-383 90 5.81% 33.96% # Bytes accessed per row activation +system.physmem.bytesPerActivate::384-511 67 4.33% 38.28% # Bytes accessed per row activation +system.physmem.bytesPerActivate::512-639 77 4.97% 43.25% # Bytes accessed per row activation +system.physmem.bytesPerActivate::640-767 93 6.00% 49.26% # Bytes accessed per row activation +system.physmem.bytesPerActivate::768-895 42 2.71% 51.97% # Bytes accessed per row activation +system.physmem.bytesPerActivate::896-1023 43 2.78% 54.74% # Bytes accessed per row activation +system.physmem.bytesPerActivate::1024-1151 701 45.26% 100.00% # Bytes accessed per row activation +system.physmem.bytesPerActivate::total 1549 # Bytes accessed per row activation +system.physmem.totQLat 75120250 # Total ticks spent queuing +system.physmem.totMemAccLat 367132750 # Total ticks spent from burst creation until serviced by the DRAM +system.physmem.totBusLat 77870000 # Total ticks spent in databus transfers +system.physmem.avgQLat 4823.44 # Average queueing delay per DRAM burst system.physmem.avgBusLat 5000.00 # Average bus latency per DRAM burst -system.physmem.avgMemAccLat 23411.58 # Average memory access latency per DRAM burst -system.physmem.avgRdBW 16.28 # Average DRAM read bandwidth in MiByte/s +system.physmem.avgMemAccLat 23573.44 # Average memory access latency per DRAM burst +system.physmem.avgRdBW 15.97 # Average DRAM read bandwidth in MiByte/s system.physmem.avgWrBW 0.00 # Average achieved write bandwidth in MiByte/s -system.physmem.avgRdBWSys 16.28 # Average system read bandwidth in MiByte/s +system.physmem.avgRdBWSys 15.97 # Average system read bandwidth in MiByte/s system.physmem.avgWrBWSys 0.00 # Average system write bandwidth in MiByte/s system.physmem.peakBW 12800.00 # Theoretical peak bandwidth in MiByte/s -system.physmem.busUtil 0.13 # Data bus utilization in percentage -system.physmem.busUtilRead 0.13 # Data bus utilization in percentage for reads +system.physmem.busUtil 0.12 # Data bus utilization in percentage +system.physmem.busUtilRead 0.12 # Data bus utilization in percentage for reads system.physmem.busUtilWrite 0.00 # Data bus utilization in percentage for writes system.physmem.avgRdQLen 1.00 # Average read queue length when enqueuing system.physmem.avgWrQLen 0.00 # Average write queue length when enqueuing -system.physmem.readRowHits 14028 # Number of row buffer hits during reads +system.physmem.readRowHits 14020 # Number of row buffer hits during reads system.physmem.writeRowHits 0 # Number of row buffer hits during writes -system.physmem.readRowHitRate 90.08 # Row buffer hit rate for reads +system.physmem.readRowHitRate 90.02 # Row buffer hit rate for reads system.physmem.writeRowHitRate nan # Row buffer hit rate for writes -system.physmem.avgGap 3932107.04 # Average gap between requests -system.physmem.pageHitRate 90.08 # Row buffer hit rate, read and write combined -system.physmem_0.actEnergy 6282360 # Energy for activate commands per rank (pJ) -system.physmem_0.preEnergy 3427875 # Energy for precharge commands per rank (pJ) -system.physmem_0.readEnergy 63679200 # Energy for read commands per rank (pJ) +system.physmem.avgGap 4007246.92 # Average gap between requests +system.physmem.pageHitRate 90.02 # Row buffer hit rate, read and write combined +system.physmem_0.actEnergy 6395760 # Energy for activate 
commands per rank (pJ) +system.physmem_0.preEnergy 3489750 # Energy for precharge commands per rank (pJ) +system.physmem_0.readEnergy 63772800 # Energy for read commands per rank (pJ) system.physmem_0.writeEnergy 0 # Energy for write commands per rank (pJ) -system.physmem_0.refreshEnergy 3999315840 # Energy for refresh commands per rank (pJ) -system.physmem_0.actBackEnergy 2519893620 # Energy for active background per rank (pJ) -system.physmem_0.preBackEnergy 34528365000 # Energy for precharge background per rank (pJ) -system.physmem_0.totalEnergy 41120963895 # Total energy per rank (pJ) -system.physmem_0.averagePower 671.567381 # Core power per rank (mW) -system.physmem_0.memoryStateTime::IDLE 57430990750 # Time in different power states -system.physmem_0.memoryStateTime::REF 2044640000 # Time in different power states +system.physmem_0.refreshEnergy 4076108400 # Energy for refresh commands per rank (pJ) +system.physmem_0.actBackEnergy 2565881505 # Energy for active background per rank (pJ) +system.physmem_0.preBackEnergy 35193459000 # Energy for precharge background per rank (pJ) +system.physmem_0.totalEnergy 41909107215 # Total energy per rank (pJ) +system.physmem_0.averagePower 671.544396 # Core power per rank (mW) +system.physmem_0.memoryStateTime::IDLE 58537353750 # Time in different power states +system.physmem_0.memoryStateTime::REF 2083900000 # Time in different power states system.physmem_0.memoryStateTime::PRE_PDN 0 # Time in different power states -system.physmem_0.memoryStateTime::ACT 1755713000 # Time in different power states +system.physmem_0.memoryStateTime::ACT 1785901250 # Time in different power states system.physmem_0.memoryStateTime::ACT_PDN 0 # Time in different power states system.physmem_1.actEnergy 5314680 # Energy for activate commands per rank (pJ) system.physmem_1.preEnergy 2899875 # Energy for precharge commands per rank (pJ) -system.physmem_1.readEnergy 57462600 # Energy for read commands per rank (pJ) +system.physmem_1.readEnergy 57509400 # Energy for read commands per rank (pJ) system.physmem_1.writeEnergy 0 # Energy for write commands per rank (pJ) -system.physmem_1.refreshEnergy 3999315840 # Energy for refresh commands per rank (pJ) -system.physmem_1.actBackEnergy 2548962765 # Energy for active background per rank (pJ) -system.physmem_1.preBackEnergy 34502857500 # Energy for precharge background per rank (pJ) -system.physmem_1.totalEnergy 41116813260 # Total energy per rank (pJ) -system.physmem_1.averagePower 671.499745 # Core power per rank (mW) -system.physmem_1.memoryStateTime::IDLE 57389143250 # Time in different power states -system.physmem_1.memoryStateTime::REF 2044640000 # Time in different power states +system.physmem_1.refreshEnergy 4076108400 # Energy for refresh commands per rank (pJ) +system.physmem_1.actBackEnergy 2571480045 # Energy for active background per rank (pJ) +system.physmem_1.preBackEnergy 35188548000 # Energy for precharge background per rank (pJ) +system.physmem_1.totalEnergy 41901860400 # Total energy per rank (pJ) +system.physmem_1.averagePower 671.428274 # Core power per rank (mW) +system.physmem_1.memoryStateTime::IDLE 58529558500 # Time in different power states +system.physmem_1.memoryStateTime::REF 2083900000 # Time in different power states system.physmem_1.memoryStateTime::PRE_PDN 0 # Time in different power states -system.physmem_1.memoryStateTime::ACT 1797845750 # Time in different power states +system.physmem_1.memoryStateTime::ACT 1793609000 # Time in different power states system.physmem_1.memoryStateTime::ACT_PDN 
0 # Time in different power states -system.pwrStateResidencyTicks::UNDEFINED 61234797500 # Cumulative time (in ticks) in various power states -system.cpu.branchPred.lookups 20750031 # Number of BP lookups -system.cpu.branchPred.condPredicted 17060378 # Number of conditional branches predicted +system.pwrStateResidencyTicks::UNDEFINED 62408957500 # Cumulative time (in ticks) in various power states +system.cpu.branchPred.lookups 20808236 # Number of BP lookups +system.cpu.branchPred.condPredicted 17115622 # Number of conditional branches predicted system.cpu.branchPred.condIncorrect 756798 # Number of conditional branches incorrect -system.cpu.branchPred.BTBLookups 8954908 # Number of BTB lookups -system.cpu.branchPred.BTBHits 8830467 # Number of BTB hits +system.cpu.branchPred.BTBLookups 8965652 # Number of BTB lookups +system.cpu.branchPred.BTBHits 8840815 # Number of BTB hits system.cpu.branchPred.BTBCorrect 0 # Number of correct BTB predictions (this stat may not work properly. -system.cpu.branchPred.BTBHitPct 98.610360 # BTB Hit Percentage -system.cpu.branchPred.usedRAS 61988 # Number of times the RAS was used to get a target. +system.cpu.branchPred.BTBHitPct 98.607608 # BTB Hit Percentage +system.cpu.branchPred.usedRAS 61995 # Number of times the RAS was used to get a target. system.cpu.branchPred.RASInCorrect 17 # Number of incorrect RAS predictions. -system.cpu.branchPred.indirectLookups 26205 # Number of indirect predictor lookups. +system.cpu.branchPred.indirectLookups 26211 # Number of indirect predictor lookups. system.cpu.branchPred.indirectHits 24795 # Number of indirect target hits. -system.cpu.branchPred.indirectMisses 1410 # Number of indirect misses. +system.cpu.branchPred.indirectMisses 1416 # Number of indirect misses. system.cpu.branchPredindirectMispredicted 665 # Number of mispredicted indirect branches. 
system.cpu_clk_domain.clock 500 # Clock period in ticks -system.cpu.dstage2_mmu.stage2_tlb.walker.pwrStateResidencyTicks::UNDEFINED 61234797500 # Cumulative time (in ticks) in various power states +system.cpu.dstage2_mmu.stage2_tlb.walker.pwrStateResidencyTicks::UNDEFINED 62408957500 # Cumulative time (in ticks) in various power states system.cpu.dstage2_mmu.stage2_tlb.walker.walks 0 # Table walker walks requested system.cpu.dstage2_mmu.stage2_tlb.walker.walkRequestOrigin_Requested::Data 0 # Table walker requests started/completed, data/inst system.cpu.dstage2_mmu.stage2_tlb.walker.walkRequestOrigin_Requested::Inst 0 # Table walker requests started/completed, data/inst @@ -296,7 +296,7 @@ system.cpu.dstage2_mmu.stage2_tlb.inst_accesses 0 system.cpu.dstage2_mmu.stage2_tlb.hits 0 # DTB hits system.cpu.dstage2_mmu.stage2_tlb.misses 0 # DTB misses system.cpu.dstage2_mmu.stage2_tlb.accesses 0 # DTB accesses -system.cpu.dtb.walker.pwrStateResidencyTicks::UNDEFINED 61234797500 # Cumulative time (in ticks) in various power states +system.cpu.dtb.walker.pwrStateResidencyTicks::UNDEFINED 62408957500 # Cumulative time (in ticks) in various power states system.cpu.dtb.walker.walks 0 # Table walker walks requested system.cpu.dtb.walker.walkRequestOrigin_Requested::Data 0 # Table walker requests started/completed, data/inst system.cpu.dtb.walker.walkRequestOrigin_Requested::Inst 0 # Table walker requests started/completed, data/inst @@ -326,7 +326,7 @@ system.cpu.dtb.inst_accesses 0 # IT system.cpu.dtb.hits 0 # DTB hits system.cpu.dtb.misses 0 # DTB misses system.cpu.dtb.accesses 0 # DTB accesses -system.cpu.istage2_mmu.stage2_tlb.walker.pwrStateResidencyTicks::UNDEFINED 61234797500 # Cumulative time (in ticks) in various power states +system.cpu.istage2_mmu.stage2_tlb.walker.pwrStateResidencyTicks::UNDEFINED 62408957500 # Cumulative time (in ticks) in various power states system.cpu.istage2_mmu.stage2_tlb.walker.walks 0 # Table walker walks requested system.cpu.istage2_mmu.stage2_tlb.walker.walkRequestOrigin_Requested::Data 0 # Table walker requests started/completed, data/inst system.cpu.istage2_mmu.stage2_tlb.walker.walkRequestOrigin_Requested::Inst 0 # Table walker requests started/completed, data/inst @@ -356,7 +356,7 @@ system.cpu.istage2_mmu.stage2_tlb.inst_accesses 0 system.cpu.istage2_mmu.stage2_tlb.hits 0 # DTB hits system.cpu.istage2_mmu.stage2_tlb.misses 0 # DTB misses system.cpu.istage2_mmu.stage2_tlb.accesses 0 # DTB accesses -system.cpu.itb.walker.pwrStateResidencyTicks::UNDEFINED 61234797500 # Cumulative time (in ticks) in various power states +system.cpu.itb.walker.pwrStateResidencyTicks::UNDEFINED 62408957500 # Cumulative time (in ticks) in various power states system.cpu.itb.walker.walks 0 # Table walker walks requested system.cpu.itb.walker.walkRequestOrigin_Requested::Data 0 # Table walker requests started/completed, data/inst system.cpu.itb.walker.walkRequestOrigin_Requested::Inst 0 # Table walker requests started/completed, data/inst @@ -387,16 +387,16 @@ system.cpu.itb.hits 0 # DT system.cpu.itb.misses 0 # DTB misses system.cpu.itb.accesses 0 # DTB accesses system.cpu.workload.num_syscalls 442 # Number of system calls -system.cpu.pwrStateResidencyTicks::ON 61234797500 # Cumulative time (in ticks) in various power states -system.cpu.numCycles 122469595 # number of cpu cycles simulated +system.cpu.pwrStateResidencyTicks::ON 62408957500 # Cumulative time (in ticks) in various power states +system.cpu.numCycles 124817915 # number of cpu cycles simulated system.cpu.numWorkItemsStarted 
0 # number of work items this cpu started system.cpu.numWorkItemsCompleted 0 # number of work items this cpu completed system.cpu.committedInsts 90602850 # Number of instructions committed system.cpu.committedOps 91054081 # Number of ops (including micro ops) committed -system.cpu.discardedOps 2175024 # Number of ops (including micro ops) which were discarded before commit +system.cpu.discardedOps 2182474 # Number of ops (including micro ops) which were discarded before commit system.cpu.numFetchSuspends 0 # Number of times Execute suspended instruction fetching -system.cpu.cpi 1.351719 # CPI: cycles per instruction -system.cpu.ipc 0.739799 # IPC: instructions per cycle +system.cpu.cpi 1.377638 # CPI: cycles per instruction +system.cpu.ipc 0.725880 # IPC: instructions per cycle system.cpu.op_class_0::No_OpClass 0 0.00% 0.00% # Class of committed instruction system.cpu.op_class_0::IntAlu 63822829 70.09% 70.09% # Class of committed instruction system.cpu.op_class_0::IntMult 10474 0.01% 70.10% # Class of committed instruction @@ -432,60 +432,60 @@ system.cpu.op_class_0::MemWrite 4744844 5.21% 100.00% # Cl system.cpu.op_class_0::IprAccess 0 0.00% 100.00% # Class of committed instruction system.cpu.op_class_0::InstPrefetch 0 0.00% 100.00% # Class of committed instruction system.cpu.op_class_0::total 91054081 # Class of committed instruction -system.cpu.tickCycles 109245506 # Number of cycles that the object actually ticked -system.cpu.idleCycles 13224089 # Total number of cycles that the object has spent stopped -system.cpu.dcache.tags.pwrStateResidencyTicks::UNDEFINED 61234797500 # Cumulative time (in ticks) in various power states -system.cpu.dcache.tags.replacements 946097 # number of replacements -system.cpu.dcache.tags.tagsinuse 3616.804007 # Cycle average of tags in use -system.cpu.dcache.tags.total_refs 26262686 # Total number of references to valid blocks. -system.cpu.dcache.tags.sampled_refs 950193 # Sample count of references to valid blocks. -system.cpu.dcache.tags.avg_refs 27.639317 # Average number of references to valid blocks. -system.cpu.dcache.tags.warmup_cycle 20511782500 # Cycle when the warmup percentage was hit. -system.cpu.dcache.tags.occ_blocks::cpu.data 3616.804007 # Average occupied blocks per requestor -system.cpu.dcache.tags.occ_percent::cpu.data 0.883009 # Average percentage of cache occupancy -system.cpu.dcache.tags.occ_percent::total 0.883009 # Average percentage of cache occupancy +system.cpu.tickCycles 110516717 # Number of cycles that the object actually ticked +system.cpu.idleCycles 14301198 # Total number of cycles that the object has spent stopped +system.cpu.dcache.tags.pwrStateResidencyTicks::UNDEFINED 62408957500 # Cumulative time (in ticks) in various power states +system.cpu.dcache.tags.replacements 946101 # number of replacements +system.cpu.dcache.tags.tagsinuse 3621.431844 # Cycle average of tags in use +system.cpu.dcache.tags.total_refs 26274920 # Total number of references to valid blocks. +system.cpu.dcache.tags.sampled_refs 950197 # Sample count of references to valid blocks. +system.cpu.dcache.tags.avg_refs 27.652076 # Average number of references to valid blocks. +system.cpu.dcache.tags.warmup_cycle 20702462500 # Cycle when the warmup percentage was hit. 
+system.cpu.dcache.tags.occ_blocks::cpu.data 3621.431844 # Average occupied blocks per requestor +system.cpu.dcache.tags.occ_percent::cpu.data 0.884139 # Average percentage of cache occupancy +system.cpu.dcache.tags.occ_percent::total 0.884139 # Average percentage of cache occupancy system.cpu.dcache.tags.occ_task_id_blocks::1024 4096 # Occupied blocks per task id -system.cpu.dcache.tags.age_task_id_blocks_1024::0 260 # Occupied blocks per task id -system.cpu.dcache.tags.age_task_id_blocks_1024::1 2253 # Occupied blocks per task id -system.cpu.dcache.tags.age_task_id_blocks_1024::2 1583 # Occupied blocks per task id +system.cpu.dcache.tags.age_task_id_blocks_1024::0 242 # Occupied blocks per task id +system.cpu.dcache.tags.age_task_id_blocks_1024::1 2203 # Occupied blocks per task id +system.cpu.dcache.tags.age_task_id_blocks_1024::2 1651 # Occupied blocks per task id system.cpu.dcache.tags.occ_task_id_percent::1024 1 # Percentage of cache occupancy per task id -system.cpu.dcache.tags.tag_accesses 55454003 # Number of tag accesses -system.cpu.dcache.tags.data_accesses 55454003 # Number of data accesses -system.cpu.dcache.pwrStateResidencyTicks::UNDEFINED 61234797500 # Cumulative time (in ticks) in various power states -system.cpu.dcache.ReadReq_hits::cpu.data 21593712 # number of ReadReq hits -system.cpu.dcache.ReadReq_hits::total 21593712 # number of ReadReq hits -system.cpu.dcache.WriteReq_hits::cpu.data 4660692 # number of WriteReq hits -system.cpu.dcache.WriteReq_hits::total 4660692 # number of WriteReq hits +system.cpu.dcache.tags.tag_accesses 55461267 # Number of tag accesses +system.cpu.dcache.tags.data_accesses 55461267 # Number of data accesses +system.cpu.dcache.pwrStateResidencyTicks::UNDEFINED 62408957500 # Cumulative time (in ticks) in various power states +system.cpu.dcache.ReadReq_hits::cpu.data 21605941 # number of ReadReq hits +system.cpu.dcache.ReadReq_hits::total 21605941 # number of ReadReq hits +system.cpu.dcache.WriteReq_hits::cpu.data 4660697 # number of WriteReq hits +system.cpu.dcache.WriteReq_hits::total 4660697 # number of WriteReq hits system.cpu.dcache.SoftPFReq_hits::cpu.data 508 # number of SoftPFReq hits system.cpu.dcache.SoftPFReq_hits::total 508 # number of SoftPFReq hits system.cpu.dcache.LoadLockedReq_hits::cpu.data 3887 # number of LoadLockedReq hits system.cpu.dcache.LoadLockedReq_hits::total 3887 # number of LoadLockedReq hits system.cpu.dcache.StoreCondReq_hits::cpu.data 3887 # number of StoreCondReq hits system.cpu.dcache.StoreCondReq_hits::total 3887 # number of StoreCondReq hits -system.cpu.dcache.demand_hits::cpu.data 26254404 # number of demand (read+write) hits -system.cpu.dcache.demand_hits::total 26254404 # number of demand (read+write) hits -system.cpu.dcache.overall_hits::cpu.data 26254912 # number of overall hits -system.cpu.dcache.overall_hits::total 26254912 # number of overall hits -system.cpu.dcache.ReadReq_misses::cpu.data 914926 # number of ReadReq misses -system.cpu.dcache.ReadReq_misses::total 914926 # number of ReadReq misses -system.cpu.dcache.WriteReq_misses::cpu.data 74289 # number of WriteReq misses -system.cpu.dcache.WriteReq_misses::total 74289 # number of WriteReq misses +system.cpu.dcache.demand_hits::cpu.data 26266638 # number of demand (read+write) hits +system.cpu.dcache.demand_hits::total 26266638 # number of demand (read+write) hits +system.cpu.dcache.overall_hits::cpu.data 26267146 # number of overall hits +system.cpu.dcache.overall_hits::total 26267146 # number of overall hits 
+system.cpu.dcache.ReadReq_misses::cpu.data 906327 # number of ReadReq misses +system.cpu.dcache.ReadReq_misses::total 906327 # number of ReadReq misses +system.cpu.dcache.WriteReq_misses::cpu.data 74284 # number of WriteReq misses +system.cpu.dcache.WriteReq_misses::total 74284 # number of WriteReq misses system.cpu.dcache.SoftPFReq_misses::cpu.data 4 # number of SoftPFReq misses system.cpu.dcache.SoftPFReq_misses::total 4 # number of SoftPFReq misses -system.cpu.dcache.demand_misses::cpu.data 989215 # number of demand (read+write) misses -system.cpu.dcache.demand_misses::total 989215 # number of demand (read+write) misses -system.cpu.dcache.overall_misses::cpu.data 989219 # number of overall misses -system.cpu.dcache.overall_misses::total 989219 # number of overall misses -system.cpu.dcache.ReadReq_miss_latency::cpu.data 11919140000 # number of ReadReq miss cycles -system.cpu.dcache.ReadReq_miss_latency::total 11919140000 # number of ReadReq miss cycles -system.cpu.dcache.WriteReq_miss_latency::cpu.data 2539899500 # number of WriteReq miss cycles -system.cpu.dcache.WriteReq_miss_latency::total 2539899500 # number of WriteReq miss cycles -system.cpu.dcache.demand_miss_latency::cpu.data 14459039500 # number of demand (read+write) miss cycles -system.cpu.dcache.demand_miss_latency::total 14459039500 # number of demand (read+write) miss cycles -system.cpu.dcache.overall_miss_latency::cpu.data 14459039500 # number of overall miss cycles -system.cpu.dcache.overall_miss_latency::total 14459039500 # number of overall miss cycles -system.cpu.dcache.ReadReq_accesses::cpu.data 22508638 # number of ReadReq accesses(hits+misses) -system.cpu.dcache.ReadReq_accesses::total 22508638 # number of ReadReq accesses(hits+misses) +system.cpu.dcache.demand_misses::cpu.data 980611 # number of demand (read+write) misses +system.cpu.dcache.demand_misses::total 980611 # number of demand (read+write) misses +system.cpu.dcache.overall_misses::cpu.data 980615 # number of overall misses +system.cpu.dcache.overall_misses::total 980615 # number of overall misses +system.cpu.dcache.ReadReq_miss_latency::cpu.data 11805097500 # number of ReadReq miss cycles +system.cpu.dcache.ReadReq_miss_latency::total 11805097500 # number of ReadReq miss cycles +system.cpu.dcache.WriteReq_miss_latency::cpu.data 2540928500 # number of WriteReq miss cycles +system.cpu.dcache.WriteReq_miss_latency::total 2540928500 # number of WriteReq miss cycles +system.cpu.dcache.demand_miss_latency::cpu.data 14346026000 # number of demand (read+write) miss cycles +system.cpu.dcache.demand_miss_latency::total 14346026000 # number of demand (read+write) miss cycles +system.cpu.dcache.overall_miss_latency::cpu.data 14346026000 # number of overall miss cycles +system.cpu.dcache.overall_miss_latency::total 14346026000 # number of overall miss cycles +system.cpu.dcache.ReadReq_accesses::cpu.data 22512268 # number of ReadReq accesses(hits+misses) +system.cpu.dcache.ReadReq_accesses::total 22512268 # number of ReadReq accesses(hits+misses) system.cpu.dcache.WriteReq_accesses::cpu.data 4734981 # number of WriteReq accesses(hits+misses) system.cpu.dcache.WriteReq_accesses::total 4734981 # number of WriteReq accesses(hits+misses) system.cpu.dcache.SoftPFReq_accesses::cpu.data 512 # number of SoftPFReq accesses(hits+misses) @@ -494,139 +494,139 @@ system.cpu.dcache.LoadLockedReq_accesses::cpu.data 3887 system.cpu.dcache.LoadLockedReq_accesses::total 3887 # number of LoadLockedReq accesses(hits+misses) system.cpu.dcache.StoreCondReq_accesses::cpu.data 3887 # number 
of StoreCondReq accesses(hits+misses) system.cpu.dcache.StoreCondReq_accesses::total 3887 # number of StoreCondReq accesses(hits+misses) -system.cpu.dcache.demand_accesses::cpu.data 27243619 # number of demand (read+write) accesses -system.cpu.dcache.demand_accesses::total 27243619 # number of demand (read+write) accesses -system.cpu.dcache.overall_accesses::cpu.data 27244131 # number of overall (read+write) accesses -system.cpu.dcache.overall_accesses::total 27244131 # number of overall (read+write) accesses -system.cpu.dcache.ReadReq_miss_rate::cpu.data 0.040648 # miss rate for ReadReq accesses -system.cpu.dcache.ReadReq_miss_rate::total 0.040648 # miss rate for ReadReq accesses -system.cpu.dcache.WriteReq_miss_rate::cpu.data 0.015689 # miss rate for WriteReq accesses -system.cpu.dcache.WriteReq_miss_rate::total 0.015689 # miss rate for WriteReq accesses +system.cpu.dcache.demand_accesses::cpu.data 27247249 # number of demand (read+write) accesses +system.cpu.dcache.demand_accesses::total 27247249 # number of demand (read+write) accesses +system.cpu.dcache.overall_accesses::cpu.data 27247761 # number of overall (read+write) accesses +system.cpu.dcache.overall_accesses::total 27247761 # number of overall (read+write) accesses +system.cpu.dcache.ReadReq_miss_rate::cpu.data 0.040259 # miss rate for ReadReq accesses +system.cpu.dcache.ReadReq_miss_rate::total 0.040259 # miss rate for ReadReq accesses +system.cpu.dcache.WriteReq_miss_rate::cpu.data 0.015688 # miss rate for WriteReq accesses +system.cpu.dcache.WriteReq_miss_rate::total 0.015688 # miss rate for WriteReq accesses system.cpu.dcache.SoftPFReq_miss_rate::cpu.data 0.007812 # miss rate for SoftPFReq accesses system.cpu.dcache.SoftPFReq_miss_rate::total 0.007812 # miss rate for SoftPFReq accesses -system.cpu.dcache.demand_miss_rate::cpu.data 0.036310 # miss rate for demand accesses -system.cpu.dcache.demand_miss_rate::total 0.036310 # miss rate for demand accesses -system.cpu.dcache.overall_miss_rate::cpu.data 0.036309 # miss rate for overall accesses -system.cpu.dcache.overall_miss_rate::total 0.036309 # miss rate for overall accesses -system.cpu.dcache.ReadReq_avg_miss_latency::cpu.data 13027.436099 # average ReadReq miss latency -system.cpu.dcache.ReadReq_avg_miss_latency::total 13027.436099 # average ReadReq miss latency -system.cpu.dcache.WriteReq_avg_miss_latency::cpu.data 34189.442582 # average WriteReq miss latency -system.cpu.dcache.WriteReq_avg_miss_latency::total 34189.442582 # average WriteReq miss latency -system.cpu.dcache.demand_avg_miss_latency::cpu.data 14616.680398 # average overall miss latency -system.cpu.dcache.demand_avg_miss_latency::total 14616.680398 # average overall miss latency -system.cpu.dcache.overall_avg_miss_latency::cpu.data 14616.621294 # average overall miss latency -system.cpu.dcache.overall_avg_miss_latency::total 14616.621294 # average overall miss latency +system.cpu.dcache.demand_miss_rate::cpu.data 0.035989 # miss rate for demand accesses +system.cpu.dcache.demand_miss_rate::total 0.035989 # miss rate for demand accesses +system.cpu.dcache.overall_miss_rate::cpu.data 0.035989 # miss rate for overall accesses +system.cpu.dcache.overall_miss_rate::total 0.035989 # miss rate for overall accesses +system.cpu.dcache.ReadReq_avg_miss_latency::cpu.data 13025.207789 # average ReadReq miss latency +system.cpu.dcache.ReadReq_avg_miss_latency::total 13025.207789 # average ReadReq miss latency +system.cpu.dcache.WriteReq_avg_miss_latency::cpu.data 34205.596091 # average WriteReq miss latency 
+system.cpu.dcache.WriteReq_avg_miss_latency::total 34205.596091 # average WriteReq miss latency +system.cpu.dcache.demand_avg_miss_latency::cpu.data 14629.680883 # average overall miss latency +system.cpu.dcache.demand_avg_miss_latency::total 14629.680883 # average overall miss latency +system.cpu.dcache.overall_avg_miss_latency::cpu.data 14629.621207 # average overall miss latency +system.cpu.dcache.overall_avg_miss_latency::total 14629.621207 # average overall miss latency system.cpu.dcache.blocked_cycles::no_mshrs 0 # number of cycles access was blocked system.cpu.dcache.blocked_cycles::no_targets 0 # number of cycles access was blocked system.cpu.dcache.blocked::no_mshrs 0 # number of cycles access was blocked system.cpu.dcache.blocked::no_targets 0 # number of cycles access was blocked system.cpu.dcache.avg_blocked_cycles::no_mshrs nan # average number of cycles each access was blocked system.cpu.dcache.avg_blocked_cycles::no_targets nan # average number of cycles each access was blocked -system.cpu.dcache.writebacks::writebacks 943278 # number of writebacks -system.cpu.dcache.writebacks::total 943278 # number of writebacks -system.cpu.dcache.ReadReq_mshr_hits::cpu.data 11500 # number of ReadReq MSHR hits -system.cpu.dcache.ReadReq_mshr_hits::total 11500 # number of ReadReq MSHR hits -system.cpu.dcache.WriteReq_mshr_hits::cpu.data 27525 # number of WriteReq MSHR hits -system.cpu.dcache.WriteReq_mshr_hits::total 27525 # number of WriteReq MSHR hits -system.cpu.dcache.demand_mshr_hits::cpu.data 39025 # number of demand (read+write) MSHR hits -system.cpu.dcache.demand_mshr_hits::total 39025 # number of demand (read+write) MSHR hits -system.cpu.dcache.overall_mshr_hits::cpu.data 39025 # number of overall MSHR hits -system.cpu.dcache.overall_mshr_hits::total 39025 # number of overall MSHR hits -system.cpu.dcache.ReadReq_mshr_misses::cpu.data 903426 # number of ReadReq MSHR misses -system.cpu.dcache.ReadReq_mshr_misses::total 903426 # number of ReadReq MSHR misses +system.cpu.dcache.writebacks::writebacks 943282 # number of writebacks +system.cpu.dcache.writebacks::total 943282 # number of writebacks +system.cpu.dcache.ReadReq_mshr_hits::cpu.data 2897 # number of ReadReq MSHR hits +system.cpu.dcache.ReadReq_mshr_hits::total 2897 # number of ReadReq MSHR hits +system.cpu.dcache.WriteReq_mshr_hits::cpu.data 27520 # number of WriteReq MSHR hits +system.cpu.dcache.WriteReq_mshr_hits::total 27520 # number of WriteReq MSHR hits +system.cpu.dcache.demand_mshr_hits::cpu.data 30417 # number of demand (read+write) MSHR hits +system.cpu.dcache.demand_mshr_hits::total 30417 # number of demand (read+write) MSHR hits +system.cpu.dcache.overall_mshr_hits::cpu.data 30417 # number of overall MSHR hits +system.cpu.dcache.overall_mshr_hits::total 30417 # number of overall MSHR hits +system.cpu.dcache.ReadReq_mshr_misses::cpu.data 903430 # number of ReadReq MSHR misses +system.cpu.dcache.ReadReq_mshr_misses::total 903430 # number of ReadReq MSHR misses system.cpu.dcache.WriteReq_mshr_misses::cpu.data 46764 # number of WriteReq MSHR misses system.cpu.dcache.WriteReq_mshr_misses::total 46764 # number of WriteReq MSHR misses system.cpu.dcache.SoftPFReq_mshr_misses::cpu.data 3 # number of SoftPFReq MSHR misses system.cpu.dcache.SoftPFReq_mshr_misses::total 3 # number of SoftPFReq MSHR misses -system.cpu.dcache.demand_mshr_misses::cpu.data 950190 # number of demand (read+write) MSHR misses -system.cpu.dcache.demand_mshr_misses::total 950190 # number of demand (read+write) MSHR misses 
-system.cpu.dcache.overall_mshr_misses::cpu.data 950193 # number of overall MSHR misses -system.cpu.dcache.overall_mshr_misses::total 950193 # number of overall MSHR misses -system.cpu.dcache.ReadReq_mshr_miss_latency::cpu.data 10865506000 # number of ReadReq MSHR miss cycles -system.cpu.dcache.ReadReq_mshr_miss_latency::total 10865506000 # number of ReadReq MSHR miss cycles -system.cpu.dcache.WriteReq_mshr_miss_latency::cpu.data 1480423500 # number of WriteReq MSHR miss cycles -system.cpu.dcache.WriteReq_mshr_miss_latency::total 1480423500 # number of WriteReq MSHR miss cycles -system.cpu.dcache.SoftPFReq_mshr_miss_latency::cpu.data 156500 # number of SoftPFReq MSHR miss cycles -system.cpu.dcache.SoftPFReq_mshr_miss_latency::total 156500 # number of SoftPFReq MSHR miss cycles -system.cpu.dcache.demand_mshr_miss_latency::cpu.data 12345929500 # number of demand (read+write) MSHR miss cycles -system.cpu.dcache.demand_mshr_miss_latency::total 12345929500 # number of demand (read+write) MSHR miss cycles -system.cpu.dcache.overall_mshr_miss_latency::cpu.data 12346086000 # number of overall MSHR miss cycles -system.cpu.dcache.overall_mshr_miss_latency::total 12346086000 # number of overall MSHR miss cycles -system.cpu.dcache.ReadReq_mshr_miss_rate::cpu.data 0.040137 # mshr miss rate for ReadReq accesses -system.cpu.dcache.ReadReq_mshr_miss_rate::total 0.040137 # mshr miss rate for ReadReq accesses +system.cpu.dcache.demand_mshr_misses::cpu.data 950194 # number of demand (read+write) MSHR misses +system.cpu.dcache.demand_mshr_misses::total 950194 # number of demand (read+write) MSHR misses +system.cpu.dcache.overall_mshr_misses::cpu.data 950197 # number of overall MSHR misses +system.cpu.dcache.overall_mshr_misses::total 950197 # number of overall MSHR misses +system.cpu.dcache.ReadReq_mshr_miss_latency::cpu.data 10863020500 # number of ReadReq MSHR miss cycles +system.cpu.dcache.ReadReq_mshr_miss_latency::total 10863020500 # number of ReadReq MSHR miss cycles +system.cpu.dcache.WriteReq_mshr_miss_latency::cpu.data 1482579500 # number of WriteReq MSHR miss cycles +system.cpu.dcache.WriteReq_mshr_miss_latency::total 1482579500 # number of WriteReq MSHR miss cycles +system.cpu.dcache.SoftPFReq_mshr_miss_latency::cpu.data 156000 # number of SoftPFReq MSHR miss cycles +system.cpu.dcache.SoftPFReq_mshr_miss_latency::total 156000 # number of SoftPFReq MSHR miss cycles +system.cpu.dcache.demand_mshr_miss_latency::cpu.data 12345600000 # number of demand (read+write) MSHR miss cycles +system.cpu.dcache.demand_mshr_miss_latency::total 12345600000 # number of demand (read+write) MSHR miss cycles +system.cpu.dcache.overall_mshr_miss_latency::cpu.data 12345756000 # number of overall MSHR miss cycles +system.cpu.dcache.overall_mshr_miss_latency::total 12345756000 # number of overall MSHR miss cycles +system.cpu.dcache.ReadReq_mshr_miss_rate::cpu.data 0.040131 # mshr miss rate for ReadReq accesses +system.cpu.dcache.ReadReq_mshr_miss_rate::total 0.040131 # mshr miss rate for ReadReq accesses system.cpu.dcache.WriteReq_mshr_miss_rate::cpu.data 0.009876 # mshr miss rate for WriteReq accesses system.cpu.dcache.WriteReq_mshr_miss_rate::total 0.009876 # mshr miss rate for WriteReq accesses system.cpu.dcache.SoftPFReq_mshr_miss_rate::cpu.data 0.005859 # mshr miss rate for SoftPFReq accesses system.cpu.dcache.SoftPFReq_mshr_miss_rate::total 0.005859 # mshr miss rate for SoftPFReq accesses -system.cpu.dcache.demand_mshr_miss_rate::cpu.data 0.034878 # mshr miss rate for demand accesses 
-system.cpu.dcache.demand_mshr_miss_rate::total 0.034878 # mshr miss rate for demand accesses -system.cpu.dcache.overall_mshr_miss_rate::cpu.data 0.034877 # mshr miss rate for overall accesses -system.cpu.dcache.overall_mshr_miss_rate::total 0.034877 # mshr miss rate for overall accesses -system.cpu.dcache.ReadReq_avg_mshr_miss_latency::cpu.data 12027.001658 # average ReadReq mshr miss latency -system.cpu.dcache.ReadReq_avg_mshr_miss_latency::total 12027.001658 # average ReadReq mshr miss latency -system.cpu.dcache.WriteReq_avg_mshr_miss_latency::cpu.data 31657.332564 # average WriteReq mshr miss latency -system.cpu.dcache.WriteReq_avg_mshr_miss_latency::total 31657.332564 # average WriteReq mshr miss latency -system.cpu.dcache.SoftPFReq_avg_mshr_miss_latency::cpu.data 52166.666667 # average SoftPFReq mshr miss latency -system.cpu.dcache.SoftPFReq_avg_mshr_miss_latency::total 52166.666667 # average SoftPFReq mshr miss latency -system.cpu.dcache.demand_avg_mshr_miss_latency::cpu.data 12993.116640 # average overall mshr miss latency -system.cpu.dcache.demand_avg_mshr_miss_latency::total 12993.116640 # average overall mshr miss latency -system.cpu.dcache.overall_avg_mshr_miss_latency::cpu.data 12993.240321 # average overall mshr miss latency -system.cpu.dcache.overall_avg_mshr_miss_latency::total 12993.240321 # average overall mshr miss latency -system.cpu.icache.tags.pwrStateResidencyTicks::UNDEFINED 61234797500 # Cumulative time (in ticks) in various power states +system.cpu.dcache.demand_mshr_miss_rate::cpu.data 0.034873 # mshr miss rate for demand accesses +system.cpu.dcache.demand_mshr_miss_rate::total 0.034873 # mshr miss rate for demand accesses +system.cpu.dcache.overall_mshr_miss_rate::cpu.data 0.034872 # mshr miss rate for overall accesses +system.cpu.dcache.overall_mshr_miss_rate::total 0.034872 # mshr miss rate for overall accesses +system.cpu.dcache.ReadReq_avg_mshr_miss_latency::cpu.data 12024.197226 # average ReadReq mshr miss latency +system.cpu.dcache.ReadReq_avg_mshr_miss_latency::total 12024.197226 # average ReadReq mshr miss latency +system.cpu.dcache.WriteReq_avg_mshr_miss_latency::cpu.data 31703.436404 # average WriteReq mshr miss latency +system.cpu.dcache.WriteReq_avg_mshr_miss_latency::total 31703.436404 # average WriteReq mshr miss latency +system.cpu.dcache.SoftPFReq_avg_mshr_miss_latency::cpu.data 52000 # average SoftPFReq mshr miss latency +system.cpu.dcache.SoftPFReq_avg_mshr_miss_latency::total 52000 # average SoftPFReq mshr miss latency +system.cpu.dcache.demand_avg_mshr_miss_latency::cpu.data 12992.715172 # average overall mshr miss latency +system.cpu.dcache.demand_avg_mshr_miss_latency::total 12992.715172 # average overall mshr miss latency +system.cpu.dcache.overall_avg_mshr_miss_latency::cpu.data 12992.838327 # average overall mshr miss latency +system.cpu.dcache.overall_avg_mshr_miss_latency::total 12992.838327 # average overall mshr miss latency +system.cpu.icache.tags.pwrStateResidencyTicks::UNDEFINED 62408957500 # Cumulative time (in ticks) in various power states system.cpu.icache.tags.replacements 5 # number of replacements -system.cpu.icache.tags.tagsinuse 689.102041 # Cycle average of tags in use -system.cpu.icache.tags.total_refs 27766889 # Total number of references to valid blocks. +system.cpu.icache.tags.tagsinuse 689.591924 # Cycle average of tags in use +system.cpu.icache.tags.total_refs 27835291 # Total number of references to valid blocks. system.cpu.icache.tags.sampled_refs 801 # Sample count of references to valid blocks. 
-system.cpu.icache.tags.avg_refs 34665.279650 # Average number of references to valid blocks. +system.cpu.icache.tags.avg_refs 34750.675406 # Average number of references to valid blocks. system.cpu.icache.tags.warmup_cycle 0 # Cycle when the warmup percentage was hit. -system.cpu.icache.tags.occ_blocks::cpu.inst 689.102041 # Average occupied blocks per requestor -system.cpu.icache.tags.occ_percent::cpu.inst 0.336476 # Average percentage of cache occupancy -system.cpu.icache.tags.occ_percent::total 0.336476 # Average percentage of cache occupancy +system.cpu.icache.tags.occ_blocks::cpu.inst 689.591924 # Average occupied blocks per requestor +system.cpu.icache.tags.occ_percent::cpu.inst 0.336715 # Average percentage of cache occupancy +system.cpu.icache.tags.occ_percent::total 0.336715 # Average percentage of cache occupancy system.cpu.icache.tags.occ_task_id_blocks::1024 796 # Occupied blocks per task id system.cpu.icache.tags.age_task_id_blocks_1024::0 42 # Occupied blocks per task id system.cpu.icache.tags.age_task_id_blocks_1024::2 13 # Occupied blocks per task id system.cpu.icache.tags.age_task_id_blocks_1024::3 1 # Occupied blocks per task id system.cpu.icache.tags.age_task_id_blocks_1024::4 740 # Occupied blocks per task id system.cpu.icache.tags.occ_task_id_percent::1024 0.388672 # Percentage of cache occupancy per task id -system.cpu.icache.tags.tag_accesses 55536181 # Number of tag accesses -system.cpu.icache.tags.data_accesses 55536181 # Number of data accesses -system.cpu.icache.pwrStateResidencyTicks::UNDEFINED 61234797500 # Cumulative time (in ticks) in various power states -system.cpu.icache.ReadReq_hits::cpu.inst 27766889 # number of ReadReq hits -system.cpu.icache.ReadReq_hits::total 27766889 # number of ReadReq hits -system.cpu.icache.demand_hits::cpu.inst 27766889 # number of demand (read+write) hits -system.cpu.icache.demand_hits::total 27766889 # number of demand (read+write) hits -system.cpu.icache.overall_hits::cpu.inst 27766889 # number of overall hits -system.cpu.icache.overall_hits::total 27766889 # number of overall hits +system.cpu.icache.tags.tag_accesses 55672985 # Number of tag accesses +system.cpu.icache.tags.data_accesses 55672985 # Number of data accesses +system.cpu.icache.pwrStateResidencyTicks::UNDEFINED 62408957500 # Cumulative time (in ticks) in various power states +system.cpu.icache.ReadReq_hits::cpu.inst 27835291 # number of ReadReq hits +system.cpu.icache.ReadReq_hits::total 27835291 # number of ReadReq hits +system.cpu.icache.demand_hits::cpu.inst 27835291 # number of demand (read+write) hits +system.cpu.icache.demand_hits::total 27835291 # number of demand (read+write) hits +system.cpu.icache.overall_hits::cpu.inst 27835291 # number of overall hits +system.cpu.icache.overall_hits::total 27835291 # number of overall hits system.cpu.icache.ReadReq_misses::cpu.inst 801 # number of ReadReq misses system.cpu.icache.ReadReq_misses::total 801 # number of ReadReq misses system.cpu.icache.demand_misses::cpu.inst 801 # number of demand (read+write) misses system.cpu.icache.demand_misses::total 801 # number of demand (read+write) misses system.cpu.icache.overall_misses::cpu.inst 801 # number of overall misses system.cpu.icache.overall_misses::total 801 # number of overall misses -system.cpu.icache.ReadReq_miss_latency::cpu.inst 60228000 # number of ReadReq miss cycles -system.cpu.icache.ReadReq_miss_latency::total 60228000 # number of ReadReq miss cycles -system.cpu.icache.demand_miss_latency::cpu.inst 60228000 # number of demand (read+write) miss cycles 
-system.cpu.icache.demand_miss_latency::total 60228000 # number of demand (read+write) miss cycles -system.cpu.icache.overall_miss_latency::cpu.inst 60228000 # number of overall miss cycles -system.cpu.icache.overall_miss_latency::total 60228000 # number of overall miss cycles -system.cpu.icache.ReadReq_accesses::cpu.inst 27767690 # number of ReadReq accesses(hits+misses) -system.cpu.icache.ReadReq_accesses::total 27767690 # number of ReadReq accesses(hits+misses) -system.cpu.icache.demand_accesses::cpu.inst 27767690 # number of demand (read+write) accesses -system.cpu.icache.demand_accesses::total 27767690 # number of demand (read+write) accesses -system.cpu.icache.overall_accesses::cpu.inst 27767690 # number of overall (read+write) accesses -system.cpu.icache.overall_accesses::total 27767690 # number of overall (read+write) accesses +system.cpu.icache.ReadReq_miss_latency::cpu.inst 60446000 # number of ReadReq miss cycles +system.cpu.icache.ReadReq_miss_latency::total 60446000 # number of ReadReq miss cycles +system.cpu.icache.demand_miss_latency::cpu.inst 60446000 # number of demand (read+write) miss cycles +system.cpu.icache.demand_miss_latency::total 60446000 # number of demand (read+write) miss cycles +system.cpu.icache.overall_miss_latency::cpu.inst 60446000 # number of overall miss cycles +system.cpu.icache.overall_miss_latency::total 60446000 # number of overall miss cycles +system.cpu.icache.ReadReq_accesses::cpu.inst 27836092 # number of ReadReq accesses(hits+misses) +system.cpu.icache.ReadReq_accesses::total 27836092 # number of ReadReq accesses(hits+misses) +system.cpu.icache.demand_accesses::cpu.inst 27836092 # number of demand (read+write) accesses +system.cpu.icache.demand_accesses::total 27836092 # number of demand (read+write) accesses +system.cpu.icache.overall_accesses::cpu.inst 27836092 # number of overall (read+write) accesses +system.cpu.icache.overall_accesses::total 27836092 # number of overall (read+write) accesses system.cpu.icache.ReadReq_miss_rate::cpu.inst 0.000029 # miss rate for ReadReq accesses system.cpu.icache.ReadReq_miss_rate::total 0.000029 # miss rate for ReadReq accesses system.cpu.icache.demand_miss_rate::cpu.inst 0.000029 # miss rate for demand accesses system.cpu.icache.demand_miss_rate::total 0.000029 # miss rate for demand accesses system.cpu.icache.overall_miss_rate::cpu.inst 0.000029 # miss rate for overall accesses system.cpu.icache.overall_miss_rate::total 0.000029 # miss rate for overall accesses -system.cpu.icache.ReadReq_avg_miss_latency::cpu.inst 75191.011236 # average ReadReq miss latency -system.cpu.icache.ReadReq_avg_miss_latency::total 75191.011236 # average ReadReq miss latency -system.cpu.icache.demand_avg_miss_latency::cpu.inst 75191.011236 # average overall miss latency -system.cpu.icache.demand_avg_miss_latency::total 75191.011236 # average overall miss latency -system.cpu.icache.overall_avg_miss_latency::cpu.inst 75191.011236 # average overall miss latency -system.cpu.icache.overall_avg_miss_latency::total 75191.011236 # average overall miss latency +system.cpu.icache.ReadReq_avg_miss_latency::cpu.inst 75463.171036 # average ReadReq miss latency +system.cpu.icache.ReadReq_avg_miss_latency::total 75463.171036 # average ReadReq miss latency +system.cpu.icache.demand_avg_miss_latency::cpu.inst 75463.171036 # average overall miss latency +system.cpu.icache.demand_avg_miss_latency::total 75463.171036 # average overall miss latency +system.cpu.icache.overall_avg_miss_latency::cpu.inst 75463.171036 # average overall miss latency 
+system.cpu.icache.overall_avg_miss_latency::total 75463.171036 # average overall miss latency system.cpu.icache.blocked_cycles::no_mshrs 0 # number of cycles access was blocked system.cpu.icache.blocked_cycles::no_targets 0 # number of cycles access was blocked system.cpu.icache.blocked::no_mshrs 0 # number of cycles access was blocked @@ -641,254 +641,256 @@ system.cpu.icache.demand_mshr_misses::cpu.inst 801 system.cpu.icache.demand_mshr_misses::total 801 # number of demand (read+write) MSHR misses system.cpu.icache.overall_mshr_misses::cpu.inst 801 # number of overall MSHR misses system.cpu.icache.overall_mshr_misses::total 801 # number of overall MSHR misses -system.cpu.icache.ReadReq_mshr_miss_latency::cpu.inst 59427000 # number of ReadReq MSHR miss cycles -system.cpu.icache.ReadReq_mshr_miss_latency::total 59427000 # number of ReadReq MSHR miss cycles -system.cpu.icache.demand_mshr_miss_latency::cpu.inst 59427000 # number of demand (read+write) MSHR miss cycles -system.cpu.icache.demand_mshr_miss_latency::total 59427000 # number of demand (read+write) MSHR miss cycles -system.cpu.icache.overall_mshr_miss_latency::cpu.inst 59427000 # number of overall MSHR miss cycles -system.cpu.icache.overall_mshr_miss_latency::total 59427000 # number of overall MSHR miss cycles +system.cpu.icache.ReadReq_mshr_miss_latency::cpu.inst 59645000 # number of ReadReq MSHR miss cycles +system.cpu.icache.ReadReq_mshr_miss_latency::total 59645000 # number of ReadReq MSHR miss cycles +system.cpu.icache.demand_mshr_miss_latency::cpu.inst 59645000 # number of demand (read+write) MSHR miss cycles +system.cpu.icache.demand_mshr_miss_latency::total 59645000 # number of demand (read+write) MSHR miss cycles +system.cpu.icache.overall_mshr_miss_latency::cpu.inst 59645000 # number of overall MSHR miss cycles +system.cpu.icache.overall_mshr_miss_latency::total 59645000 # number of overall MSHR miss cycles system.cpu.icache.ReadReq_mshr_miss_rate::cpu.inst 0.000029 # mshr miss rate for ReadReq accesses system.cpu.icache.ReadReq_mshr_miss_rate::total 0.000029 # mshr miss rate for ReadReq accesses system.cpu.icache.demand_mshr_miss_rate::cpu.inst 0.000029 # mshr miss rate for demand accesses system.cpu.icache.demand_mshr_miss_rate::total 0.000029 # mshr miss rate for demand accesses system.cpu.icache.overall_mshr_miss_rate::cpu.inst 0.000029 # mshr miss rate for overall accesses system.cpu.icache.overall_mshr_miss_rate::total 0.000029 # mshr miss rate for overall accesses -system.cpu.icache.ReadReq_avg_mshr_miss_latency::cpu.inst 74191.011236 # average ReadReq mshr miss latency -system.cpu.icache.ReadReq_avg_mshr_miss_latency::total 74191.011236 # average ReadReq mshr miss latency -system.cpu.icache.demand_avg_mshr_miss_latency::cpu.inst 74191.011236 # average overall mshr miss latency -system.cpu.icache.demand_avg_mshr_miss_latency::total 74191.011236 # average overall mshr miss latency -system.cpu.icache.overall_avg_mshr_miss_latency::cpu.inst 74191.011236 # average overall mshr miss latency -system.cpu.icache.overall_avg_mshr_miss_latency::total 74191.011236 # average overall mshr miss latency -system.cpu.l2cache.tags.pwrStateResidencyTicks::UNDEFINED 61234797500 # Cumulative time (in ticks) in various power states +system.cpu.icache.ReadReq_avg_mshr_miss_latency::cpu.inst 74463.171036 # average ReadReq mshr miss latency +system.cpu.icache.ReadReq_avg_mshr_miss_latency::total 74463.171036 # average ReadReq mshr miss latency +system.cpu.icache.demand_avg_mshr_miss_latency::cpu.inst 74463.171036 # average overall mshr 
miss latency +system.cpu.icache.demand_avg_mshr_miss_latency::total 74463.171036 # average overall mshr miss latency +system.cpu.icache.overall_avg_mshr_miss_latency::cpu.inst 74463.171036 # average overall mshr miss latency +system.cpu.icache.overall_avg_mshr_miss_latency::total 74463.171036 # average overall mshr miss latency +system.cpu.l2cache.tags.pwrStateResidencyTicks::UNDEFINED 62408957500 # Cumulative time (in ticks) in various power states system.cpu.l2cache.tags.replacements 0 # number of replacements -system.cpu.l2cache.tags.tagsinuse 10244.686315 # Cycle average of tags in use -system.cpu.l2cache.tags.total_refs 1833993 # Total number of references to valid blocks. -system.cpu.l2cache.tags.sampled_refs 15556 # Sample count of references to valid blocks. -system.cpu.l2cache.tags.avg_refs 117.896182 # Average number of references to valid blocks. +system.cpu.l2cache.tags.tagsinuse 10294.680667 # Cycle average of tags in use +system.cpu.l2cache.tags.total_refs 1834001 # Total number of references to valid blocks. +system.cpu.l2cache.tags.sampled_refs 15557 # Sample count of references to valid blocks. +system.cpu.l2cache.tags.avg_refs 117.889117 # Average number of references to valid blocks. system.cpu.l2cache.tags.warmup_cycle 0 # Cycle when the warmup percentage was hit. -system.cpu.l2cache.tags.occ_blocks::writebacks 9355.125797 # Average occupied blocks per requestor -system.cpu.l2cache.tags.occ_blocks::cpu.inst 674.107024 # Average occupied blocks per requestor -system.cpu.l2cache.tags.occ_blocks::cpu.data 215.453494 # Average occupied blocks per requestor -system.cpu.l2cache.tags.occ_percent::writebacks 0.285496 # Average percentage of cache occupancy -system.cpu.l2cache.tags.occ_percent::cpu.inst 0.020572 # Average percentage of cache occupancy -system.cpu.l2cache.tags.occ_percent::cpu.data 0.006575 # Average percentage of cache occupancy -system.cpu.l2cache.tags.occ_percent::total 0.312643 # Average percentage of cache occupancy -system.cpu.l2cache.tags.occ_task_id_blocks::1024 15556 # Occupied blocks per task id -system.cpu.l2cache.tags.age_task_id_blocks_1024::0 46 # Occupied blocks per task id +system.cpu.l2cache.tags.occ_blocks::writebacks 9404.439964 # Average occupied blocks per requestor +system.cpu.l2cache.tags.occ_blocks::cpu.inst 674.596313 # Average occupied blocks per requestor +system.cpu.l2cache.tags.occ_blocks::cpu.data 215.644390 # Average occupied blocks per requestor +system.cpu.l2cache.tags.occ_percent::writebacks 0.287001 # Average percentage of cache occupancy +system.cpu.l2cache.tags.occ_percent::cpu.inst 0.020587 # Average percentage of cache occupancy +system.cpu.l2cache.tags.occ_percent::cpu.data 0.006581 # Average percentage of cache occupancy +system.cpu.l2cache.tags.occ_percent::total 0.314169 # Average percentage of cache occupancy +system.cpu.l2cache.tags.occ_task_id_blocks::1024 15557 # Occupied blocks per task id +system.cpu.l2cache.tags.age_task_id_blocks_1024::0 47 # Occupied blocks per task id system.cpu.l2cache.tags.age_task_id_blocks_1024::1 14 # Occupied blocks per task id system.cpu.l2cache.tags.age_task_id_blocks_1024::2 524 # Occupied blocks per task id -system.cpu.l2cache.tags.age_task_id_blocks_1024::3 1096 # Occupied blocks per task id -system.cpu.l2cache.tags.age_task_id_blocks_1024::4 13876 # Occupied blocks per task id -system.cpu.l2cache.tags.occ_task_id_percent::1024 0.474731 # Percentage of cache occupancy per task id -system.cpu.l2cache.tags.tag_accesses 15237888 # Number of tag accesses 
-system.cpu.l2cache.tags.data_accesses 15237888 # Number of data accesses -system.cpu.l2cache.pwrStateResidencyTicks::UNDEFINED 61234797500 # Cumulative time (in ticks) in various power states -system.cpu.l2cache.WritebackDirty_hits::writebacks 943278 # number of WritebackDirty hits -system.cpu.l2cache.WritebackDirty_hits::total 943278 # number of WritebackDirty hits +system.cpu.l2cache.tags.age_task_id_blocks_1024::3 1094 # Occupied blocks per task id +system.cpu.l2cache.tags.age_task_id_blocks_1024::4 13878 # Occupied blocks per task id +system.cpu.l2cache.tags.occ_task_id_percent::1024 0.474762 # Percentage of cache occupancy per task id +system.cpu.l2cache.tags.tag_accesses 15237953 # Number of tag accesses +system.cpu.l2cache.tags.data_accesses 15237953 # Number of data accesses +system.cpu.l2cache.pwrStateResidencyTicks::UNDEFINED 62408957500 # Cumulative time (in ticks) in various power states +system.cpu.l2cache.WritebackDirty_hits::writebacks 943282 # number of WritebackDirty hits +system.cpu.l2cache.WritebackDirty_hits::total 943282 # number of WritebackDirty hits system.cpu.l2cache.WritebackClean_hits::writebacks 4 # number of WritebackClean hits system.cpu.l2cache.WritebackClean_hits::total 4 # number of WritebackClean hits system.cpu.l2cache.ReadExReq_hits::cpu.data 32220 # number of ReadExReq hits system.cpu.l2cache.ReadExReq_hits::total 32220 # number of ReadExReq hits -system.cpu.l2cache.ReadCleanReq_hits::cpu.inst 26 # number of ReadCleanReq hits -system.cpu.l2cache.ReadCleanReq_hits::total 26 # number of ReadCleanReq hits -system.cpu.l2cache.ReadSharedReq_hits::cpu.data 903167 # number of ReadSharedReq hits -system.cpu.l2cache.ReadSharedReq_hits::total 903167 # number of ReadSharedReq hits -system.cpu.l2cache.demand_hits::cpu.inst 26 # number of demand (read+write) hits -system.cpu.l2cache.demand_hits::cpu.data 935387 # number of demand (read+write) hits -system.cpu.l2cache.demand_hits::total 935413 # number of demand (read+write) hits -system.cpu.l2cache.overall_hits::cpu.inst 26 # number of overall hits -system.cpu.l2cache.overall_hits::cpu.data 935387 # number of overall hits -system.cpu.l2cache.overall_hits::total 935413 # number of overall hits +system.cpu.l2cache.ReadCleanReq_hits::cpu.inst 27 # number of ReadCleanReq hits +system.cpu.l2cache.ReadCleanReq_hits::total 27 # number of ReadCleanReq hits +system.cpu.l2cache.ReadSharedReq_hits::cpu.data 903170 # number of ReadSharedReq hits +system.cpu.l2cache.ReadSharedReq_hits::total 903170 # number of ReadSharedReq hits +system.cpu.l2cache.demand_hits::cpu.inst 27 # number of demand (read+write) hits +system.cpu.l2cache.demand_hits::cpu.data 935390 # number of demand (read+write) hits +system.cpu.l2cache.demand_hits::total 935417 # number of demand (read+write) hits +system.cpu.l2cache.overall_hits::cpu.inst 27 # number of overall hits +system.cpu.l2cache.overall_hits::cpu.data 935390 # number of overall hits +system.cpu.l2cache.overall_hits::total 935417 # number of overall hits system.cpu.l2cache.ReadExReq_misses::cpu.data 14544 # number of ReadExReq misses system.cpu.l2cache.ReadExReq_misses::total 14544 # number of ReadExReq misses -system.cpu.l2cache.ReadCleanReq_misses::cpu.inst 775 # number of ReadCleanReq misses -system.cpu.l2cache.ReadCleanReq_misses::total 775 # number of ReadCleanReq misses -system.cpu.l2cache.ReadSharedReq_misses::cpu.data 262 # number of ReadSharedReq misses -system.cpu.l2cache.ReadSharedReq_misses::total 262 # number of ReadSharedReq misses -system.cpu.l2cache.demand_misses::cpu.inst 775 
# number of demand (read+write) misses -system.cpu.l2cache.demand_misses::cpu.data 14806 # number of demand (read+write) misses +system.cpu.l2cache.ReadCleanReq_misses::cpu.inst 774 # number of ReadCleanReq misses +system.cpu.l2cache.ReadCleanReq_misses::total 774 # number of ReadCleanReq misses +system.cpu.l2cache.ReadSharedReq_misses::cpu.data 263 # number of ReadSharedReq misses +system.cpu.l2cache.ReadSharedReq_misses::total 263 # number of ReadSharedReq misses +system.cpu.l2cache.demand_misses::cpu.inst 774 # number of demand (read+write) misses +system.cpu.l2cache.demand_misses::cpu.data 14807 # number of demand (read+write) misses system.cpu.l2cache.demand_misses::total 15581 # number of demand (read+write) misses -system.cpu.l2cache.overall_misses::cpu.inst 775 # number of overall misses -system.cpu.l2cache.overall_misses::cpu.data 14806 # number of overall misses +system.cpu.l2cache.overall_misses::cpu.inst 774 # number of overall misses +system.cpu.l2cache.overall_misses::cpu.data 14807 # number of overall misses system.cpu.l2cache.overall_misses::total 15581 # number of overall misses -system.cpu.l2cache.ReadExReq_miss_latency::cpu.data 1066480500 # number of ReadExReq miss cycles -system.cpu.l2cache.ReadExReq_miss_latency::total 1066480500 # number of ReadExReq miss cycles -system.cpu.l2cache.ReadCleanReq_miss_latency::cpu.inst 57929500 # number of ReadCleanReq miss cycles -system.cpu.l2cache.ReadCleanReq_miss_latency::total 57929500 # number of ReadCleanReq miss cycles -system.cpu.l2cache.ReadSharedReq_miss_latency::cpu.data 22043500 # number of ReadSharedReq miss cycles -system.cpu.l2cache.ReadSharedReq_miss_latency::total 22043500 # number of ReadSharedReq miss cycles -system.cpu.l2cache.demand_miss_latency::cpu.inst 57929500 # number of demand (read+write) miss cycles -system.cpu.l2cache.demand_miss_latency::cpu.data 1088524000 # number of demand (read+write) miss cycles -system.cpu.l2cache.demand_miss_latency::total 1146453500 # number of demand (read+write) miss cycles -system.cpu.l2cache.overall_miss_latency::cpu.inst 57929500 # number of overall miss cycles -system.cpu.l2cache.overall_miss_latency::cpu.data 1088524000 # number of overall miss cycles -system.cpu.l2cache.overall_miss_latency::total 1146453500 # number of overall miss cycles -system.cpu.l2cache.WritebackDirty_accesses::writebacks 943278 # number of WritebackDirty accesses(hits+misses) -system.cpu.l2cache.WritebackDirty_accesses::total 943278 # number of WritebackDirty accesses(hits+misses) +system.cpu.l2cache.ReadExReq_miss_latency::cpu.data 1068633000 # number of ReadExReq miss cycles +system.cpu.l2cache.ReadExReq_miss_latency::total 1068633000 # number of ReadExReq miss cycles +system.cpu.l2cache.ReadCleanReq_miss_latency::cpu.inst 58136500 # number of ReadCleanReq miss cycles +system.cpu.l2cache.ReadCleanReq_miss_latency::total 58136500 # number of ReadCleanReq miss cycles +system.cpu.l2cache.ReadSharedReq_miss_latency::cpu.data 22289000 # number of ReadSharedReq miss cycles +system.cpu.l2cache.ReadSharedReq_miss_latency::total 22289000 # number of ReadSharedReq miss cycles +system.cpu.l2cache.demand_miss_latency::cpu.inst 58136500 # number of demand (read+write) miss cycles +system.cpu.l2cache.demand_miss_latency::cpu.data 1090922000 # number of demand (read+write) miss cycles +system.cpu.l2cache.demand_miss_latency::total 1149058500 # number of demand (read+write) miss cycles +system.cpu.l2cache.overall_miss_latency::cpu.inst 58136500 # number of overall miss cycles 
+system.cpu.l2cache.overall_miss_latency::cpu.data 1090922000 # number of overall miss cycles +system.cpu.l2cache.overall_miss_latency::total 1149058500 # number of overall miss cycles +system.cpu.l2cache.WritebackDirty_accesses::writebacks 943282 # number of WritebackDirty accesses(hits+misses) +system.cpu.l2cache.WritebackDirty_accesses::total 943282 # number of WritebackDirty accesses(hits+misses) system.cpu.l2cache.WritebackClean_accesses::writebacks 4 # number of WritebackClean accesses(hits+misses) system.cpu.l2cache.WritebackClean_accesses::total 4 # number of WritebackClean accesses(hits+misses) system.cpu.l2cache.ReadExReq_accesses::cpu.data 46764 # number of ReadExReq accesses(hits+misses) system.cpu.l2cache.ReadExReq_accesses::total 46764 # number of ReadExReq accesses(hits+misses) system.cpu.l2cache.ReadCleanReq_accesses::cpu.inst 801 # number of ReadCleanReq accesses(hits+misses) system.cpu.l2cache.ReadCleanReq_accesses::total 801 # number of ReadCleanReq accesses(hits+misses) -system.cpu.l2cache.ReadSharedReq_accesses::cpu.data 903429 # number of ReadSharedReq accesses(hits+misses) -system.cpu.l2cache.ReadSharedReq_accesses::total 903429 # number of ReadSharedReq accesses(hits+misses) +system.cpu.l2cache.ReadSharedReq_accesses::cpu.data 903433 # number of ReadSharedReq accesses(hits+misses) +system.cpu.l2cache.ReadSharedReq_accesses::total 903433 # number of ReadSharedReq accesses(hits+misses) system.cpu.l2cache.demand_accesses::cpu.inst 801 # number of demand (read+write) accesses -system.cpu.l2cache.demand_accesses::cpu.data 950193 # number of demand (read+write) accesses -system.cpu.l2cache.demand_accesses::total 950994 # number of demand (read+write) accesses +system.cpu.l2cache.demand_accesses::cpu.data 950197 # number of demand (read+write) accesses +system.cpu.l2cache.demand_accesses::total 950998 # number of demand (read+write) accesses system.cpu.l2cache.overall_accesses::cpu.inst 801 # number of overall (read+write) accesses -system.cpu.l2cache.overall_accesses::cpu.data 950193 # number of overall (read+write) accesses -system.cpu.l2cache.overall_accesses::total 950994 # number of overall (read+write) accesses +system.cpu.l2cache.overall_accesses::cpu.data 950197 # number of overall (read+write) accesses +system.cpu.l2cache.overall_accesses::total 950998 # number of overall (read+write) accesses system.cpu.l2cache.ReadExReq_miss_rate::cpu.data 0.311008 # miss rate for ReadExReq accesses system.cpu.l2cache.ReadExReq_miss_rate::total 0.311008 # miss rate for ReadExReq accesses -system.cpu.l2cache.ReadCleanReq_miss_rate::cpu.inst 0.967541 # miss rate for ReadCleanReq accesses -system.cpu.l2cache.ReadCleanReq_miss_rate::total 0.967541 # miss rate for ReadCleanReq accesses -system.cpu.l2cache.ReadSharedReq_miss_rate::cpu.data 0.000290 # miss rate for ReadSharedReq accesses -system.cpu.l2cache.ReadSharedReq_miss_rate::total 0.000290 # miss rate for ReadSharedReq accesses -system.cpu.l2cache.demand_miss_rate::cpu.inst 0.967541 # miss rate for demand accesses -system.cpu.l2cache.demand_miss_rate::cpu.data 0.015582 # miss rate for demand accesses +system.cpu.l2cache.ReadCleanReq_miss_rate::cpu.inst 0.966292 # miss rate for ReadCleanReq accesses +system.cpu.l2cache.ReadCleanReq_miss_rate::total 0.966292 # miss rate for ReadCleanReq accesses +system.cpu.l2cache.ReadSharedReq_miss_rate::cpu.data 0.000291 # miss rate for ReadSharedReq accesses +system.cpu.l2cache.ReadSharedReq_miss_rate::total 0.000291 # miss rate for ReadSharedReq accesses 
+system.cpu.l2cache.demand_miss_rate::cpu.inst 0.966292 # miss rate for demand accesses +system.cpu.l2cache.demand_miss_rate::cpu.data 0.015583 # miss rate for demand accesses system.cpu.l2cache.demand_miss_rate::total 0.016384 # miss rate for demand accesses -system.cpu.l2cache.overall_miss_rate::cpu.inst 0.967541 # miss rate for overall accesses -system.cpu.l2cache.overall_miss_rate::cpu.data 0.015582 # miss rate for overall accesses +system.cpu.l2cache.overall_miss_rate::cpu.inst 0.966292 # miss rate for overall accesses +system.cpu.l2cache.overall_miss_rate::cpu.data 0.015583 # miss rate for overall accesses system.cpu.l2cache.overall_miss_rate::total 0.016384 # miss rate for overall accesses -system.cpu.l2cache.ReadExReq_avg_miss_latency::cpu.data 73327.867162 # average ReadExReq miss latency -system.cpu.l2cache.ReadExReq_avg_miss_latency::total 73327.867162 # average ReadExReq miss latency -system.cpu.l2cache.ReadCleanReq_avg_miss_latency::cpu.inst 74747.741935 # average ReadCleanReq miss latency -system.cpu.l2cache.ReadCleanReq_avg_miss_latency::total 74747.741935 # average ReadCleanReq miss latency -system.cpu.l2cache.ReadSharedReq_avg_miss_latency::cpu.data 84135.496183 # average ReadSharedReq miss latency -system.cpu.l2cache.ReadSharedReq_avg_miss_latency::total 84135.496183 # average ReadSharedReq miss latency -system.cpu.l2cache.demand_avg_miss_latency::cpu.inst 74747.741935 # average overall miss latency -system.cpu.l2cache.demand_avg_miss_latency::cpu.data 73519.113873 # average overall miss latency -system.cpu.l2cache.demand_avg_miss_latency::total 73580.225916 # average overall miss latency -system.cpu.l2cache.overall_avg_miss_latency::cpu.inst 74747.741935 # average overall miss latency -system.cpu.l2cache.overall_avg_miss_latency::cpu.data 73519.113873 # average overall miss latency -system.cpu.l2cache.overall_avg_miss_latency::total 73580.225916 # average overall miss latency +system.cpu.l2cache.ReadExReq_avg_miss_latency::cpu.data 73475.866337 # average ReadExReq miss latency +system.cpu.l2cache.ReadExReq_avg_miss_latency::total 73475.866337 # average ReadExReq miss latency +system.cpu.l2cache.ReadCleanReq_avg_miss_latency::cpu.inst 75111.757106 # average ReadCleanReq miss latency +system.cpu.l2cache.ReadCleanReq_avg_miss_latency::total 75111.757106 # average ReadCleanReq miss latency +system.cpu.l2cache.ReadSharedReq_avg_miss_latency::cpu.data 84749.049430 # average ReadSharedReq miss latency +system.cpu.l2cache.ReadSharedReq_avg_miss_latency::total 84749.049430 # average ReadSharedReq miss latency +system.cpu.l2cache.demand_avg_miss_latency::cpu.inst 75111.757106 # average overall miss latency +system.cpu.l2cache.demand_avg_miss_latency::cpu.data 73676.099142 # average overall miss latency +system.cpu.l2cache.demand_avg_miss_latency::total 73747.416725 # average overall miss latency +system.cpu.l2cache.overall_avg_miss_latency::cpu.inst 75111.757106 # average overall miss latency +system.cpu.l2cache.overall_avg_miss_latency::cpu.data 73676.099142 # average overall miss latency +system.cpu.l2cache.overall_avg_miss_latency::total 73747.416725 # average overall miss latency system.cpu.l2cache.blocked_cycles::no_mshrs 0 # number of cycles access was blocked system.cpu.l2cache.blocked_cycles::no_targets 0 # number of cycles access was blocked system.cpu.l2cache.blocked::no_mshrs 0 # number of cycles access was blocked system.cpu.l2cache.blocked::no_targets 0 # number of cycles access was blocked system.cpu.l2cache.avg_blocked_cycles::no_mshrs nan # average number of cycles 
each access was blocked system.cpu.l2cache.avg_blocked_cycles::no_targets nan # average number of cycles each access was blocked -system.cpu.l2cache.ReadCleanReq_mshr_hits::cpu.inst 2 # number of ReadCleanReq MSHR hits -system.cpu.l2cache.ReadCleanReq_mshr_hits::total 2 # number of ReadCleanReq MSHR hits +system.cpu.l2cache.ReadCleanReq_mshr_hits::cpu.inst 1 # number of ReadCleanReq MSHR hits +system.cpu.l2cache.ReadCleanReq_mshr_hits::total 1 # number of ReadCleanReq MSHR hits system.cpu.l2cache.ReadSharedReq_mshr_hits::cpu.data 6 # number of ReadSharedReq MSHR hits system.cpu.l2cache.ReadSharedReq_mshr_hits::total 6 # number of ReadSharedReq MSHR hits -system.cpu.l2cache.demand_mshr_hits::cpu.inst 2 # number of demand (read+write) MSHR hits +system.cpu.l2cache.demand_mshr_hits::cpu.inst 1 # number of demand (read+write) MSHR hits system.cpu.l2cache.demand_mshr_hits::cpu.data 6 # number of demand (read+write) MSHR hits -system.cpu.l2cache.demand_mshr_hits::total 8 # number of demand (read+write) MSHR hits -system.cpu.l2cache.overall_mshr_hits::cpu.inst 2 # number of overall MSHR hits +system.cpu.l2cache.demand_mshr_hits::total 7 # number of demand (read+write) MSHR hits +system.cpu.l2cache.overall_mshr_hits::cpu.inst 1 # number of overall MSHR hits system.cpu.l2cache.overall_mshr_hits::cpu.data 6 # number of overall MSHR hits -system.cpu.l2cache.overall_mshr_hits::total 8 # number of overall MSHR hits +system.cpu.l2cache.overall_mshr_hits::total 7 # number of overall MSHR hits system.cpu.l2cache.ReadExReq_mshr_misses::cpu.data 14544 # number of ReadExReq MSHR misses system.cpu.l2cache.ReadExReq_mshr_misses::total 14544 # number of ReadExReq MSHR misses system.cpu.l2cache.ReadCleanReq_mshr_misses::cpu.inst 773 # number of ReadCleanReq MSHR misses system.cpu.l2cache.ReadCleanReq_mshr_misses::total 773 # number of ReadCleanReq MSHR misses -system.cpu.l2cache.ReadSharedReq_mshr_misses::cpu.data 256 # number of ReadSharedReq MSHR misses -system.cpu.l2cache.ReadSharedReq_mshr_misses::total 256 # number of ReadSharedReq MSHR misses +system.cpu.l2cache.ReadSharedReq_mshr_misses::cpu.data 257 # number of ReadSharedReq MSHR misses +system.cpu.l2cache.ReadSharedReq_mshr_misses::total 257 # number of ReadSharedReq MSHR misses system.cpu.l2cache.demand_mshr_misses::cpu.inst 773 # number of demand (read+write) MSHR misses -system.cpu.l2cache.demand_mshr_misses::cpu.data 14800 # number of demand (read+write) MSHR misses -system.cpu.l2cache.demand_mshr_misses::total 15573 # number of demand (read+write) MSHR misses +system.cpu.l2cache.demand_mshr_misses::cpu.data 14801 # number of demand (read+write) MSHR misses +system.cpu.l2cache.demand_mshr_misses::total 15574 # number of demand (read+write) MSHR misses system.cpu.l2cache.overall_mshr_misses::cpu.inst 773 # number of overall MSHR misses -system.cpu.l2cache.overall_mshr_misses::cpu.data 14800 # number of overall MSHR misses -system.cpu.l2cache.overall_mshr_misses::total 15573 # number of overall MSHR misses -system.cpu.l2cache.ReadExReq_mshr_miss_latency::cpu.data 921040500 # number of ReadExReq MSHR miss cycles -system.cpu.l2cache.ReadExReq_mshr_miss_latency::total 921040500 # number of ReadExReq MSHR miss cycles -system.cpu.l2cache.ReadCleanReq_mshr_miss_latency::cpu.inst 50052500 # number of ReadCleanReq MSHR miss cycles -system.cpu.l2cache.ReadCleanReq_mshr_miss_latency::total 50052500 # number of ReadCleanReq MSHR miss cycles -system.cpu.l2cache.ReadSharedReq_mshr_miss_latency::cpu.data 19092500 # number of ReadSharedReq MSHR miss cycles 
-system.cpu.l2cache.ReadSharedReq_mshr_miss_latency::total 19092500 # number of ReadSharedReq MSHR miss cycles -system.cpu.l2cache.demand_mshr_miss_latency::cpu.inst 50052500 # number of demand (read+write) MSHR miss cycles -system.cpu.l2cache.demand_mshr_miss_latency::cpu.data 940133000 # number of demand (read+write) MSHR miss cycles -system.cpu.l2cache.demand_mshr_miss_latency::total 990185500 # number of demand (read+write) MSHR miss cycles -system.cpu.l2cache.overall_mshr_miss_latency::cpu.inst 50052500 # number of overall MSHR miss cycles -system.cpu.l2cache.overall_mshr_miss_latency::cpu.data 940133000 # number of overall MSHR miss cycles -system.cpu.l2cache.overall_mshr_miss_latency::total 990185500 # number of overall MSHR miss cycles +system.cpu.l2cache.overall_mshr_misses::cpu.data 14801 # number of overall MSHR misses +system.cpu.l2cache.overall_mshr_misses::total 15574 # number of overall MSHR misses +system.cpu.l2cache.ReadExReq_mshr_miss_latency::cpu.data 923193000 # number of ReadExReq MSHR miss cycles +system.cpu.l2cache.ReadExReq_mshr_miss_latency::total 923193000 # number of ReadExReq MSHR miss cycles +system.cpu.l2cache.ReadCleanReq_mshr_miss_latency::cpu.inst 50340000 # number of ReadCleanReq MSHR miss cycles +system.cpu.l2cache.ReadCleanReq_mshr_miss_latency::total 50340000 # number of ReadCleanReq MSHR miss cycles +system.cpu.l2cache.ReadSharedReq_mshr_miss_latency::cpu.data 19328000 # number of ReadSharedReq MSHR miss cycles +system.cpu.l2cache.ReadSharedReq_mshr_miss_latency::total 19328000 # number of ReadSharedReq MSHR miss cycles +system.cpu.l2cache.demand_mshr_miss_latency::cpu.inst 50340000 # number of demand (read+write) MSHR miss cycles +system.cpu.l2cache.demand_mshr_miss_latency::cpu.data 942521000 # number of demand (read+write) MSHR miss cycles +system.cpu.l2cache.demand_mshr_miss_latency::total 992861000 # number of demand (read+write) MSHR miss cycles +system.cpu.l2cache.overall_mshr_miss_latency::cpu.inst 50340000 # number of overall MSHR miss cycles +system.cpu.l2cache.overall_mshr_miss_latency::cpu.data 942521000 # number of overall MSHR miss cycles +system.cpu.l2cache.overall_mshr_miss_latency::total 992861000 # number of overall MSHR miss cycles system.cpu.l2cache.ReadExReq_mshr_miss_rate::cpu.data 0.311008 # mshr miss rate for ReadExReq accesses system.cpu.l2cache.ReadExReq_mshr_miss_rate::total 0.311008 # mshr miss rate for ReadExReq accesses system.cpu.l2cache.ReadCleanReq_mshr_miss_rate::cpu.inst 0.965044 # mshr miss rate for ReadCleanReq accesses system.cpu.l2cache.ReadCleanReq_mshr_miss_rate::total 0.965044 # mshr miss rate for ReadCleanReq accesses -system.cpu.l2cache.ReadSharedReq_mshr_miss_rate::cpu.data 0.000283 # mshr miss rate for ReadSharedReq accesses -system.cpu.l2cache.ReadSharedReq_mshr_miss_rate::total 0.000283 # mshr miss rate for ReadSharedReq accesses +system.cpu.l2cache.ReadSharedReq_mshr_miss_rate::cpu.data 0.000284 # mshr miss rate for ReadSharedReq accesses +system.cpu.l2cache.ReadSharedReq_mshr_miss_rate::total 0.000284 # mshr miss rate for ReadSharedReq accesses system.cpu.l2cache.demand_mshr_miss_rate::cpu.inst 0.965044 # mshr miss rate for demand accesses -system.cpu.l2cache.demand_mshr_miss_rate::cpu.data 0.015576 # mshr miss rate for demand accesses -system.cpu.l2cache.demand_mshr_miss_rate::total 0.016375 # mshr miss rate for demand accesses +system.cpu.l2cache.demand_mshr_miss_rate::cpu.data 0.015577 # mshr miss rate for demand accesses +system.cpu.l2cache.demand_mshr_miss_rate::total 0.016376 # mshr miss rate for 
demand accesses system.cpu.l2cache.overall_mshr_miss_rate::cpu.inst 0.965044 # mshr miss rate for overall accesses -system.cpu.l2cache.overall_mshr_miss_rate::cpu.data 0.015576 # mshr miss rate for overall accesses -system.cpu.l2cache.overall_mshr_miss_rate::total 0.016375 # mshr miss rate for overall accesses -system.cpu.l2cache.ReadExReq_avg_mshr_miss_latency::cpu.data 63327.867162 # average ReadExReq mshr miss latency -system.cpu.l2cache.ReadExReq_avg_mshr_miss_latency::total 63327.867162 # average ReadExReq mshr miss latency -system.cpu.l2cache.ReadCleanReq_avg_mshr_miss_latency::cpu.inst 64750.970246 # average ReadCleanReq mshr miss latency -system.cpu.l2cache.ReadCleanReq_avg_mshr_miss_latency::total 64750.970246 # average ReadCleanReq mshr miss latency -system.cpu.l2cache.ReadSharedReq_avg_mshr_miss_latency::cpu.data 74580.078125 # average ReadSharedReq mshr miss latency -system.cpu.l2cache.ReadSharedReq_avg_mshr_miss_latency::total 74580.078125 # average ReadSharedReq mshr miss latency -system.cpu.l2cache.demand_avg_mshr_miss_latency::cpu.inst 64750.970246 # average overall mshr miss latency -system.cpu.l2cache.demand_avg_mshr_miss_latency::cpu.data 63522.500000 # average overall mshr miss latency -system.cpu.l2cache.demand_avg_mshr_miss_latency::total 63583.477814 # average overall mshr miss latency -system.cpu.l2cache.overall_avg_mshr_miss_latency::cpu.inst 64750.970246 # average overall mshr miss latency -system.cpu.l2cache.overall_avg_mshr_miss_latency::cpu.data 63522.500000 # average overall mshr miss latency -system.cpu.l2cache.overall_avg_mshr_miss_latency::total 63583.477814 # average overall mshr miss latency -system.cpu.toL2Bus.snoop_filter.tot_requests 1897096 # Total number of requests made to the snoop filter. -system.cpu.toL2Bus.snoop_filter.hit_single_requests 946118 # Number of requests hitting in the snoop filter with a single holder of the requested data. 
+system.cpu.l2cache.overall_mshr_miss_rate::cpu.data 0.015577 # mshr miss rate for overall accesses +system.cpu.l2cache.overall_mshr_miss_rate::total 0.016376 # mshr miss rate for overall accesses +system.cpu.l2cache.ReadExReq_avg_mshr_miss_latency::cpu.data 63475.866337 # average ReadExReq mshr miss latency +system.cpu.l2cache.ReadExReq_avg_mshr_miss_latency::total 63475.866337 # average ReadExReq mshr miss latency +system.cpu.l2cache.ReadCleanReq_avg_mshr_miss_latency::cpu.inst 65122.897801 # average ReadCleanReq mshr miss latency +system.cpu.l2cache.ReadCleanReq_avg_mshr_miss_latency::total 65122.897801 # average ReadCleanReq mshr miss latency +system.cpu.l2cache.ReadSharedReq_avg_mshr_miss_latency::cpu.data 75206.225681 # average ReadSharedReq mshr miss latency +system.cpu.l2cache.ReadSharedReq_avg_mshr_miss_latency::total 75206.225681 # average ReadSharedReq mshr miss latency +system.cpu.l2cache.demand_avg_mshr_miss_latency::cpu.inst 65122.897801 # average overall mshr miss latency +system.cpu.l2cache.demand_avg_mshr_miss_latency::cpu.data 63679.548679 # average overall mshr miss latency +system.cpu.l2cache.demand_avg_mshr_miss_latency::total 63751.187877 # average overall mshr miss latency +system.cpu.l2cache.overall_avg_mshr_miss_latency::cpu.inst 65122.897801 # average overall mshr miss latency +system.cpu.l2cache.overall_avg_mshr_miss_latency::cpu.data 63679.548679 # average overall mshr miss latency +system.cpu.l2cache.overall_avg_mshr_miss_latency::total 63751.187877 # average overall mshr miss latency +system.cpu.toL2Bus.snoop_filter.tot_requests 1897104 # Total number of requests made to the snoop filter. +system.cpu.toL2Bus.snoop_filter.hit_single_requests 946122 # Number of requests hitting in the snoop filter with a single holder of the requested data. system.cpu.toL2Bus.snoop_filter.hit_multi_requests 150 # Number of requests hitting in the snoop filter with multiple (>1) holders of the requested data. system.cpu.toL2Bus.snoop_filter.tot_snoops 0 # Total number of snoops made to the snoop filter. system.cpu.toL2Bus.snoop_filter.hit_single_snoops 0 # Number of snoops hitting in the snoop filter with a single holder of the requested data. system.cpu.toL2Bus.snoop_filter.hit_multi_snoops 0 # Number of snoops hitting in the snoop filter with multiple (>1) holders of the requested data. 
-system.cpu.toL2Bus.pwrStateResidencyTicks::UNDEFINED 61234797500 # Cumulative time (in ticks) in various power states -system.cpu.toL2Bus.trans_dist::ReadResp 904230 # Transaction distribution -system.cpu.toL2Bus.trans_dist::WritebackDirty 943278 # Transaction distribution +system.cpu.toL2Bus.pwrStateResidencyTicks::UNDEFINED 62408957500 # Cumulative time (in ticks) in various power states +system.cpu.toL2Bus.trans_dist::ReadResp 904234 # Transaction distribution +system.cpu.toL2Bus.trans_dist::WritebackDirty 943282 # Transaction distribution system.cpu.toL2Bus.trans_dist::WritebackClean 5 # Transaction distribution system.cpu.toL2Bus.trans_dist::CleanEvict 2819 # Transaction distribution system.cpu.toL2Bus.trans_dist::ReadExReq 46764 # Transaction distribution system.cpu.toL2Bus.trans_dist::ReadExResp 46764 # Transaction distribution system.cpu.toL2Bus.trans_dist::ReadCleanReq 801 # Transaction distribution -system.cpu.toL2Bus.trans_dist::ReadSharedReq 903429 # Transaction distribution +system.cpu.toL2Bus.trans_dist::ReadSharedReq 903433 # Transaction distribution system.cpu.toL2Bus.pkt_count_system.cpu.icache.mem_side::system.cpu.l2cache.cpu_side 1607 # Packet count per connected master and slave (bytes) -system.cpu.toL2Bus.pkt_count_system.cpu.dcache.mem_side::system.cpu.l2cache.cpu_side 2846483 # Packet count per connected master and slave (bytes) -system.cpu.toL2Bus.pkt_count::total 2848090 # Packet count per connected master and slave (bytes) +system.cpu.toL2Bus.pkt_count_system.cpu.dcache.mem_side::system.cpu.l2cache.cpu_side 2846495 # Packet count per connected master and slave (bytes) +system.cpu.toL2Bus.pkt_count::total 2848102 # Packet count per connected master and slave (bytes) system.cpu.toL2Bus.pkt_size_system.cpu.icache.mem_side::system.cpu.l2cache.cpu_side 51584 # Cumulative packet size per connected master and slave (bytes) -system.cpu.toL2Bus.pkt_size_system.cpu.dcache.mem_side::system.cpu.l2cache.cpu_side 121182144 # Cumulative packet size per connected master and slave (bytes) -system.cpu.toL2Bus.pkt_size::total 121233728 # Cumulative packet size per connected master and slave (bytes) +system.cpu.toL2Bus.pkt_size_system.cpu.dcache.mem_side::system.cpu.l2cache.cpu_side 121182656 # Cumulative packet size per connected master and slave (bytes) +system.cpu.toL2Bus.pkt_size::total 121234240 # Cumulative packet size per connected master and slave (bytes) system.cpu.toL2Bus.snoops 0 # Total snoops (count) -system.cpu.toL2Bus.snoop_fanout::samples 950994 # Request fanout histogram +system.cpu.toL2Bus.snoopTraffic 0 # Total snoop traffic (bytes) +system.cpu.toL2Bus.snoop_fanout::samples 950998 # Request fanout histogram system.cpu.toL2Bus.snoop_fanout::mean 0.000175 # Request fanout histogram system.cpu.toL2Bus.snoop_fanout::stdev 0.013211 # Request fanout histogram system.cpu.toL2Bus.snoop_fanout::underflows 0 0.00% 0.00% # Request fanout histogram -system.cpu.toL2Bus.snoop_fanout::0 950828 99.98% 99.98% # Request fanout histogram +system.cpu.toL2Bus.snoop_fanout::0 950832 99.98% 99.98% # Request fanout histogram system.cpu.toL2Bus.snoop_fanout::1 166 0.02% 100.00% # Request fanout histogram system.cpu.toL2Bus.snoop_fanout::2 0 0.00% 100.00% # Request fanout histogram system.cpu.toL2Bus.snoop_fanout::overflows 0 0.00% 100.00% # Request fanout histogram system.cpu.toL2Bus.snoop_fanout::min_value 0 # Request fanout histogram system.cpu.toL2Bus.snoop_fanout::max_value 1 # Request fanout histogram -system.cpu.toL2Bus.snoop_fanout::total 950994 # Request fanout histogram 
-system.cpu.toL2Bus.reqLayer0.occupancy 1891831000 # Layer occupancy (ticks) -system.cpu.toL2Bus.reqLayer0.utilization 3.1 # Layer utilization (%) -system.cpu.toL2Bus.respLayer0.occupancy 1202498 # Layer occupancy (ticks) +system.cpu.toL2Bus.snoop_fanout::total 950998 # Request fanout histogram +system.cpu.toL2Bus.reqLayer0.occupancy 1891839000 # Layer occupancy (ticks) +system.cpu.toL2Bus.reqLayer0.utilization 3.0 # Layer utilization (%) +system.cpu.toL2Bus.respLayer0.occupancy 1201999 # Layer occupancy (ticks) system.cpu.toL2Bus.respLayer0.utilization 0.0 # Layer utilization (%) -system.cpu.toL2Bus.respLayer1.occupancy 1425292494 # Layer occupancy (ticks) +system.cpu.toL2Bus.respLayer1.occupancy 1425298494 # Layer occupancy (ticks) system.cpu.toL2Bus.respLayer1.utilization 2.3 # Layer utilization (%) -system.membus.pwrStateResidencyTicks::UNDEFINED 61234797500 # Cumulative time (in ticks) in various power states -system.membus.trans_dist::ReadResp 1029 # Transaction distribution +system.membus.pwrStateResidencyTicks::UNDEFINED 62408957500 # Cumulative time (in ticks) in various power states +system.membus.trans_dist::ReadResp 1030 # Transaction distribution system.membus.trans_dist::ReadExReq 14544 # Transaction distribution system.membus.trans_dist::ReadExResp 14544 # Transaction distribution -system.membus.trans_dist::ReadSharedReq 1029 # Transaction distribution -system.membus.pkt_count_system.cpu.l2cache.mem_side::system.physmem.port 31146 # Packet count per connected master and slave (bytes) -system.membus.pkt_count::total 31146 # Packet count per connected master and slave (bytes) -system.membus.pkt_size_system.cpu.l2cache.mem_side::system.physmem.port 996672 # Cumulative packet size per connected master and slave (bytes) -system.membus.pkt_size::total 996672 # Cumulative packet size per connected master and slave (bytes) +system.membus.trans_dist::ReadSharedReq 1030 # Transaction distribution +system.membus.pkt_count_system.cpu.l2cache.mem_side::system.physmem.port 31148 # Packet count per connected master and slave (bytes) +system.membus.pkt_count::total 31148 # Packet count per connected master and slave (bytes) +system.membus.pkt_size_system.cpu.l2cache.mem_side::system.physmem.port 996736 # Cumulative packet size per connected master and slave (bytes) +system.membus.pkt_size::total 996736 # Cumulative packet size per connected master and slave (bytes) system.membus.snoops 0 # Total snoops (count) -system.membus.snoop_fanout::samples 15573 # Request fanout histogram +system.membus.snoopTraffic 0 # Total snoop traffic (bytes) +system.membus.snoop_fanout::samples 15574 # Request fanout histogram system.membus.snoop_fanout::mean 0 # Request fanout histogram system.membus.snoop_fanout::stdev 0 # Request fanout histogram system.membus.snoop_fanout::underflows 0 0.00% 0.00% # Request fanout histogram -system.membus.snoop_fanout::0 15573 100.00% 100.00% # Request fanout histogram +system.membus.snoop_fanout::0 15574 100.00% 100.00% # Request fanout histogram system.membus.snoop_fanout::1 0 0.00% 100.00% # Request fanout histogram system.membus.snoop_fanout::overflows 0 0.00% 100.00% # Request fanout histogram system.membus.snoop_fanout::min_value 0 # Request fanout histogram system.membus.snoop_fanout::max_value 0 # Request fanout histogram -system.membus.snoop_fanout::total 15573 # Request fanout histogram -system.membus.reqLayer0.occupancy 21737000 # Layer occupancy (ticks) +system.membus.snoop_fanout::total 15574 # Request fanout histogram +system.membus.reqLayer0.occupancy 
21833000 # Layer occupancy (ticks) system.membus.reqLayer0.utilization 0.0 # Layer utilization (%) -system.membus.respLayer1.occupancy 82128750 # Layer occupancy (ticks) +system.membus.respLayer1.occupancy 82137750 # Layer occupancy (ticks) system.membus.respLayer1.utilization 0.1 # Layer utilization (%) ---------- End Simulation Statistics ---------- diff --git a/tests/long/se/10.mcf/ref/arm/linux/o3-timing/config.ini b/tests/long/se/10.mcf/ref/arm/linux/o3-timing/config.ini index 7fcb96393..763fea8df 100644 --- a/tests/long/se/10.mcf/ref/arm/linux/o3-timing/config.ini +++ b/tests/long/se/10.mcf/ref/arm/linux/o3-timing/config.ini @@ -14,6 +14,7 @@ children=clk_domain cpu cpu_clk_domain dvfs_handler membus physmem voltage_domai boot_osflags=a cache_line_size=64 clk_domain=system.clk_domain +default_p_state=UNDEFINED eventq_index=0 exit_on_work_items=false init_param=0 @@ -27,6 +28,10 @@ memories=system.physmem mmap_using_noreserve=false multi_thread=false num_work_ids=16 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null readfile= symbolfile= thermal_components= @@ -72,6 +77,7 @@ cpu_id=0 decodeToFetchDelay=1 decodeToRenameDelay=2 decodeWidth=3 +default_p_state=UNDEFINED dispatchWidth=6 do_checkpoint_insts=true do_quiesce=true @@ -110,6 +116,10 @@ numPhysIntRegs=128 numROBEntries=40 numRobs=1 numThreads=1 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null profile=0 progress_interval=0 renameToDecodeDelay=1 @@ -166,12 +176,17 @@ addr_ranges=0:18446744073709551615 assoc=2 clk_domain=system.cpu_clk_domain clusivity=mostly_incl +default_p_state=UNDEFINED demand_mshr_reserve=1 eventq_index=0 hit_latency=2 is_read_only=false max_miss_count=0 mshrs=6 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null prefetch_on_access=false prefetcher=Null response_latency=2 @@ -190,8 +205,13 @@ type=LRU assoc=2 block_size=64 clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 hit_latency=2 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sequential_access=false size=32768 @@ -214,9 +234,14 @@ walker=system.cpu.dstage2_mmu.stage2_tlb.walker [system.cpu.dstage2_mmu.stage2_tlb.walker] type=ArmTableWalker clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 is_stage2=true num_squash_per_cycle=2 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sys=system [system.cpu.dtb] @@ -230,9 +255,14 @@ walker=system.cpu.dtb.walker [system.cpu.dtb.walker] type=ArmTableWalker clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 is_stage2=false num_squash_per_cycle=2 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sys=system port=system.cpu.toL2Bus.slave[3] @@ -508,12 +538,17 @@ addr_ranges=0:18446744073709551615 assoc=2 clk_domain=system.cpu_clk_domain clusivity=mostly_incl +default_p_state=UNDEFINED demand_mshr_reserve=1 eventq_index=0 hit_latency=1 is_read_only=true max_miss_count=0 mshrs=2 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null prefetch_on_access=false prefetcher=Null response_latency=1 @@ -532,8 +567,13 @@ type=LRU assoc=2 block_size=64 clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 hit_latency=1 +p_state_clk_gate_bins=20 
+p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sequential_access=false size=32768 @@ -591,9 +631,14 @@ walker=system.cpu.istage2_mmu.stage2_tlb.walker [system.cpu.istage2_mmu.stage2_tlb.walker] type=ArmTableWalker clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 is_stage2=true num_squash_per_cycle=2 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sys=system [system.cpu.itb] @@ -607,9 +652,14 @@ walker=system.cpu.itb.walker [system.cpu.itb.walker] type=ArmTableWalker clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 is_stage2=false num_squash_per_cycle=2 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sys=system port=system.cpu.toL2Bus.slave[2] @@ -620,12 +670,17 @@ addr_ranges=0:18446744073709551615 assoc=16 clk_domain=system.cpu_clk_domain clusivity=mostly_excl +default_p_state=UNDEFINED demand_mshr_reserve=1 eventq_index=0 hit_latency=12 is_read_only=false max_miss_count=0 mshrs=16 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null prefetch_on_access=true prefetcher=system.cpu.l2cache.prefetcher response_latency=12 @@ -643,6 +698,7 @@ mem_side=system.membus.slave[1] type=StridePrefetcher cache_snoop=false clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED degree=8 eventq_index=0 latency=1 @@ -653,6 +709,10 @@ on_inst=true on_miss=false on_read=true on_write=true +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null queue_filter=true queue_size=32 queue_squash=true @@ -669,8 +729,13 @@ type=RandomRepl assoc=16 block_size=64 clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 hit_latency=12 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sequential_access=false size=1048576 @@ -678,10 +743,15 @@ size=1048576 type=CoherentXBar children=snoop_filter clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 forward_latency=0 frontend_latency=1 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 point_of_coherency=false +power_model=Null response_latency=1 snoop_filter=system.cpu.toL2Bus.snoop_filter snoop_response_latency=1 @@ -712,9 +782,9 @@ env= errout=cerr euid=100 eventq_index=0 -executable=/dist/m5/cpu2000/binaries/arm/linux/mcf +executable=/arm/projectscratch/randd/systems/dist/cpu2000/binaries/arm/linux/mcf gid=100 -input=/dist/m5/cpu2000/data/mcf/smred/input/mcf.in +input=/arm/projectscratch/randd/systems/dist/cpu2000/data/mcf/smred/input/mcf.in kvmInSE=false max_stack_size=67108864 output=cout @@ -744,10 +814,15 @@ transition_latency=100000000 [system.membus] type=CoherentXBar clk_domain=system.clk_domain +default_p_state=UNDEFINED eventq_index=0 forward_latency=4 frontend_latency=3 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 point_of_coherency=true +power_model=Null response_latency=2 snoop_filter=Null snoop_response_latency=4 @@ -791,6 +866,7 @@ burst_length=8 channels=1 clk_domain=system.clk_domain conf_table_reported=true +default_p_state=UNDEFINED device_bus_width=8 device_rowbuffer_size=1024 device_size=536870912 @@ -802,7 +878,11 @@ max_accesses_per_row=16 mem_sched_policy=frfcfs min_writes_per_switch=16 null=false +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 
+p_state_clk_gate_min=1000 page_policy=open_adaptive +power_model=Null range=0:268435455 ranks_per_channel=2 read_buffer_size=32 diff --git a/tests/long/se/10.mcf/ref/arm/linux/o3-timing/simerr b/tests/long/se/10.mcf/ref/arm/linux/o3-timing/simerr index d9d33c634..4184e8f67 100755 --- a/tests/long/se/10.mcf/ref/arm/linux/o3-timing/simerr +++ b/tests/long/se/10.mcf/ref/arm/linux/o3-timing/simerr @@ -1,3 +1,4 @@ warn: DRAM device capacity (8192 Mbytes) does not match the address range assigned (256 Mbytes) warn: Sockets disabled, not accepting gdb connections +warn: ClockedObject: More than one power state change request encountered within the same simulation tick warn: CP14 unimplemented crn[8], opc1[2], crm[9], opc2[4] diff --git a/tests/long/se/10.mcf/ref/arm/linux/o3-timing/simout b/tests/long/se/10.mcf/ref/arm/linux/o3-timing/simout index 1617c9a7a..67b5f0b3c 100755 --- a/tests/long/se/10.mcf/ref/arm/linux/o3-timing/simout +++ b/tests/long/se/10.mcf/ref/arm/linux/o3-timing/simout @@ -3,10 +3,10 @@ Redirecting stderr to build/ARM/tests/opt/long/se/10.mcf/arm/linux/o3-timing/sim gem5 Simulator System. http://gem5.org gem5 is copyrighted software; use the --copyright option for details. -gem5 compiled Mar 15 2016 19:53:43 -gem5 started Mar 15 2016 19:54:42 -gem5 executing on dinar2c11, pid 10367 -command line: build/ARM/gem5.opt -d build/ARM/tests/opt/long/se/10.mcf/arm/linux/o3-timing -re /home/stever/gem5-public/tests/run.py build/ARM/tests/opt/long/se/10.mcf/arm/linux/o3-timing +gem5 compiled Jul 21 2016 14:37:41 +gem5 started Jul 21 2016 15:07:19 +gem5 executing on e108600-lin, pid 24393 +command line: /work/curdun01/gem5-external.hg/build/ARM/gem5.opt -d build/ARM/tests/opt/long/se/10.mcf/arm/linux/o3-timing -re /work/curdun01/gem5-external.hg/tests/testing/../run.py long/se/10.mcf/arm/linux/o3-timing Global frequency set at 1000000000000 ticks per second info: Entering event queue @ 0. Starting simulation... 
diff --git a/tests/long/se/10.mcf/ref/arm/linux/o3-timing/stats.txt b/tests/long/se/10.mcf/ref/arm/linux/o3-timing/stats.txt index 6265572cd..5f4859c30 100644 --- a/tests/long/se/10.mcf/ref/arm/linux/o3-timing/stats.txt +++ b/tests/long/se/10.mcf/ref/arm/linux/o3-timing/stats.txt @@ -4,11 +4,11 @@ sim_seconds 0.058199 # Nu sim_ticks 58199030500 # Number of ticks simulated final_tick 58199030500 # Number of ticks from beginning of simulation (restored from checkpoints and never reset) sim_freq 1000000000000 # Frequency of simulated ticks -host_inst_rate 220490 # Simulator instruction rate (inst/s) -host_op_rate 221588 # Simulator op (including micro ops) rate (op/s) -host_tick_rate 141652578 # Simulator tick rate (ticks/s) -host_mem_usage 534836 # Number of bytes of host memory used -host_seconds 410.86 # Real time elapsed on the host +host_inst_rate 123305 # Simulator instruction rate (inst/s) +host_op_rate 123919 # Simulator op (including micro ops) rate (op/s) +host_tick_rate 79216719 # Simulator tick rate (ticks/s) +host_mem_usage 487100 # Number of bytes of host memory used +host_seconds 734.68 # Real time elapsed on the host sim_insts 90589799 # Number of instructions simulated sim_ops 91041030 # Number of ops (including micro ops) simulated system.voltage_domain.voltage 1 # Voltage in Volts @@ -1180,6 +1180,7 @@ system.cpu.toL2Bus.pkt_size_system.cpu.icache.mem_side::system.cpu.l2cache.cpu_s system.cpu.toL2Bus.pkt_size_system.cpu.dcache.mem_side::system.cpu.l2cache.cpu_side 700274176 # Cumulative packet size per connected master and slave (bytes) system.cpu.toL2Bus.pkt_size::total 700360640 # Cumulative packet size per connected master and slave (bytes) system.cpu.toL2Bus.snoops 319939 # Total snoops (count) +system.cpu.toL2Bus.snoopTraffic 11456 # Total snoop traffic (bytes) system.cpu.toL2Bus.snoop_fanout::samples 5791989 # Request fanout histogram system.cpu.toL2Bus.snoop_fanout::mean 0.053010 # Request fanout histogram system.cpu.toL2Bus.snoop_fanout::stdev 0.224658 # Request fanout histogram @@ -1212,6 +1213,7 @@ system.membus.pkt_count::total 33275 # Pa system.membus.pkt_size_system.cpu.l2cache.mem_side::system.physmem.port 1068224 # Cumulative packet size per connected master and slave (bytes) system.membus.pkt_size::total 1068224 # Cumulative packet size per connected master and slave (bytes) system.membus.snoops 0 # Total snoops (count) +system.membus.snoopTraffic 0 # Total snoop traffic (bytes) system.membus.snoop_fanout::samples 16759 # Request fanout histogram system.membus.snoop_fanout::mean 0 # Request fanout histogram system.membus.snoop_fanout::stdev 0 # Request fanout histogram diff --git a/tests/long/se/10.mcf/ref/sparc/linux/simple-timing/config.ini b/tests/long/se/10.mcf/ref/sparc/linux/simple-timing/config.ini index a3f3e3177..c6db85421 100644 --- a/tests/long/se/10.mcf/ref/sparc/linux/simple-timing/config.ini +++ b/tests/long/se/10.mcf/ref/sparc/linux/simple-timing/config.ini @@ -14,7 +14,9 @@ children=clk_domain cpu cpu_clk_domain dvfs_handler membus physmem voltage_domai boot_osflags=a cache_line_size=64 clk_domain=system.clk_domain +default_p_state=UNDEFINED eventq_index=0 +exit_on_work_items=false init_param=0 kernel= kernel_addr_check=true @@ -24,9 +26,16 @@ mem_mode=timing mem_ranges= memories=system.physmem mmap_using_noreserve=false +multi_thread=false num_work_ids=16 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null readfile= symbolfile= +thermal_components= +thermal_model=Null 
work_begin_ckpt_count=0 work_begin_cpu_id_exit=-1 work_begin_exit_count=0 @@ -51,6 +60,7 @@ branchPred=Null checker=Null clk_domain=system.cpu_clk_domain cpu_id=0 +default_p_state=UNDEFINED do_checkpoint_insts=true do_quiesce=true do_statistics_insts=true @@ -66,6 +76,10 @@ max_insts_any_thread=0 max_loads_all_threads=0 max_loads_any_thread=0 numThreads=1 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null profile=0 progress_interval=0 simpoint_start_insts= @@ -83,13 +97,18 @@ children=tags addr_ranges=0:18446744073709551615 assoc=2 clk_domain=system.cpu_clk_domain +clusivity=mostly_incl +default_p_state=UNDEFINED demand_mshr_reserve=1 eventq_index=0 -forward_snoops=true hit_latency=2 is_read_only=false max_miss_count=0 mshrs=4 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null prefetch_on_access=false prefetcher=Null response_latency=2 @@ -99,6 +118,7 @@ system=system tags=system.cpu.dcache.tags tgts_per_mshr=20 write_buffers=8 +writeback_clean=false cpu_side=system.cpu.dcache_port mem_side=system.cpu.toL2Bus.slave[1] @@ -107,8 +127,13 @@ type=LRU assoc=2 block_size=64 clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 hit_latency=2 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sequential_access=false size=262144 @@ -123,13 +148,18 @@ children=tags addr_ranges=0:18446744073709551615 assoc=2 clk_domain=system.cpu_clk_domain +clusivity=mostly_incl +default_p_state=UNDEFINED demand_mshr_reserve=1 eventq_index=0 -forward_snoops=true hit_latency=2 is_read_only=true max_miss_count=0 mshrs=4 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null prefetch_on_access=false prefetcher=Null response_latency=2 @@ -139,6 +169,7 @@ system=system tags=system.cpu.icache.tags tgts_per_mshr=20 write_buffers=8 +writeback_clean=true cpu_side=system.cpu.icache_port mem_side=system.cpu.toL2Bus.slave[0] @@ -147,8 +178,13 @@ type=LRU assoc=2 block_size=64 clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 hit_latency=2 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sequential_access=false size=131072 @@ -171,13 +207,18 @@ children=tags addr_ranges=0:18446744073709551615 assoc=8 clk_domain=system.cpu_clk_domain +clusivity=mostly_incl +default_p_state=UNDEFINED demand_mshr_reserve=1 eventq_index=0 -forward_snoops=true hit_latency=20 is_read_only=false max_miss_count=0 mshrs=20 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null prefetch_on_access=false prefetcher=Null response_latency=20 @@ -187,6 +228,7 @@ system=system tags=system.cpu.l2cache.tags tgts_per_mshr=12 write_buffers=8 +writeback_clean=false cpu_side=system.cpu.toL2Bus.master[0] mem_side=system.membus.slave[1] @@ -195,19 +237,31 @@ type=LRU assoc=8 block_size=64 clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 hit_latency=20 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sequential_access=false size=2097152 [system.cpu.toL2Bus] type=CoherentXBar +children=snoop_filter clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 forward_latency=0 frontend_latency=1 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 
+point_of_coherency=false +power_model=Null response_latency=1 -snoop_filter=Null +snoop_filter=system.cpu.toL2Bus.snoop_filter snoop_response_latency=1 system=system use_default_range=false @@ -215,6 +269,13 @@ width=32 master=system.cpu.l2cache.cpu_side slave=system.cpu.icache.mem_side system.cpu.dcache.mem_side +[system.cpu.toL2Bus.snoop_filter] +type=SnoopFilter +eventq_index=0 +lookup_latency=0 +max_capacity=8388608 +system=system + [system.cpu.tracer] type=ExeTracer eventq_index=0 @@ -229,9 +290,9 @@ env= errout=cerr euid=100 eventq_index=0 -executable=/scratch/nilay/GEM5/dist/m5/cpu2000/binaries/sparc/linux/mcf +executable=/arm/projectscratch/randd/systems/dist/cpu2000/binaries/sparc/linux/mcf gid=100 -input=/scratch/nilay/GEM5/dist/m5/cpu2000/data/mcf/smred/input/mcf.in +input=/arm/projectscratch/randd/systems/dist/cpu2000/data/mcf/smred/input/mcf.in kvmInSE=false max_stack_size=67108864 output=cout @@ -261,9 +322,15 @@ transition_latency=100000000 [system.membus] type=CoherentXBar clk_domain=system.clk_domain +default_p_state=UNDEFINED eventq_index=0 forward_latency=4 frontend_latency=3 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +point_of_coherency=true +power_model=Null response_latency=2 snoop_filter=Null snoop_response_latency=4 @@ -278,11 +345,16 @@ type=SimpleMemory bandwidth=73.000000 clk_domain=system.clk_domain conf_table_reported=true +default_p_state=UNDEFINED eventq_index=0 in_addr_map=true latency=30000 latency_var=0 null=false +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null range=0:268435455 port=system.membus.master[0] diff --git a/tests/long/se/10.mcf/ref/sparc/linux/simple-timing/simerr b/tests/long/se/10.mcf/ref/sparc/linux/simple-timing/simerr index 1a4f96712..aadc3d011 100755 --- a/tests/long/se/10.mcf/ref/sparc/linux/simple-timing/simerr +++ b/tests/long/se/10.mcf/ref/sparc/linux/simple-timing/simerr @@ -1 +1,2 @@ warn: Sockets disabled, not accepting gdb connections +warn: ClockedObject: More than one power state change request encountered within the same simulation tick diff --git a/tests/long/se/10.mcf/ref/sparc/linux/simple-timing/simout b/tests/long/se/10.mcf/ref/sparc/linux/simple-timing/simout index e66d5ccc4..8bd59a796 100755 --- a/tests/long/se/10.mcf/ref/sparc/linux/simple-timing/simout +++ b/tests/long/se/10.mcf/ref/sparc/linux/simple-timing/simout @@ -3,10 +3,10 @@ Redirecting stderr to build/SPARC/tests/opt/long/se/10.mcf/sparc/linux/simple-ti gem5 Simulator System. http://gem5.org gem5 is copyrighted software; use the --copyright option for details. -gem5 compiled Sep 14 2015 22:05:26 -gem5 started Sep 14 2015 22:06:13 -gem5 executing on ribera.cs.wisc.edu -command line: build/SPARC/gem5.opt -d build/SPARC/tests/opt/long/se/10.mcf/sparc/linux/simple-timing -re /scratch/nilay/GEM5/gem5/tests/run.py build/SPARC/tests/opt/long/se/10.mcf/sparc/linux/simple-timing +gem5 compiled Jul 21 2016 14:30:06 +gem5 started Jul 21 2016 14:30:36 +gem5 executing on e108600-lin, pid 38669 +command line: /work/curdun01/gem5-external.hg/build/SPARC/gem5.opt -d build/SPARC/tests/opt/long/se/10.mcf/sparc/linux/simple-timing -re /work/curdun01/gem5-external.hg/tests/testing/../run.py long/se/10.mcf/sparc/linux/simple-timing Global frequency set at 1000000000000 ticks per second info: Entering event queue @ 0. Starting simulation... 
@@ -26,4 +26,4 @@ simplex iterations : 2663 flow value : 3080014995 checksum : 68389 optimal -Exiting @ tick 361488535500 because target called exit() +Exiting @ tick 361597758500 because target called exit() diff --git a/tests/long/se/10.mcf/ref/sparc/linux/simple-timing/stats.txt b/tests/long/se/10.mcf/ref/sparc/linux/simple-timing/stats.txt index b73adbdfb..b912f8d81 100644 --- a/tests/long/se/10.mcf/ref/sparc/linux/simple-timing/stats.txt +++ b/tests/long/se/10.mcf/ref/sparc/linux/simple-timing/stats.txt @@ -4,11 +4,11 @@ sim_seconds 0.361598 # Nu sim_ticks 361597758500 # Number of ticks simulated final_tick 361597758500 # Number of ticks from beginning of simulation (restored from checkpoints and never reset) sim_freq 1000000000000 # Frequency of simulated ticks -host_inst_rate 1652209 # Simulator instruction rate (inst/s) -host_op_rate 1652277 # Simulator op (including micro ops) rate (op/s) -host_tick_rate 2450259534 # Simulator tick rate (ticks/s) -host_mem_usage 427260 # Number of bytes of host memory used -host_seconds 147.58 # Real time elapsed on the host +host_inst_rate 1165746 # Simulator instruction rate (inst/s) +host_op_rate 1165794 # Simulator op (including micro ops) rate (op/s) +host_tick_rate 1728825291 # Simulator tick rate (ticks/s) +host_mem_usage 381188 # Number of bytes of host memory used +host_seconds 209.16 # Real time elapsed on the host sim_insts 243825150 # Number of instructions simulated sim_ops 243835265 # Number of ops (including micro ops) simulated system.voltage_domain.voltage 1 # Voltage in Volts @@ -486,6 +486,7 @@ system.cpu.toL2Bus.pkt_size_system.cpu.icache.mem_side::system.cpu.l2cache.cpu_s system.cpu.toL2Bus.pkt_size_system.cpu.dcache.mem_side::system.cpu.l2cache.cpu_side 119989568 # Cumulative packet size per connected master and slave (bytes) system.cpu.toL2Bus.pkt_size::total 120047616 # Cumulative packet size per connected master and slave (bytes) system.cpu.toL2Bus.snoops 0 # Total snoops (count) +system.cpu.toL2Bus.snoopTraffic 0 # Total snoop traffic (bytes) system.cpu.toL2Bus.snoop_fanout::samples 940453 # Request fanout histogram system.cpu.toL2Bus.snoop_fanout::mean 0.000001 # Request fanout histogram system.cpu.toL2Bus.snoop_fanout::stdev 0.001031 # Request fanout histogram @@ -513,6 +514,7 @@ system.membus.pkt_count::total 31206 # Pa system.membus.pkt_size_system.cpu.l2cache.mem_side::system.physmem.port 998592 # Cumulative packet size per connected master and slave (bytes) system.membus.pkt_size::total 998592 # Cumulative packet size per connected master and slave (bytes) system.membus.snoops 0 # Total snoops (count) +system.membus.snoopTraffic 0 # Total snoop traffic (bytes) system.membus.snoop_fanout::samples 15603 # Request fanout histogram system.membus.snoop_fanout::mean 0 # Request fanout histogram system.membus.snoop_fanout::stdev 0 # Request fanout histogram diff --git a/tests/long/se/10.mcf/ref/x86/linux/o3-timing/config.ini b/tests/long/se/10.mcf/ref/x86/linux/o3-timing/config.ini index 23b1feb0a..fa42af61f 100644 --- a/tests/long/se/10.mcf/ref/x86/linux/o3-timing/config.ini +++ b/tests/long/se/10.mcf/ref/x86/linux/o3-timing/config.ini @@ -14,6 +14,7 @@ children=clk_domain cpu cpu_clk_domain dvfs_handler membus physmem voltage_domai boot_osflags=a cache_line_size=64 clk_domain=system.clk_domain +default_p_state=UNDEFINED eventq_index=0 exit_on_work_items=false init_param=0 @@ -27,8 +28,14 @@ memories=system.physmem mmap_using_noreserve=false multi_thread=false num_work_ids=16 +p_state_clk_gate_bins=20 
+p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null readfile= symbolfile= +thermal_components= +thermal_model=Null work_begin_ckpt_count=0 work_begin_cpu_id_exit=-1 work_begin_exit_count=0 @@ -70,6 +77,7 @@ cpu_id=0 decodeToFetchDelay=1 decodeToRenameDelay=1 decodeWidth=8 +default_p_state=UNDEFINED dispatchWidth=8 do_checkpoint_insts=true do_quiesce=true @@ -106,6 +114,10 @@ numPhysIntRegs=256 numROBEntries=192 numRobs=1 numThreads=1 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null profile=0 progress_interval=0 renameToDecodeDelay=1 @@ -151,11 +163,18 @@ choicePredictorSize=8192 eventq_index=0 globalCtrBits=2 globalPredictorSize=8192 +indirectHashGHR=true +indirectHashTargets=true +indirectPathLength=3 +indirectSets=256 +indirectTagSize=16 +indirectWays=2 instShiftAmt=2 localCtrBits=2 localHistoryTableSize=2048 localPredictorSize=2048 numThreads=1 +useIndirect=true [system.cpu.dcache] type=Cache @@ -164,12 +183,17 @@ addr_ranges=0:18446744073709551615 assoc=2 clk_domain=system.cpu_clk_domain clusivity=mostly_incl +default_p_state=UNDEFINED demand_mshr_reserve=1 eventq_index=0 hit_latency=2 is_read_only=false max_miss_count=0 mshrs=4 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null prefetch_on_access=false prefetcher=Null response_latency=2 @@ -188,8 +212,13 @@ type=LRU assoc=2 block_size=64 clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 hit_latency=2 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sequential_access=false size=262144 @@ -203,8 +232,13 @@ walker=system.cpu.dtb.walker [system.cpu.dtb.walker] type=X86PagetableWalker clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 num_squash_per_cycle=4 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null system=system port=system.cpu.toL2Bus.slave[3] @@ -522,12 +556,17 @@ addr_ranges=0:18446744073709551615 assoc=2 clk_domain=system.cpu_clk_domain clusivity=mostly_incl +default_p_state=UNDEFINED demand_mshr_reserve=1 eventq_index=0 hit_latency=2 is_read_only=true max_miss_count=0 mshrs=4 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null prefetch_on_access=false prefetcher=Null response_latency=2 @@ -546,18 +585,28 @@ type=LRU assoc=2 block_size=64 clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 hit_latency=2 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sequential_access=false size=131072 [system.cpu.interrupts] type=X86LocalApic clk_domain=system.cpu.apic_clk_domain +default_p_state=UNDEFINED eventq_index=0 int_latency=1000 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 pio_addr=2305843009213693952 pio_latency=100000 +power_model=Null system=system int_master=system.membus.slave[2] int_slave=system.membus.master[2] @@ -577,8 +626,13 @@ walker=system.cpu.itb.walker [system.cpu.itb.walker] type=X86PagetableWalker clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 num_squash_per_cycle=4 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null system=system port=system.cpu.toL2Bus.slave[2] @@ -589,12 +643,17 @@ addr_ranges=0:18446744073709551615 assoc=8 
clk_domain=system.cpu_clk_domain clusivity=mostly_incl +default_p_state=UNDEFINED demand_mshr_reserve=1 eventq_index=0 hit_latency=20 is_read_only=false max_miss_count=0 mshrs=20 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null prefetch_on_access=false prefetcher=Null response_latency=20 @@ -613,8 +672,13 @@ type=LRU assoc=8 block_size=64 clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 hit_latency=20 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sequential_access=false size=2097152 @@ -622,10 +686,15 @@ size=2097152 type=CoherentXBar children=snoop_filter clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 forward_latency=0 frontend_latency=1 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 point_of_coherency=false +power_model=Null response_latency=1 snoop_filter=system.cpu.toL2Bus.snoop_filter snoop_response_latency=1 @@ -656,9 +725,9 @@ env= errout=cerr euid=100 eventq_index=0 -executable=/dist/m5/cpu2000/binaries/x86/linux/mcf +executable=/arm/projectscratch/randd/systems/dist/cpu2000/binaries/x86/linux/mcf gid=100 -input=/dist/m5/cpu2000/data/mcf/smred/input/mcf.in +input=/arm/projectscratch/randd/systems/dist/cpu2000/data/mcf/smred/input/mcf.in kvmInSE=false max_stack_size=67108864 output=cout @@ -688,10 +757,15 @@ transition_latency=100000000 [system.membus] type=CoherentXBar clk_domain=system.clk_domain +default_p_state=UNDEFINED eventq_index=0 forward_latency=4 frontend_latency=3 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 point_of_coherency=true +power_model=Null response_latency=2 snoop_filter=Null snoop_response_latency=4 @@ -735,6 +809,7 @@ burst_length=8 channels=1 clk_domain=system.clk_domain conf_table_reported=true +default_p_state=UNDEFINED device_bus_width=8 device_rowbuffer_size=1024 device_size=536870912 @@ -746,7 +821,11 @@ max_accesses_per_row=16 mem_sched_policy=frfcfs min_writes_per_switch=16 null=false +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 page_policy=open_adaptive +power_model=Null range=0:268435455 ranks_per_channel=2 read_buffer_size=32 diff --git a/tests/long/se/10.mcf/ref/x86/linux/o3-timing/simerr b/tests/long/se/10.mcf/ref/x86/linux/o3-timing/simerr index d6398cf75..36f24465c 100755 --- a/tests/long/se/10.mcf/ref/x86/linux/o3-timing/simerr +++ b/tests/long/se/10.mcf/ref/x86/linux/o3-timing/simerr @@ -1 +1,3 @@ warn: DRAM device capacity (8192 Mbytes) does not match the address range assigned (256 Mbytes) +warn: Sockets disabled, not accepting gdb connections +warn: ClockedObject: More than one power state change request encountered within the same simulation tick diff --git a/tests/long/se/10.mcf/ref/x86/linux/o3-timing/simout b/tests/long/se/10.mcf/ref/x86/linux/o3-timing/simout index 072ce04c3..e1bfb6d2d 100755 --- a/tests/long/se/10.mcf/ref/x86/linux/o3-timing/simout +++ b/tests/long/se/10.mcf/ref/x86/linux/o3-timing/simout @@ -3,10 +3,10 @@ Redirecting stderr to build/X86/tests/opt/long/se/10.mcf/x86/linux/o3-timing/sim gem5 Simulator System. http://gem5.org gem5 is copyrighted software; use the --copyright option for details. 
-gem5 compiled Mar 16 2016 15:38:19 -gem5 started Mar 16 2016 15:38:47 -gem5 executing on dinar2c11, pid 14352 -command line: build/X86/gem5.opt -d build/X86/tests/opt/long/se/10.mcf/x86/linux/o3-timing -re /home/stever/gem5-public/tests/run.py build/X86/tests/opt/long/se/10.mcf/x86/linux/o3-timing +gem5 compiled Jul 21 2016 14:35:23 +gem5 started Jul 21 2016 14:36:18 +gem5 executing on e108600-lin, pid 18558 +command line: /work/curdun01/gem5-external.hg/build/X86/gem5.opt -d build/X86/tests/opt/long/se/10.mcf/x86/linux/o3-timing -re /work/curdun01/gem5-external.hg/tests/testing/../run.py long/se/10.mcf/x86/linux/o3-timing Global frequency set at 1000000000000 ticks per second info: Entering event queue @ 0. Starting simulation... @@ -23,8 +23,9 @@ info: Increasing stack size by one page. flow value : 4990014995 new implicit arcs : 23867 active arcs : 25772 +info: Increasing stack size by one page. simplex iterations : 2663 flow value : 3080014995 checksum : 68389 optimal -Exiting @ tick 61602281500 because target called exit() +Exiting @ tick 65986743500 because target called exit() diff --git a/tests/long/se/10.mcf/ref/x86/linux/o3-timing/stats.txt b/tests/long/se/10.mcf/ref/x86/linux/o3-timing/stats.txt index a14ecd97e..4b965d579 100644 --- a/tests/long/se/10.mcf/ref/x86/linux/o3-timing/stats.txt +++ b/tests/long/se/10.mcf/ref/x86/linux/o3-timing/stats.txt @@ -4,11 +4,11 @@ sim_seconds 0.065987 # Nu sim_ticks 65986743500 # Number of ticks simulated final_tick 65986743500 # Number of ticks from beginning of simulation (restored from checkpoints and never reset) sim_freq 1000000000000 # Frequency of simulated ticks -host_inst_rate 167131 # Simulator instruction rate (inst/s) -host_op_rate 294291 # Simulator op (including micro ops) rate (op/s) -host_tick_rate 69805272 # Simulator tick rate (ticks/s) -host_mem_usage 458048 # Number of bytes of host memory used -host_seconds 945.30 # Real time elapsed on the host +host_inst_rate 86207 # Simulator instruction rate (inst/s) +host_op_rate 151797 # Simulator op (including micro ops) rate (op/s) +host_tick_rate 36005878 # Simulator tick rate (ticks/s) +host_mem_usage 411344 # Number of bytes of host memory used +host_seconds 1832.67 # Real time elapsed on the host sim_insts 157988547 # Number of instructions simulated sim_ops 278192464 # Number of ops (including micro ops) simulated system.voltage_domain.voltage 1 # Voltage in Volts @@ -978,6 +978,7 @@ system.cpu.toL2Bus.pkt_size_system.cpu.icache.mem_side::system.cpu.l2cache.cpu_s system.cpu.toL2Bus.pkt_size_system.cpu.dcache.mem_side::system.cpu.l2cache.cpu_side 265252672 # Cumulative packet size per connected master and slave (bytes) system.cpu.toL2Bus.pkt_size::total 265329856 # Cumulative packet size per connected master and slave (bytes) system.cpu.toL2Bus.snoops 650 # Total snoops (count) +system.cpu.toL2Bus.snoopTraffic 17920 # Total snoop traffic (bytes) system.cpu.toL2Bus.snoop_fanout::samples 2079367 # Request fanout histogram system.cpu.toL2Bus.snoop_fanout::mean 0.000167 # Request fanout histogram system.cpu.toL2Bus.snoop_fanout::stdev 0.012936 # Request fanout histogram @@ -1009,6 +1010,7 @@ system.membus.pkt_size_system.cpu.l2cache.mem_side::system.physmem.port 197 system.membus.pkt_size_system.cpu.l2cache.mem_side::total 1977728 # Cumulative packet size per connected master and slave (bytes) system.membus.pkt_size::total 1977728 # Cumulative packet size per connected master and slave (bytes) system.membus.snoops 0 # Total snoops (count) +system.membus.snoopTraffic 0 # Total 
snoop traffic (bytes) system.membus.snoop_fanout::samples 30947 # Request fanout histogram system.membus.snoop_fanout::mean 0 # Request fanout histogram system.membus.snoop_fanout::stdev 0 # Request fanout histogram diff --git a/tests/long/se/10.mcf/ref/x86/linux/simple-timing/config.ini b/tests/long/se/10.mcf/ref/x86/linux/simple-timing/config.ini index f0cb43f99..420cd8ed8 100644 --- a/tests/long/se/10.mcf/ref/x86/linux/simple-timing/config.ini +++ b/tests/long/se/10.mcf/ref/x86/linux/simple-timing/config.ini @@ -14,7 +14,9 @@ children=clk_domain cpu cpu_clk_domain dvfs_handler membus physmem voltage_domai boot_osflags=a cache_line_size=64 clk_domain=system.clk_domain +default_p_state=UNDEFINED eventq_index=0 +exit_on_work_items=false init_param=0 kernel= kernel_addr_check=true @@ -24,9 +26,16 @@ mem_mode=timing mem_ranges= memories=system.physmem mmap_using_noreserve=false +multi_thread=false num_work_ids=16 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null readfile= symbolfile= +thermal_components= +thermal_model=Null work_begin_ckpt_count=0 work_begin_cpu_id_exit=-1 work_begin_exit_count=0 @@ -51,6 +60,7 @@ branchPred=Null checker=Null clk_domain=system.cpu_clk_domain cpu_id=0 +default_p_state=UNDEFINED do_checkpoint_insts=true do_quiesce=true do_statistics_insts=true @@ -66,6 +76,10 @@ max_insts_any_thread=0 max_loads_all_threads=0 max_loads_any_thread=0 numThreads=1 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null profile=0 progress_interval=0 simpoint_start_insts= @@ -89,13 +103,18 @@ children=tags addr_ranges=0:18446744073709551615 assoc=2 clk_domain=system.cpu_clk_domain +clusivity=mostly_incl +default_p_state=UNDEFINED demand_mshr_reserve=1 eventq_index=0 -forward_snoops=true hit_latency=2 is_read_only=false max_miss_count=0 mshrs=4 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null prefetch_on_access=false prefetcher=Null response_latency=2 @@ -105,6 +124,7 @@ system=system tags=system.cpu.dcache.tags tgts_per_mshr=20 write_buffers=8 +writeback_clean=false cpu_side=system.cpu.dcache_port mem_side=system.cpu.toL2Bus.slave[1] @@ -113,8 +133,13 @@ type=LRU assoc=2 block_size=64 clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 hit_latency=2 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sequential_access=false size=262144 @@ -128,8 +153,13 @@ walker=system.cpu.dtb.walker [system.cpu.dtb.walker] type=X86PagetableWalker clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 num_squash_per_cycle=4 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null system=system port=system.cpu.toL2Bus.slave[3] @@ -139,13 +169,18 @@ children=tags addr_ranges=0:18446744073709551615 assoc=2 clk_domain=system.cpu_clk_domain +clusivity=mostly_incl +default_p_state=UNDEFINED demand_mshr_reserve=1 eventq_index=0 -forward_snoops=true hit_latency=2 is_read_only=true max_miss_count=0 mshrs=4 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null prefetch_on_access=false prefetcher=Null response_latency=2 @@ -155,6 +190,7 @@ system=system tags=system.cpu.icache.tags tgts_per_mshr=20 write_buffers=8 +writeback_clean=true cpu_side=system.cpu.icache_port mem_side=system.cpu.toL2Bus.slave[0] @@ -163,18 +199,28 @@ type=LRU assoc=2 
block_size=64 clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 hit_latency=2 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sequential_access=false size=131072 [system.cpu.interrupts] type=X86LocalApic clk_domain=system.cpu.apic_clk_domain +default_p_state=UNDEFINED eventq_index=0 int_latency=1000 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 pio_addr=2305843009213693952 pio_latency=100000 +power_model=Null system=system int_master=system.membus.slave[2] int_slave=system.membus.master[2] @@ -194,8 +240,13 @@ walker=system.cpu.itb.walker [system.cpu.itb.walker] type=X86PagetableWalker clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 num_squash_per_cycle=4 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null system=system port=system.cpu.toL2Bus.slave[2] @@ -205,13 +256,18 @@ children=tags addr_ranges=0:18446744073709551615 assoc=8 clk_domain=system.cpu_clk_domain +clusivity=mostly_incl +default_p_state=UNDEFINED demand_mshr_reserve=1 eventq_index=0 -forward_snoops=true hit_latency=20 is_read_only=false max_miss_count=0 mshrs=20 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null prefetch_on_access=false prefetcher=Null response_latency=20 @@ -221,6 +277,7 @@ system=system tags=system.cpu.l2cache.tags tgts_per_mshr=12 write_buffers=8 +writeback_clean=false cpu_side=system.cpu.toL2Bus.master[0] mem_side=system.membus.slave[1] @@ -229,19 +286,31 @@ type=LRU assoc=8 block_size=64 clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 hit_latency=20 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sequential_access=false size=2097152 [system.cpu.toL2Bus] type=CoherentXBar +children=snoop_filter clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 forward_latency=0 frontend_latency=1 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +point_of_coherency=false +power_model=Null response_latency=1 -snoop_filter=Null +snoop_filter=system.cpu.toL2Bus.snoop_filter snoop_response_latency=1 system=system use_default_range=false @@ -249,6 +318,13 @@ width=32 master=system.cpu.l2cache.cpu_side slave=system.cpu.icache.mem_side system.cpu.dcache.mem_side system.cpu.itb.walker.port system.cpu.dtb.walker.port +[system.cpu.toL2Bus.snoop_filter] +type=SnoopFilter +eventq_index=0 +lookup_latency=0 +max_capacity=8388608 +system=system + [system.cpu.tracer] type=ExeTracer eventq_index=0 @@ -263,9 +339,9 @@ env= errout=cerr euid=100 eventq_index=0 -executable=/scratch/nilay/GEM5/dist/m5/cpu2000/binaries/x86/linux/mcf +executable=/arm/projectscratch/randd/systems/dist/cpu2000/binaries/x86/linux/mcf gid=100 -input=/scratch/nilay/GEM5/dist/m5/cpu2000/data/mcf/smred/input/mcf.in +input=/arm/projectscratch/randd/systems/dist/cpu2000/data/mcf/smred/input/mcf.in kvmInSE=false max_stack_size=67108864 output=cout @@ -295,9 +371,15 @@ transition_latency=100000000 [system.membus] type=CoherentXBar clk_domain=system.clk_domain +default_p_state=UNDEFINED eventq_index=0 forward_latency=4 frontend_latency=3 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +point_of_coherency=true +power_model=Null response_latency=2 snoop_filter=Null snoop_response_latency=4 @@ -312,11 +394,16 @@ 
type=SimpleMemory bandwidth=73.000000 clk_domain=system.clk_domain conf_table_reported=true +default_p_state=UNDEFINED eventq_index=0 in_addr_map=true latency=30000 latency_var=0 null=false +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null range=0:268435455 port=system.membus.master[0] diff --git a/tests/long/se/10.mcf/ref/x86/linux/simple-timing/simerr b/tests/long/se/10.mcf/ref/x86/linux/simple-timing/simerr index 1a4f96712..aadc3d011 100755 --- a/tests/long/se/10.mcf/ref/x86/linux/simple-timing/simerr +++ b/tests/long/se/10.mcf/ref/x86/linux/simple-timing/simerr @@ -1 +1,2 @@ warn: Sockets disabled, not accepting gdb connections +warn: ClockedObject: More than one power state change request encountered within the same simulation tick diff --git a/tests/long/se/10.mcf/ref/x86/linux/simple-timing/simout b/tests/long/se/10.mcf/ref/x86/linux/simple-timing/simout index dc48bfe97..657298ab6 100755 --- a/tests/long/se/10.mcf/ref/x86/linux/simple-timing/simout +++ b/tests/long/se/10.mcf/ref/x86/linux/simple-timing/simout @@ -3,10 +3,10 @@ Redirecting stderr to build/X86/tests/opt/long/se/10.mcf/x86/linux/simple-timing gem5 Simulator System. http://gem5.org gem5 is copyrighted software; use the --copyright option for details. -gem5 compiled Sep 14 2015 22:13:36 -gem5 started Sep 14 2015 23:02:52 -gem5 executing on ribera.cs.wisc.edu -command line: build/X86/gem5.opt -d build/X86/tests/opt/long/se/10.mcf/x86/linux/simple-timing -re /scratch/nilay/GEM5/gem5/tests/run.py build/X86/tests/opt/long/se/10.mcf/x86/linux/simple-timing +gem5 compiled Jul 21 2016 14:35:23 +gem5 started Jul 21 2016 14:36:18 +gem5 executing on e108600-lin, pid 18549 +command line: /work/curdun01/gem5-external.hg/build/X86/gem5.opt -d build/X86/tests/opt/long/se/10.mcf/x86/linux/simple-timing -re /work/curdun01/gem5-external.hg/tests/testing/../run.py long/se/10.mcf/x86/linux/simple-timing Global frequency set at 1000000000000 ticks per second info: Entering event queue @ 0. Starting simulation... 
@@ -26,4 +26,4 @@ simplex iterations : 2663 flow value : 3080014995 checksum : 68389 optimal -Exiting @ tick 365988859500 because target called exit() +Exiting @ tick 366199170500 because target called exit() diff --git a/tests/long/se/10.mcf/ref/x86/linux/simple-timing/stats.txt b/tests/long/se/10.mcf/ref/x86/linux/simple-timing/stats.txt index 8ce0adaa4..8197faf7d 100644 --- a/tests/long/se/10.mcf/ref/x86/linux/simple-timing/stats.txt +++ b/tests/long/se/10.mcf/ref/x86/linux/simple-timing/stats.txt @@ -4,11 +4,11 @@ sim_seconds 0.366199 # Nu sim_ticks 366199170500 # Number of ticks simulated final_tick 366199170500 # Number of ticks from beginning of simulation (restored from checkpoints and never reset) sim_freq 1000000000000 # Frequency of simulated ticks -host_inst_rate 926071 # Simulator instruction rate (inst/s) -host_op_rate 1630662 # Simulator op (including micro ops) rate (op/s) -host_tick_rate 2146525407 # Simulator tick rate (ticks/s) -host_mem_usage 453968 # Number of bytes of host memory used -host_seconds 170.60 # Real time elapsed on the host +host_inst_rate 454673 # Simulator instruction rate (inst/s) +host_op_rate 800606 # Simulator op (including micro ops) rate (op/s) +host_tick_rate 1053878980 # Simulator tick rate (ticks/s) +host_mem_usage 406480 # Number of bytes of host memory used +host_seconds 347.48 # Real time elapsed on the host sim_insts 157988548 # Number of instructions simulated sim_ops 278192465 # Number of ops (including micro ops) simulated system.voltage_domain.voltage 1 # Voltage in Volts @@ -480,6 +480,7 @@ system.cpu.toL2Bus.pkt_size_system.cpu.icache.mem_side::system.cpu.l2cache.cpu_s system.cpu.toL2Bus.pkt_size_system.cpu.dcache.mem_side::system.cpu.l2cache.cpu_side 264275904 # Cumulative packet size per connected master and slave (bytes) system.cpu.toL2Bus.pkt_size::total 264329152 # Cumulative packet size per connected master and slave (bytes) system.cpu.toL2Bus.snoops 313 # Total snoops (count) +system.cpu.toL2Bus.snoopTraffic 6528 # Total snoop traffic (bytes) system.cpu.toL2Bus.snoop_fanout::samples 2067950 # Request fanout histogram system.cpu.toL2Bus.snoop_fanout::mean 0.000095 # Request fanout histogram system.cpu.toL2Bus.snoop_fanout::stdev 0.009760 # Request fanout histogram @@ -511,6 +512,7 @@ system.membus.pkt_size_system.cpu.l2cache.mem_side::system.physmem.port 192 system.membus.pkt_size_system.cpu.l2cache.mem_side::total 1929344 # Cumulative packet size per connected master and slave (bytes) system.membus.pkt_size::total 1929344 # Cumulative packet size per connected master and slave (bytes) system.membus.snoops 0 # Total snoops (count) +system.membus.snoopTraffic 0 # Total snoop traffic (bytes) system.membus.snoop_fanout::samples 30160 # Request fanout histogram system.membus.snoop_fanout::mean 0 # Request fanout histogram system.membus.snoop_fanout::stdev 0 # Request fanout histogram diff --git a/tests/long/se/20.parser/ref/alpha/tru64/minor-timing/config.ini b/tests/long/se/20.parser/ref/alpha/tru64/minor-timing/config.ini index 9e17532ff..d14e71c27 100644 --- a/tests/long/se/20.parser/ref/alpha/tru64/minor-timing/config.ini +++ b/tests/long/se/20.parser/ref/alpha/tru64/minor-timing/config.ini @@ -14,7 +14,9 @@ children=clk_domain cpu cpu_clk_domain dvfs_handler membus physmem voltage_domai boot_osflags=a cache_line_size=64 clk_domain=system.clk_domain +default_p_state=UNDEFINED eventq_index=0 +exit_on_work_items=false init_param=0 kernel= kernel_addr_check=true @@ -24,9 +26,16 @@ mem_mode=timing mem_ranges= memories=system.physmem 
mmap_using_noreserve=false +multi_thread=false num_work_ids=16 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null readfile= symbolfile= +thermal_components= +thermal_model=Null work_begin_ckpt_count=0 work_begin_cpu_id_exit=-1 work_begin_exit_count=0 @@ -55,6 +64,7 @@ decodeCycleInput=true decodeInputBufferSize=3 decodeInputWidth=2 decodeToExecuteForwardDelay=1 +default_p_state=UNDEFINED do_checkpoint_insts=true do_quiesce=true do_statistics_insts=true @@ -97,12 +107,17 @@ max_insts_any_thread=0 max_loads_all_threads=0 max_loads_any_thread=0 numThreads=1 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null profile=0 progress_interval=0 simpoint_start_insts= socket_id=0 switched_out=false system=system +threadPolicy=RoundRobin tracer=system.cpu.tracer workload=system.cpu.workload dcache_port=system.cpu.dcache.cpu_side @@ -118,11 +133,18 @@ choicePredictorSize=8192 eventq_index=0 globalCtrBits=2 globalPredictorSize=8192 +indirectHashGHR=true +indirectHashTargets=true +indirectPathLength=3 +indirectSets=256 +indirectTagSize=16 +indirectWays=2 instShiftAmt=2 localCtrBits=2 localHistoryTableSize=2048 localPredictorSize=2048 numThreads=1 +useIndirect=true [system.cpu.dcache] type=Cache @@ -130,13 +152,18 @@ children=tags addr_ranges=0:18446744073709551615 assoc=2 clk_domain=system.cpu_clk_domain +clusivity=mostly_incl +default_p_state=UNDEFINED demand_mshr_reserve=1 eventq_index=0 -forward_snoops=true hit_latency=2 is_read_only=false max_miss_count=0 mshrs=4 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null prefetch_on_access=false prefetcher=Null response_latency=2 @@ -146,6 +173,7 @@ system=system tags=system.cpu.dcache.tags tgts_per_mshr=20 write_buffers=8 +writeback_clean=false cpu_side=system.cpu.dcache_port mem_side=system.cpu.toL2Bus.slave[1] @@ -154,8 +182,13 @@ type=LRU assoc=2 block_size=64 clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 hit_latency=2 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sequential_access=false size=262144 @@ -553,13 +586,18 @@ children=tags addr_ranges=0:18446744073709551615 assoc=2 clk_domain=system.cpu_clk_domain +clusivity=mostly_incl +default_p_state=UNDEFINED demand_mshr_reserve=1 eventq_index=0 -forward_snoops=true hit_latency=2 is_read_only=true max_miss_count=0 mshrs=4 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null prefetch_on_access=false prefetcher=Null response_latency=2 @@ -569,6 +607,7 @@ system=system tags=system.cpu.icache.tags tgts_per_mshr=20 write_buffers=8 +writeback_clean=true cpu_side=system.cpu.icache_port mem_side=system.cpu.toL2Bus.slave[0] @@ -577,8 +616,13 @@ type=LRU assoc=2 block_size=64 clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 hit_latency=2 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sequential_access=false size=131072 @@ -602,13 +646,18 @@ children=tags addr_ranges=0:18446744073709551615 assoc=8 clk_domain=system.cpu_clk_domain +clusivity=mostly_incl +default_p_state=UNDEFINED demand_mshr_reserve=1 eventq_index=0 -forward_snoops=true hit_latency=20 is_read_only=false max_miss_count=0 mshrs=20 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null prefetch_on_access=false 
prefetcher=Null response_latency=20 @@ -618,6 +667,7 @@ system=system tags=system.cpu.l2cache.tags tgts_per_mshr=12 write_buffers=8 +writeback_clean=false cpu_side=system.cpu.toL2Bus.master[0] mem_side=system.membus.slave[1] @@ -626,19 +676,31 @@ type=LRU assoc=8 block_size=64 clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 hit_latency=20 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sequential_access=false size=2097152 [system.cpu.toL2Bus] type=CoherentXBar +children=snoop_filter clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 forward_latency=0 frontend_latency=1 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +point_of_coherency=false +power_model=Null response_latency=1 -snoop_filter=Null +snoop_filter=system.cpu.toL2Bus.snoop_filter snoop_response_latency=1 system=system use_default_range=false @@ -646,6 +708,13 @@ width=32 master=system.cpu.l2cache.cpu_side slave=system.cpu.icache.mem_side system.cpu.dcache.mem_side +[system.cpu.toL2Bus.snoop_filter] +type=SnoopFilter +eventq_index=0 +lookup_latency=0 +max_capacity=8388608 +system=system + [system.cpu.tracer] type=ExeTracer eventq_index=0 @@ -660,9 +729,9 @@ env= errout=cerr euid=100 eventq_index=0 -executable=/scratch/nilay/GEM5/dist/m5/cpu2000/binaries/alpha/tru64/parser +executable=/arm/projectscratch/randd/systems/dist/cpu2000/binaries/alpha/tru64/parser gid=100 -input=/scratch/nilay/GEM5/dist/m5/cpu2000/data/parser/mdred/input/parser.in +input=/arm/projectscratch/randd/systems/dist/cpu2000/data/parser/mdred/input/parser.in kvmInSE=false max_stack_size=67108864 output=cout @@ -692,9 +761,15 @@ transition_latency=100000000 [system.membus] type=CoherentXBar clk_domain=system.clk_domain +default_p_state=UNDEFINED eventq_index=0 forward_latency=4 frontend_latency=3 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +point_of_coherency=true +power_model=Null response_latency=2 snoop_filter=Null snoop_response_latency=4 @@ -738,6 +813,7 @@ burst_length=8 channels=1 clk_domain=system.clk_domain conf_table_reported=true +default_p_state=UNDEFINED device_bus_width=8 device_rowbuffer_size=1024 device_size=536870912 @@ -749,7 +825,11 @@ max_accesses_per_row=16 mem_sched_policy=frfcfs min_writes_per_switch=16 null=false +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 page_policy=open_adaptive +power_model=Null range=0:134217727 ranks_per_channel=2 read_buffer_size=32 diff --git a/tests/long/se/20.parser/ref/alpha/tru64/minor-timing/simerr b/tests/long/se/20.parser/ref/alpha/tru64/minor-timing/simerr index f0a9a7c93..e0bca4e4e 100755 --- a/tests/long/se/20.parser/ref/alpha/tru64/minor-timing/simerr +++ b/tests/long/se/20.parser/ref/alpha/tru64/minor-timing/simerr @@ -1,5 +1,6 @@ warn: DRAM device capacity (8192 Mbytes) does not match the address range assigned (128 Mbytes) warn: Sockets disabled, not accepting gdb connections +warn: ClockedObject: More than one power state change request encountered within the same simulation tick warn: Prefetch instructions in Alpha do not do anything warn: Prefetch instructions in Alpha do not do anything warn: Prefetch instructions in Alpha do not do anything diff --git a/tests/long/se/20.parser/ref/alpha/tru64/minor-timing/simout b/tests/long/se/20.parser/ref/alpha/tru64/minor-timing/simout index f3df2a37b..48ddcf72a 100755 --- 
a/tests/long/se/20.parser/ref/alpha/tru64/minor-timing/simout +++ b/tests/long/se/20.parser/ref/alpha/tru64/minor-timing/simout @@ -3,10 +3,10 @@ Redirecting stderr to build/ALPHA/tests/opt/long/se/20.parser/alpha/tru64/minor- gem5 Simulator System. http://gem5.org gem5 is copyrighted software; use the --copyright option for details. -gem5 compiled Sep 14 2015 20:54:01 -gem5 started Sep 14 2015 21:15:04 -gem5 executing on ribera.cs.wisc.edu -command line: build/ALPHA/gem5.opt -d build/ALPHA/tests/opt/long/se/20.parser/alpha/tru64/minor-timing -re /scratch/nilay/GEM5/gem5/tests/run.py build/ALPHA/tests/opt/long/se/20.parser/alpha/tru64/minor-timing +gem5 compiled Jul 19 2016 12:23:51 +gem5 started Jul 21 2016 14:09:28 +gem5 executing on e108600-lin, pid 4298 +command line: /work/curdun01/gem5-external.hg/build/ALPHA/gem5.opt -d build/ALPHA/tests/opt/long/se/20.parser/alpha/tru64/minor-timing -re /work/curdun01/gem5-external.hg/tests/testing/../run.py long/se/20.parser/alpha/tru64/minor-timing Global frequency set at 1000000000000 ticks per second info: Entering event queue @ 0. Starting simulation... @@ -69,4 +69,4 @@ Echoing of input sentence turned on. about 2 million people attended the five best costumes got prizes No errors! -Exiting @ tick 412080064500 because target called exit() +Exiting @ tick 417309765500 because target called exit() diff --git a/tests/long/se/20.parser/ref/alpha/tru64/minor-timing/stats.txt b/tests/long/se/20.parser/ref/alpha/tru64/minor-timing/stats.txt index aa609094f..2a8feed05 100644 --- a/tests/long/se/20.parser/ref/alpha/tru64/minor-timing/stats.txt +++ b/tests/long/se/20.parser/ref/alpha/tru64/minor-timing/stats.txt @@ -1,96 +1,96 @@ ---------- Begin Simulation Statistics ---------- -sim_seconds 0.412080 # Number of seconds simulated -sim_ticks 412079966500 # Number of ticks simulated -final_tick 412079966500 # Number of ticks from beginning of simulation (restored from checkpoints and never reset) +sim_seconds 0.417310 # Number of seconds simulated +sim_ticks 417309765500 # Number of ticks simulated +final_tick 417309765500 # Number of ticks from beginning of simulation (restored from checkpoints and never reset) sim_freq 1000000000000 # Frequency of simulated ticks -host_inst_rate 523017 # Simulator instruction rate (inst/s) -host_op_rate 523017 # Simulator op (including micro ops) rate (op/s) -host_tick_rate 352221098 # Simulator tick rate (ticks/s) -host_mem_usage 299640 # Number of bytes of host memory used -host_seconds 1169.95 # Real time elapsed on the host +host_inst_rate 274693 # Simulator instruction rate (inst/s) +host_op_rate 274693 # Simulator op (including micro ops) rate (op/s) +host_tick_rate 187337647 # Simulator tick rate (ticks/s) +host_mem_usage 252076 # Number of bytes of host memory used +host_seconds 2227.58 # Real time elapsed on the host sim_insts 611901617 # Number of instructions simulated sim_ops 611901617 # Number of ops (including micro ops) simulated system.voltage_domain.voltage 1 # Voltage in Volts system.clk_domain.clock 1000 # Clock period in ticks -system.physmem.pwrStateResidencyTicks::UNDEFINED 412079966500 # Cumulative time (in ticks) in various power states -system.physmem.bytes_read::cpu.inst 156608 # Number of bytes read from this memory -system.physmem.bytes_read::cpu.data 24143296 # Number of bytes read from this memory -system.physmem.bytes_read::total 24299904 # Number of bytes read from this memory -system.physmem.bytes_inst_read::cpu.inst 156608 # Number of instructions bytes read from this memory 
-system.physmem.bytes_inst_read::total 156608 # Number of instructions bytes read from this memory +system.physmem.pwrStateResidencyTicks::UNDEFINED 417309765500 # Cumulative time (in ticks) in various power states +system.physmem.bytes_read::cpu.inst 156544 # Number of bytes read from this memory +system.physmem.bytes_read::cpu.data 24144128 # Number of bytes read from this memory +system.physmem.bytes_read::total 24300672 # Number of bytes read from this memory +system.physmem.bytes_inst_read::cpu.inst 156544 # Number of instructions bytes read from this memory +system.physmem.bytes_inst_read::total 156544 # Number of instructions bytes read from this memory system.physmem.bytes_written::writebacks 18790848 # Number of bytes written to this memory system.physmem.bytes_written::total 18790848 # Number of bytes written to this memory -system.physmem.num_reads::cpu.inst 2447 # Number of read requests responded to by this memory -system.physmem.num_reads::cpu.data 377239 # Number of read requests responded to by this memory -system.physmem.num_reads::total 379686 # Number of read requests responded to by this memory +system.physmem.num_reads::cpu.inst 2446 # Number of read requests responded to by this memory +system.physmem.num_reads::cpu.data 377252 # Number of read requests responded to by this memory +system.physmem.num_reads::total 379698 # Number of read requests responded to by this memory system.physmem.num_writes::writebacks 293607 # Number of write requests responded to by this memory system.physmem.num_writes::total 293607 # Number of write requests responded to by this memory -system.physmem.bw_read::cpu.inst 380043 # Total read bandwidth from this memory (bytes/s) -system.physmem.bw_read::cpu.data 58588861 # Total read bandwidth from this memory (bytes/s) -system.physmem.bw_read::total 58968904 # Total read bandwidth from this memory (bytes/s) -system.physmem.bw_inst_read::cpu.inst 380043 # Instruction read bandwidth from this memory (bytes/s) -system.physmem.bw_inst_read::total 380043 # Instruction read bandwidth from this memory (bytes/s) -system.physmem.bw_write::writebacks 45600004 # Write bandwidth from this memory (bytes/s) -system.physmem.bw_write::total 45600004 # Write bandwidth from this memory (bytes/s) -system.physmem.bw_total::writebacks 45600004 # Total bandwidth to/from this memory (bytes/s) -system.physmem.bw_total::cpu.inst 380043 # Total bandwidth to/from this memory (bytes/s) -system.physmem.bw_total::cpu.data 58588861 # Total bandwidth to/from this memory (bytes/s) -system.physmem.bw_total::total 104568908 # Total bandwidth to/from this memory (bytes/s) -system.physmem.readReqs 379686 # Number of read requests accepted +system.physmem.bw_read::cpu.inst 375127 # Total read bandwidth from this memory (bytes/s) +system.physmem.bw_read::cpu.data 57856609 # Total read bandwidth from this memory (bytes/s) +system.physmem.bw_read::total 58231736 # Total read bandwidth from this memory (bytes/s) +system.physmem.bw_inst_read::cpu.inst 375127 # Instruction read bandwidth from this memory (bytes/s) +system.physmem.bw_inst_read::total 375127 # Instruction read bandwidth from this memory (bytes/s) +system.physmem.bw_write::writebacks 45028536 # Write bandwidth from this memory (bytes/s) +system.physmem.bw_write::total 45028536 # Write bandwidth from this memory (bytes/s) +system.physmem.bw_total::writebacks 45028536 # Total bandwidth to/from this memory (bytes/s) +system.physmem.bw_total::cpu.inst 375127 # Total bandwidth to/from this memory (bytes/s) 
+system.physmem.bw_total::cpu.data 57856609 # Total bandwidth to/from this memory (bytes/s) +system.physmem.bw_total::total 103260272 # Total bandwidth to/from this memory (bytes/s) +system.physmem.readReqs 379698 # Number of read requests accepted system.physmem.writeReqs 293607 # Number of write requests accepted -system.physmem.readBursts 379686 # Number of DRAM read bursts, including those serviced by the write queue +system.physmem.readBursts 379698 # Number of DRAM read bursts, including those serviced by the write queue system.physmem.writeBursts 293607 # Number of DRAM write bursts, including those merged in the write queue -system.physmem.bytesReadDRAM 24278080 # Total number of bytes read from DRAM -system.physmem.bytesReadWrQ 21824 # Total number of bytes read from write queue -system.physmem.bytesWritten 18789376 # Total number of bytes written to DRAM -system.physmem.bytesReadSys 24299904 # Total read bytes from the system interface side +system.physmem.bytesReadDRAM 24277632 # Total number of bytes read from DRAM +system.physmem.bytesReadWrQ 23040 # Total number of bytes read from write queue +system.physmem.bytesWritten 18789440 # Total number of bytes written to DRAM +system.physmem.bytesReadSys 24300672 # Total read bytes from the system interface side system.physmem.bytesWrittenSys 18790848 # Total written bytes from the system interface side -system.physmem.servicedByWrQ 341 # Number of DRAM read bursts serviced by the write queue +system.physmem.servicedByWrQ 360 # Number of DRAM read bursts serviced by the write queue system.physmem.mergedWrBursts 0 # Number of DRAM write bursts merged with an existing one system.physmem.neitherReadNorWriteReqs 0 # Number of requests that are neither read nor write -system.physmem.perBankRdBursts::0 23685 # Per bank write bursts -system.physmem.perBankRdBursts::1 23156 # Per bank write bursts +system.physmem.perBankRdBursts::0 23694 # Per bank write bursts +system.physmem.perBankRdBursts::1 23158 # Per bank write bursts system.physmem.perBankRdBursts::2 23444 # Per bank write bursts -system.physmem.perBankRdBursts::3 24498 # Per bank write bursts -system.physmem.perBankRdBursts::4 25450 # Per bank write bursts -system.physmem.perBankRdBursts::5 23569 # Per bank write bursts -system.physmem.perBankRdBursts::6 23652 # Per bank write bursts -system.physmem.perBankRdBursts::7 23913 # Per bank write bursts -system.physmem.perBankRdBursts::8 23182 # Per bank write bursts -system.physmem.perBankRdBursts::9 23988 # Per bank write bursts -system.physmem.perBankRdBursts::10 24719 # Per bank write bursts -system.physmem.perBankRdBursts::11 22783 # Per bank write bursts -system.physmem.perBankRdBursts::12 23722 # Per bank write bursts -system.physmem.perBankRdBursts::13 24391 # Per bank write bursts -system.physmem.perBankRdBursts::14 22743 # Per bank write bursts -system.physmem.perBankRdBursts::15 22450 # Per bank write bursts +system.physmem.perBankRdBursts::3 24500 # Per bank write bursts +system.physmem.perBankRdBursts::4 25443 # Per bank write bursts +system.physmem.perBankRdBursts::5 23576 # Per bank write bursts +system.physmem.perBankRdBursts::6 23654 # Per bank write bursts +system.physmem.perBankRdBursts::7 23908 # Per bank write bursts +system.physmem.perBankRdBursts::8 23181 # Per bank write bursts +system.physmem.perBankRdBursts::9 23984 # Per bank write bursts +system.physmem.perBankRdBursts::10 24716 # Per bank write bursts +system.physmem.perBankRdBursts::11 22779 # Per bank write bursts +system.physmem.perBankRdBursts::12 23723 # 
Per bank write bursts +system.physmem.perBankRdBursts::13 24392 # Per bank write bursts +system.physmem.perBankRdBursts::14 22740 # Per bank write bursts +system.physmem.perBankRdBursts::15 22446 # Per bank write bursts system.physmem.perBankWrBursts::0 17782 # Per bank write bursts -system.physmem.perBankWrBursts::1 17456 # Per bank write bursts -system.physmem.perBankWrBursts::2 17945 # Per bank write bursts +system.physmem.perBankWrBursts::1 17457 # Per bank write bursts +system.physmem.perBankWrBursts::2 17944 # Per bank write bursts system.physmem.perBankWrBursts::3 18853 # Per bank write bursts -system.physmem.perBankWrBursts::4 19514 # Per bank write bursts -system.physmem.perBankWrBursts::5 18590 # Per bank write bursts +system.physmem.perBankWrBursts::4 19512 # Per bank write bursts +system.physmem.perBankWrBursts::5 18592 # Per bank write bursts system.physmem.perBankWrBursts::6 18778 # Per bank write bursts -system.physmem.perBankWrBursts::7 18659 # Per bank write bursts +system.physmem.perBankWrBursts::7 18657 # Per bank write bursts system.physmem.perBankWrBursts::8 18440 # Per bank write bursts -system.physmem.perBankWrBursts::9 18941 # Per bank write bursts -system.physmem.perBankWrBursts::10 19257 # Per bank write bursts +system.physmem.perBankWrBursts::9 18940 # Per bank write bursts +system.physmem.perBankWrBursts::10 19258 # Per bank write bursts system.physmem.perBankWrBursts::11 18049 # Per bank write bursts -system.physmem.perBankWrBursts::12 18261 # Per bank write bursts +system.physmem.perBankWrBursts::12 18265 # Per bank write bursts system.physmem.perBankWrBursts::13 18732 # Per bank write bursts -system.physmem.perBankWrBursts::14 17196 # Per bank write bursts +system.physmem.perBankWrBursts::14 17195 # Per bank write bursts system.physmem.perBankWrBursts::15 17131 # Per bank write bursts system.physmem.numRdRetry 0 # Number of times read queue was full causing retry system.physmem.numWrRetry 0 # Number of times write queue was full causing retry -system.physmem.totGap 412079864500 # Total gap between requests +system.physmem.totGap 417309678500 # Total gap between requests system.physmem.readPktSize::0 0 # Read request sizes (log2) system.physmem.readPktSize::1 0 # Read request sizes (log2) system.physmem.readPktSize::2 0 # Read request sizes (log2) system.physmem.readPktSize::3 0 # Read request sizes (log2) system.physmem.readPktSize::4 0 # Read request sizes (log2) system.physmem.readPktSize::5 0 # Read request sizes (log2) -system.physmem.readPktSize::6 379686 # Read request sizes (log2) +system.physmem.readPktSize::6 379698 # Read request sizes (log2) system.physmem.writePktSize::0 0 # Write request sizes (log2) system.physmem.writePktSize::1 0 # Write request sizes (log2) system.physmem.writePktSize::2 0 # Write request sizes (log2) @@ -98,9 +98,9 @@ system.physmem.writePktSize::3 0 # Wr system.physmem.writePktSize::4 0 # Write request sizes (log2) system.physmem.writePktSize::5 0 # Write request sizes (log2) system.physmem.writePktSize::6 293607 # Write request sizes (log2) -system.physmem.rdQLenPdf::0 377956 # What read queue length does an incoming req see -system.physmem.rdQLenPdf::1 1374 # What read queue length does an incoming req see -system.physmem.rdQLenPdf::2 15 # What read queue length does an incoming req see +system.physmem.rdQLenPdf::0 378264 # What read queue length does an incoming req see +system.physmem.rdQLenPdf::1 1069 # What read queue length does an incoming req see +system.physmem.rdQLenPdf::2 5 # What read queue length does an 
incoming req see system.physmem.rdQLenPdf::3 0 # What read queue length does an incoming req see system.physmem.rdQLenPdf::4 0 # What read queue length does an incoming req see system.physmem.rdQLenPdf::5 0 # What read queue length does an incoming req see @@ -145,37 +145,37 @@ system.physmem.wrQLenPdf::11 1 # Wh system.physmem.wrQLenPdf::12 1 # What write queue length does an incoming req see system.physmem.wrQLenPdf::13 1 # What write queue length does an incoming req see system.physmem.wrQLenPdf::14 1 # What write queue length does an incoming req see -system.physmem.wrQLenPdf::15 6977 # What write queue length does an incoming req see -system.physmem.wrQLenPdf::16 7349 # What write queue length does an incoming req see -system.physmem.wrQLenPdf::17 16982 # What write queue length does an incoming req see -system.physmem.wrQLenPdf::18 17402 # What write queue length does an incoming req see -system.physmem.wrQLenPdf::19 17456 # What write queue length does an incoming req see -system.physmem.wrQLenPdf::20 17496 # What write queue length does an incoming req see -system.physmem.wrQLenPdf::21 17482 # What write queue length does an incoming req see -system.physmem.wrQLenPdf::22 17480 # What write queue length does an incoming req see -system.physmem.wrQLenPdf::23 17464 # What write queue length does an incoming req see -system.physmem.wrQLenPdf::24 17469 # What write queue length does an incoming req see -system.physmem.wrQLenPdf::25 17515 # What write queue length does an incoming req see -system.physmem.wrQLenPdf::26 17450 # What write queue length does an incoming req see -system.physmem.wrQLenPdf::27 17520 # What write queue length does an incoming req see -system.physmem.wrQLenPdf::28 17545 # What write queue length does an incoming req see -system.physmem.wrQLenPdf::29 17504 # What write queue length does an incoming req see -system.physmem.wrQLenPdf::30 17668 # What write queue length does an incoming req see -system.physmem.wrQLenPdf::31 17405 # What write queue length does an incoming req see -system.physmem.wrQLenPdf::32 17352 # What write queue length does an incoming req see -system.physmem.wrQLenPdf::33 38 # What write queue length does an incoming req see -system.physmem.wrQLenPdf::34 18 # What write queue length does an incoming req see -system.physmem.wrQLenPdf::35 10 # What write queue length does an incoming req see -system.physmem.wrQLenPdf::36 6 # What write queue length does an incoming req see -system.physmem.wrQLenPdf::37 3 # What write queue length does an incoming req see -system.physmem.wrQLenPdf::38 1 # What write queue length does an incoming req see -system.physmem.wrQLenPdf::39 0 # What write queue length does an incoming req see -system.physmem.wrQLenPdf::40 0 # What write queue length does an incoming req see -system.physmem.wrQLenPdf::41 0 # What write queue length does an incoming req see -system.physmem.wrQLenPdf::42 0 # What write queue length does an incoming req see -system.physmem.wrQLenPdf::43 0 # What write queue length does an incoming req see -system.physmem.wrQLenPdf::44 0 # What write queue length does an incoming req see -system.physmem.wrQLenPdf::45 0 # What write queue length does an incoming req see +system.physmem.wrQLenPdf::15 6953 # What write queue length does an incoming req see +system.physmem.wrQLenPdf::16 7342 # What write queue length does an incoming req see +system.physmem.wrQLenPdf::17 17059 # What write queue length does an incoming req see +system.physmem.wrQLenPdf::18 17391 # What write queue length does an incoming req see 
+system.physmem.wrQLenPdf::19 17449 # What write queue length does an incoming req see +system.physmem.wrQLenPdf::20 17455 # What write queue length does an incoming req see +system.physmem.wrQLenPdf::21 17484 # What write queue length does an incoming req see +system.physmem.wrQLenPdf::22 17463 # What write queue length does an incoming req see +system.physmem.wrQLenPdf::23 17461 # What write queue length does an incoming req see +system.physmem.wrQLenPdf::24 17480 # What write queue length does an incoming req see +system.physmem.wrQLenPdf::25 17555 # What write queue length does an incoming req see +system.physmem.wrQLenPdf::26 17498 # What write queue length does an incoming req see +system.physmem.wrQLenPdf::27 17528 # What write queue length does an incoming req see +system.physmem.wrQLenPdf::28 17548 # What write queue length does an incoming req see +system.physmem.wrQLenPdf::29 17503 # What write queue length does an incoming req see +system.physmem.wrQLenPdf::30 17619 # What write queue length does an incoming req see +system.physmem.wrQLenPdf::31 17379 # What write queue length does an incoming req see +system.physmem.wrQLenPdf::32 17341 # What write queue length does an incoming req see +system.physmem.wrQLenPdf::33 17 # What write queue length does an incoming req see +system.physmem.wrQLenPdf::34 15 # What write queue length does an incoming req see +system.physmem.wrQLenPdf::35 8 # What write queue length does an incoming req see +system.physmem.wrQLenPdf::36 9 # What write queue length does an incoming req see +system.physmem.wrQLenPdf::37 6 # What write queue length does an incoming req see +system.physmem.wrQLenPdf::38 7 # What write queue length does an incoming req see +system.physmem.wrQLenPdf::39 5 # What write queue length does an incoming req see +system.physmem.wrQLenPdf::40 3 # What write queue length does an incoming req see +system.physmem.wrQLenPdf::41 3 # What write queue length does an incoming req see +system.physmem.wrQLenPdf::42 3 # What write queue length does an incoming req see +system.physmem.wrQLenPdf::43 4 # What write queue length does an incoming req see +system.physmem.wrQLenPdf::44 2 # What write queue length does an incoming req see +system.physmem.wrQLenPdf::45 2 # What write queue length does an incoming req see system.physmem.wrQLenPdf::46 0 # What write queue length does an incoming req see system.physmem.wrQLenPdf::47 0 # What write queue length does an incoming req see system.physmem.wrQLenPdf::48 0 # What write queue length does an incoming req see @@ -194,130 +194,128 @@ system.physmem.wrQLenPdf::60 0 # Wh system.physmem.wrQLenPdf::61 0 # What write queue length does an incoming req see system.physmem.wrQLenPdf::62 0 # What write queue length does an incoming req see system.physmem.wrQLenPdf::63 0 # What write queue length does an incoming req see -system.physmem.bytesPerActivate::samples 142401 # Bytes accessed per row activation -system.physmem.bytesPerActivate::mean 302.436977 # Bytes accessed per row activation -system.physmem.bytesPerActivate::gmean 179.883041 # Bytes accessed per row activation -system.physmem.bytesPerActivate::stdev 323.731419 # Bytes accessed per row activation -system.physmem.bytesPerActivate::0-127 50699 35.60% 35.60% # Bytes accessed per row activation -system.physmem.bytesPerActivate::128-255 38890 27.31% 62.91% # Bytes accessed per row activation -system.physmem.bytesPerActivate::256-383 13190 9.26% 72.18% # Bytes accessed per row activation -system.physmem.bytesPerActivate::384-511 8518 5.98% 78.16% # Bytes 
accessed per row activation -system.physmem.bytesPerActivate::512-639 5731 4.02% 82.18% # Bytes accessed per row activation -system.physmem.bytesPerActivate::640-767 3813 2.68% 84.86% # Bytes accessed per row activation -system.physmem.bytesPerActivate::768-895 2946 2.07% 86.93% # Bytes accessed per row activation -system.physmem.bytesPerActivate::896-1023 2544 1.79% 88.71% # Bytes accessed per row activation -system.physmem.bytesPerActivate::1024-1151 16070 11.29% 100.00% # Bytes accessed per row activation -system.physmem.bytesPerActivate::total 142401 # Bytes accessed per row activation -system.physmem.rdPerTurnAround::samples 17327 # Reads before turning the bus around for writes -system.physmem.rdPerTurnAround::mean 21.892595 # Reads before turning the bus around for writes -system.physmem.rdPerTurnAround::stdev 236.629202 # Reads before turning the bus around for writes +system.physmem.bytesPerActivate::samples 142524 # Bytes accessed per row activation +system.physmem.bytesPerActivate::mean 302.166540 # Bytes accessed per row activation +system.physmem.bytesPerActivate::gmean 179.513789 # Bytes accessed per row activation +system.physmem.bytesPerActivate::stdev 323.994907 # Bytes accessed per row activation +system.physmem.bytesPerActivate::0-127 50939 35.74% 35.74% # Bytes accessed per row activation +system.physmem.bytesPerActivate::128-255 38821 27.24% 62.98% # Bytes accessed per row activation +system.physmem.bytesPerActivate::256-383 13298 9.33% 72.31% # Bytes accessed per row activation +system.physmem.bytesPerActivate::384-511 8416 5.90% 78.21% # Bytes accessed per row activation +system.physmem.bytesPerActivate::512-639 5517 3.87% 82.09% # Bytes accessed per row activation +system.physmem.bytesPerActivate::640-767 3864 2.71% 84.80% # Bytes accessed per row activation +system.physmem.bytesPerActivate::768-895 2991 2.10% 86.89% # Bytes accessed per row activation +system.physmem.bytesPerActivate::896-1023 2664 1.87% 88.76% # Bytes accessed per row activation +system.physmem.bytesPerActivate::1024-1151 16014 11.24% 100.00% # Bytes accessed per row activation +system.physmem.bytesPerActivate::total 142524 # Bytes accessed per row activation +system.physmem.rdPerTurnAround::samples 17328 # Reads before turning the bus around for writes +system.physmem.rdPerTurnAround::mean 21.890986 # Reads before turning the bus around for writes +system.physmem.rdPerTurnAround::stdev 236.476851 # Reads before turning the bus around for writes system.physmem.rdPerTurnAround::0-1023 17319 99.95% 99.95% # Reads before turning the bus around for writes -system.physmem.rdPerTurnAround::1024-2047 4 0.02% 99.98% # Reads before turning the bus around for writes +system.physmem.rdPerTurnAround::1024-2047 5 0.03% 99.98% # Reads before turning the bus around for writes system.physmem.rdPerTurnAround::2048-3071 1 0.01% 99.98% # Reads before turning the bus around for writes system.physmem.rdPerTurnAround::3072-4095 1 0.01% 99.99% # Reads before turning the bus around for writes system.physmem.rdPerTurnAround::8192-9215 1 0.01% 99.99% # Reads before turning the bus around for writes system.physmem.rdPerTurnAround::28672-29695 1 0.01% 100.00% # Reads before turning the bus around for writes -system.physmem.rdPerTurnAround::total 17327 # Reads before turning the bus around for writes -system.physmem.wrPerTurnAround::samples 17327 # Writes before turning the bus around for reads -system.physmem.wrPerTurnAround::mean 16.943729 # Writes before turning the bus around for reads 
-system.physmem.wrPerTurnAround::gmean 16.871773 # Writes before turning the bus around for reads -system.physmem.wrPerTurnAround::stdev 3.342990 # Writes before turning the bus around for reads -system.physmem.wrPerTurnAround::16-23 17278 99.72% 99.72% # Writes before turning the bus around for reads -system.physmem.wrPerTurnAround::24-31 33 0.19% 99.91% # Writes before turning the bus around for reads -system.physmem.wrPerTurnAround::32-39 6 0.03% 99.94% # Writes before turning the bus around for reads -system.physmem.wrPerTurnAround::40-47 2 0.01% 99.95% # Writes before turning the bus around for reads -system.physmem.wrPerTurnAround::48-55 2 0.01% 99.97% # Writes before turning the bus around for reads -system.physmem.wrPerTurnAround::56-63 2 0.01% 99.98% # Writes before turning the bus around for reads -system.physmem.wrPerTurnAround::72-79 1 0.01% 99.98% # Writes before turning the bus around for reads -system.physmem.wrPerTurnAround::96-103 1 0.01% 99.99% # Writes before turning the bus around for reads +system.physmem.rdPerTurnAround::total 17328 # Reads before turning the bus around for writes +system.physmem.wrPerTurnAround::samples 17328 # Writes before turning the bus around for reads +system.physmem.wrPerTurnAround::mean 16.942809 # Writes before turning the bus around for reads +system.physmem.wrPerTurnAround::gmean 16.869717 # Writes before turning the bus around for reads +system.physmem.wrPerTurnAround::stdev 3.235744 # Writes before turning the bus around for reads +system.physmem.wrPerTurnAround::16-23 17276 99.70% 99.70% # Writes before turning the bus around for reads +system.physmem.wrPerTurnAround::24-31 34 0.20% 99.90% # Writes before turning the bus around for reads +system.physmem.wrPerTurnAround::32-39 12 0.07% 99.97% # Writes before turning the bus around for reads +system.physmem.wrPerTurnAround::56-63 1 0.01% 99.97% # Writes before turning the bus around for reads +system.physmem.wrPerTurnAround::64-71 2 0.01% 99.98% # Writes before turning the bus around for reads system.physmem.wrPerTurnAround::112-119 1 0.01% 99.99% # Writes before turning the bus around for reads -system.physmem.wrPerTurnAround::392-399 1 0.01% 100.00% # Writes before turning the bus around for reads -system.physmem.wrPerTurnAround::total 17327 # Writes before turning the bus around for reads -system.physmem.totQLat 4062204500 # Total ticks spent queuing -system.physmem.totMemAccLat 11174923250 # Total ticks spent from burst creation until serviced by the DRAM -system.physmem.totBusLat 1896725000 # Total ticks spent in databus transfers -system.physmem.avgQLat 10708.47 # Average queueing delay per DRAM burst +system.physmem.wrPerTurnAround::216-223 1 0.01% 99.99% # Writes before turning the bus around for reads +system.physmem.wrPerTurnAround::328-335 1 0.01% 100.00% # Writes before turning the bus around for reads +system.physmem.wrPerTurnAround::total 17328 # Writes before turning the bus around for reads +system.physmem.totQLat 4040781000 # Total ticks spent queuing +system.physmem.totMemAccLat 11153368500 # Total ticks spent from burst creation until serviced by the DRAM +system.physmem.totBusLat 1896690000 # Total ticks spent in databus transfers +system.physmem.avgQLat 10652.19 # Average queueing delay per DRAM burst system.physmem.avgBusLat 5000.00 # Average bus latency per DRAM burst -system.physmem.avgMemAccLat 29458.47 # Average memory access latency per DRAM burst -system.physmem.avgRdBW 58.92 # Average DRAM read bandwidth in MiByte/s -system.physmem.avgWrBW 45.60 # Average 
achieved write bandwidth in MiByte/s -system.physmem.avgRdBWSys 58.97 # Average system read bandwidth in MiByte/s -system.physmem.avgWrBWSys 45.60 # Average system write bandwidth in MiByte/s +system.physmem.avgMemAccLat 29402.19 # Average memory access latency per DRAM burst +system.physmem.avgRdBW 58.18 # Average DRAM read bandwidth in MiByte/s +system.physmem.avgWrBW 45.03 # Average achieved write bandwidth in MiByte/s +system.physmem.avgRdBWSys 58.23 # Average system read bandwidth in MiByte/s +system.physmem.avgWrBWSys 45.03 # Average system write bandwidth in MiByte/s system.physmem.peakBW 12800.00 # Theoretical peak bandwidth in MiByte/s -system.physmem.busUtil 0.82 # Data bus utilization in percentage -system.physmem.busUtilRead 0.46 # Data bus utilization in percentage for reads -system.physmem.busUtilWrite 0.36 # Data bus utilization in percentage for writes +system.physmem.busUtil 0.81 # Data bus utilization in percentage +system.physmem.busUtilRead 0.45 # Data bus utilization in percentage for reads +system.physmem.busUtilWrite 0.35 # Data bus utilization in percentage for writes system.physmem.avgRdQLen 1.00 # Average read queue length when enqueuing -system.physmem.avgWrQLen 21.44 # Average write queue length when enqueuing -system.physmem.readRowHits 314203 # Number of row buffer hits during reads -system.physmem.writeRowHits 216323 # Number of row buffer hits during writes -system.physmem.readRowHitRate 82.83 # Row buffer hit rate for reads -system.physmem.writeRowHitRate 73.68 # Row buffer hit rate for writes -system.physmem.avgGap 612036.46 # Average gap between requests -system.physmem.pageHitRate 78.84 # Row buffer hit rate, read and write combined -system.physmem_0.actEnergy 547933680 # Energy for activate commands per rank (pJ) -system.physmem_0.preEnergy 298971750 # Energy for precharge commands per rank (pJ) -system.physmem_0.readEnergy 1492662600 # Energy for read commands per rank (pJ) -system.physmem_0.writeEnergy 956298960 # Energy for write commands per rank (pJ) -system.physmem_0.refreshEnergy 26915029440 # Energy for refresh commands per rank (pJ) -system.physmem_0.actBackEnergy 62025350850 # Energy for active background per rank (pJ) -system.physmem_0.preBackEnergy 192839650500 # Energy for precharge background per rank (pJ) -system.physmem_0.totalEnergy 285075897780 # Total energy per rank (pJ) -system.physmem_0.averagePower 691.797872 # Core power per rank (mW) -system.physmem_0.memoryStateTime::IDLE 320257941500 # Time in different power states -system.physmem_0.memoryStateTime::REF 13760240000 # Time in different power states +system.physmem.avgWrQLen 20.54 # Average write queue length when enqueuing +system.physmem.readRowHits 314151 # Number of row buffer hits during reads +system.physmem.writeRowHits 216242 # Number of row buffer hits during writes +system.physmem.readRowHitRate 82.82 # Row buffer hit rate for reads +system.physmem.writeRowHitRate 73.65 # Row buffer hit rate for writes +system.physmem.avgGap 619792.93 # Average gap between requests +system.physmem.pageHitRate 78.82 # Row buffer hit rate, read and write combined +system.physmem_0.actEnergy 548954280 # Energy for activate commands per rank (pJ) +system.physmem_0.preEnergy 299528625 # Energy for precharge commands per rank (pJ) +system.physmem_0.readEnergy 1492608000 # Energy for read commands per rank (pJ) +system.physmem_0.writeEnergy 956117520 # Energy for write commands per rank (pJ) +system.physmem_0.refreshEnergy 27256273200 # Energy for refresh commands per rank (pJ) 
+system.physmem_0.actBackEnergy 62660545740 # Energy for active background per rank (pJ) +system.physmem_0.preBackEnergy 195417206250 # Energy for precharge background per rank (pJ) +system.physmem_0.totalEnergy 288631233615 # Total energy per rank (pJ) +system.physmem_0.averagePower 691.656457 # Core power per rank (mW) +system.physmem_0.memoryStateTime::IDLE 324545157250 # Time in different power states +system.physmem_0.memoryStateTime::REF 13934700000 # Time in different power states system.physmem_0.memoryStateTime::PRE_PDN 0 # Time in different power states -system.physmem_0.memoryStateTime::ACT 78061587250 # Time in different power states +system.physmem_0.memoryStateTime::ACT 78824485250 # Time in different power states system.physmem_0.memoryStateTime::ACT_PDN 0 # Time in different power states -system.physmem_1.actEnergy 528617880 # Energy for activate commands per rank (pJ) -system.physmem_1.preEnergy 288432375 # Energy for precharge commands per rank (pJ) -system.physmem_1.readEnergy 1466212800 # Energy for read commands per rank (pJ) -system.physmem_1.writeEnergy 946125360 # Energy for write commands per rank (pJ) -system.physmem_1.refreshEnergy 26915029440 # Energy for refresh commands per rank (pJ) -system.physmem_1.actBackEnergy 58968919935 # Energy for active background per rank (pJ) -system.physmem_1.preBackEnergy 195520730250 # Energy for precharge background per rank (pJ) -system.physmem_1.totalEnergy 284634068040 # Total energy per rank (pJ) -system.physmem_1.averagePower 690.725678 # Core power per rank (mW) -system.physmem_1.memoryStateTime::IDLE 324739070000 # Time in different power states -system.physmem_1.memoryStateTime::REF 13760240000 # Time in different power states +system.physmem_1.actEnergy 528194520 # Energy for activate commands per rank (pJ) +system.physmem_1.preEnergy 288201375 # Energy for precharge commands per rank (pJ) +system.physmem_1.readEnergy 1465682400 # Energy for read commands per rank (pJ) +system.physmem_1.writeEnergy 946002240 # Energy for write commands per rank (pJ) +system.physmem_1.refreshEnergy 27256273200 # Energy for refresh commands per rank (pJ) +system.physmem_1.actBackEnergy 59613271875 # Energy for active background per rank (pJ) +system.physmem_1.preBackEnergy 198090253500 # Energy for precharge background per rank (pJ) +system.physmem_1.totalEnergy 288187879110 # Total energy per rank (pJ) +system.physmem_1.averagePower 690.594032 # Core power per rank (mW) +system.physmem_1.memoryStateTime::IDLE 329008482750 # Time in different power states +system.physmem_1.memoryStateTime::REF 13934700000 # Time in different power states system.physmem_1.memoryStateTime::PRE_PDN 0 # Time in different power states -system.physmem_1.memoryStateTime::ACT 73580458750 # Time in different power states +system.physmem_1.memoryStateTime::ACT 74361159750 # Time in different power states system.physmem_1.memoryStateTime::ACT_PDN 0 # Time in different power states -system.pwrStateResidencyTicks::UNDEFINED 412079966500 # Cumulative time (in ticks) in various power states -system.cpu.branchPred.lookups 123917421 # Number of BP lookups -system.cpu.branchPred.condPredicted 87658943 # Number of conditional branches predicted -system.cpu.branchPred.condIncorrect 6214661 # Number of conditional branches incorrect -system.cpu.branchPred.BTBLookups 71578372 # Number of BTB lookups -system.cpu.branchPred.BTBHits 67267052 # Number of BTB hits +system.pwrStateResidencyTicks::UNDEFINED 417309765500 # Cumulative time (in ticks) in various power states 
+system.cpu.branchPred.lookups 124433672 # Number of BP lookups +system.cpu.branchPred.condPredicted 87996740 # Number of conditional branches predicted +system.cpu.branchPred.condIncorrect 6213240 # Number of conditional branches incorrect +system.cpu.branchPred.BTBLookups 71713354 # Number of BTB lookups +system.cpu.branchPred.BTBHits 67453022 # Number of BTB hits system.cpu.branchPred.BTBCorrect 0 # Number of correct BTB predictions (this stat may not work properly. -system.cpu.branchPred.BTBHitPct 93.976784 # BTB Hit Percentage -system.cpu.branchPred.usedRAS 15041989 # Number of times the RAS was used to get a target. -system.cpu.branchPred.RASInCorrect 1126026 # Number of incorrect RAS predictions. -system.cpu.branchPred.indirectLookups 7056 # Number of indirect predictor lookups. -system.cpu.branchPred.indirectHits 4451 # Number of indirect target hits. -system.cpu.branchPred.indirectMisses 2605 # Number of indirect misses. -system.cpu.branchPredindirectMispredicted 734 # Number of mispredicted indirect branches. +system.cpu.branchPred.BTBHitPct 94.059221 # BTB Hit Percentage +system.cpu.branchPred.usedRAS 15161941 # Number of times the RAS was used to get a target. +system.cpu.branchPred.RASInCorrect 1121063 # Number of incorrect RAS predictions. +system.cpu.branchPred.indirectLookups 7034 # Number of indirect predictor lookups. +system.cpu.branchPred.indirectHits 4431 # Number of indirect target hits. +system.cpu.branchPred.indirectMisses 2603 # Number of indirect misses. +system.cpu.branchPredindirectMispredicted 736 # Number of mispredicted indirect branches. system.cpu_clk_domain.clock 500 # Clock period in ticks system.cpu.dtb.fetch_hits 0 # ITB hits system.cpu.dtb.fetch_misses 0 # ITB misses system.cpu.dtb.fetch_acv 0 # ITB acv system.cpu.dtb.fetch_accesses 0 # ITB accesses -system.cpu.dtb.read_hits 149344684 # DTB read hits -system.cpu.dtb.read_misses 549067 # DTB read misses +system.cpu.dtb.read_hits 149830728 # DTB read hits +system.cpu.dtb.read_misses 559355 # DTB read misses system.cpu.dtb.read_acv 0 # DTB read access violations -system.cpu.dtb.read_accesses 149893751 # DTB read accesses -system.cpu.dtb.write_hits 57319581 # DTB write hits -system.cpu.dtb.write_misses 63710 # DTB write misses +system.cpu.dtb.read_accesses 150390083 # DTB read accesses +system.cpu.dtb.write_hits 57603616 # DTB write hits +system.cpu.dtb.write_misses 71398 # DTB write misses system.cpu.dtb.write_acv 0 # DTB write access violations -system.cpu.dtb.write_accesses 57383291 # DTB write accesses -system.cpu.dtb.data_hits 206664265 # DTB hits -system.cpu.dtb.data_misses 612777 # DTB misses +system.cpu.dtb.write_accesses 57675014 # DTB write accesses +system.cpu.dtb.data_hits 207434344 # DTB hits +system.cpu.dtb.data_misses 630753 # DTB misses system.cpu.dtb.data_acv 0 # DTB access violations -system.cpu.dtb.data_accesses 207277042 # DTB accesses -system.cpu.itb.fetch_hits 226050668 # ITB hits +system.cpu.dtb.data_accesses 208065097 # DTB accesses +system.cpu.itb.fetch_hits 227957182 # ITB hits system.cpu.itb.fetch_misses 48 # ITB misses system.cpu.itb.fetch_acv 0 # ITB acv -system.cpu.itb.fetch_accesses 226050716 # ITB accesses +system.cpu.itb.fetch_accesses 227957230 # ITB accesses system.cpu.itb.read_hits 0 # DTB read hits system.cpu.itb.read_misses 0 # DTB read misses system.cpu.itb.read_acv 0 # DTB read access violations @@ -331,16 +329,16 @@ system.cpu.itb.data_misses 0 # DT system.cpu.itb.data_acv 0 # DTB access violations system.cpu.itb.data_accesses 0 # DTB accesses 
system.cpu.workload.num_syscalls 485 # Number of system calls -system.cpu.pwrStateResidencyTicks::ON 412079966500 # Cumulative time (in ticks) in various power states -system.cpu.numCycles 824159933 # number of cpu cycles simulated +system.cpu.pwrStateResidencyTicks::ON 417309765500 # Cumulative time (in ticks) in various power states +system.cpu.numCycles 834619531 # number of cpu cycles simulated system.cpu.numWorkItemsStarted 0 # number of work items this cpu started system.cpu.numWorkItemsCompleted 0 # number of work items this cpu completed system.cpu.committedInsts 611901617 # Number of instructions committed system.cpu.committedOps 611901617 # Number of ops (including micro ops) committed -system.cpu.discardedOps 12834895 # Number of ops (including micro ops) which were discarded before commit +system.cpu.discardedOps 14840405 # Number of ops (including micro ops) which were discarded before commit system.cpu.numFetchSuspends 0 # Number of times Execute suspended instruction fetching -system.cpu.cpi 1.346883 # CPI: cycles per instruction -system.cpu.ipc 0.742455 # IPC: instructions per cycle +system.cpu.cpi 1.363977 # CPI: cycles per instruction +system.cpu.ipc 0.733150 # IPC: instructions per cycle system.cpu.op_class_0::No_OpClass 52179272 8.53% 8.53% # Class of committed instruction system.cpu.op_class_0::IntAlu 355264620 58.06% 66.59% # Class of committed instruction system.cpu.op_class_0::IntMult 152833 0.02% 66.61% # Class of committed instruction @@ -376,315 +374,315 @@ system.cpu.op_class_0::MemWrite 57220983 9.35% 100.00% # Cl system.cpu.op_class_0::IprAccess 0 0.00% 100.00% # Class of committed instruction system.cpu.op_class_0::InstPrefetch 0 0.00% 100.00% # Class of committed instruction system.cpu.op_class_0::total 611901617 # Class of committed instruction -system.cpu.tickCycles 739333991 # Number of cycles that the object actually ticked -system.cpu.idleCycles 84825942 # Total number of cycles that the object has spent stopped -system.cpu.dcache.tags.pwrStateResidencyTicks::UNDEFINED 412079966500 # Cumulative time (in ticks) in various power states -system.cpu.dcache.tags.replacements 2535268 # number of replacements -system.cpu.dcache.tags.tagsinuse 4087.644038 # Cycle average of tags in use -system.cpu.dcache.tags.total_refs 202570428 # Total number of references to valid blocks. -system.cpu.dcache.tags.sampled_refs 2539364 # Sample count of references to valid blocks. -system.cpu.dcache.tags.avg_refs 79.772111 # Average number of references to valid blocks. -system.cpu.dcache.tags.warmup_cycle 1636792500 # Cycle when the warmup percentage was hit. -system.cpu.dcache.tags.occ_blocks::cpu.data 4087.644038 # Average occupied blocks per requestor -system.cpu.dcache.tags.occ_percent::cpu.data 0.997960 # Average percentage of cache occupancy -system.cpu.dcache.tags.occ_percent::total 0.997960 # Average percentage of cache occupancy +system.cpu.tickCycles 746834256 # Number of cycles that the object actually ticked +system.cpu.idleCycles 87785275 # Total number of cycles that the object has spent stopped +system.cpu.dcache.tags.pwrStateResidencyTicks::UNDEFINED 417309765500 # Cumulative time (in ticks) in various power states +system.cpu.dcache.tags.replacements 2535509 # number of replacements +system.cpu.dcache.tags.tagsinuse 4087.685849 # Cycle average of tags in use +system.cpu.dcache.tags.total_refs 203187427 # Total number of references to valid blocks. +system.cpu.dcache.tags.sampled_refs 2539605 # Sample count of references to valid blocks. 
+system.cpu.dcache.tags.avg_refs 80.007492 # Average number of references to valid blocks. +system.cpu.dcache.tags.warmup_cycle 1653740500 # Cycle when the warmup percentage was hit. +system.cpu.dcache.tags.occ_blocks::cpu.data 4087.685849 # Average occupied blocks per requestor +system.cpu.dcache.tags.occ_percent::cpu.data 0.997970 # Average percentage of cache occupancy +system.cpu.dcache.tags.occ_percent::total 0.997970 # Average percentage of cache occupancy system.cpu.dcache.tags.occ_task_id_blocks::1024 4096 # Occupied blocks per task id -system.cpu.dcache.tags.age_task_id_blocks_1024::0 49 # Occupied blocks per task id +system.cpu.dcache.tags.age_task_id_blocks_1024::0 48 # Occupied blocks per task id system.cpu.dcache.tags.age_task_id_blocks_1024::1 73 # Occupied blocks per task id -system.cpu.dcache.tags.age_task_id_blocks_1024::2 829 # Occupied blocks per task id -system.cpu.dcache.tags.age_task_id_blocks_1024::3 3145 # Occupied blocks per task id +system.cpu.dcache.tags.age_task_id_blocks_1024::2 828 # Occupied blocks per task id +system.cpu.dcache.tags.age_task_id_blocks_1024::3 3147 # Occupied blocks per task id system.cpu.dcache.tags.occ_task_id_percent::1024 1 # Percentage of cache occupancy per task id -system.cpu.dcache.tags.tag_accesses 414584966 # Number of tag accesses -system.cpu.dcache.tags.data_accesses 414584966 # Number of data accesses -system.cpu.dcache.pwrStateResidencyTicks::UNDEFINED 412079966500 # Cumulative time (in ticks) in various power states -system.cpu.dcache.ReadReq_hits::cpu.data 146904269 # number of ReadReq hits -system.cpu.dcache.ReadReq_hits::total 146904269 # number of ReadReq hits -system.cpu.dcache.WriteReq_hits::cpu.data 55666159 # number of WriteReq hits -system.cpu.dcache.WriteReq_hits::total 55666159 # number of WriteReq hits -system.cpu.dcache.demand_hits::cpu.data 202570428 # number of demand (read+write) hits -system.cpu.dcache.demand_hits::total 202570428 # number of demand (read+write) hits -system.cpu.dcache.overall_hits::cpu.data 202570428 # number of overall hits -system.cpu.dcache.overall_hits::total 202570428 # number of overall hits -system.cpu.dcache.ReadReq_misses::cpu.data 1908498 # number of ReadReq misses -system.cpu.dcache.ReadReq_misses::total 1908498 # number of ReadReq misses -system.cpu.dcache.WriteReq_misses::cpu.data 1543875 # number of WriteReq misses -system.cpu.dcache.WriteReq_misses::total 1543875 # number of WriteReq misses -system.cpu.dcache.demand_misses::cpu.data 3452373 # number of demand (read+write) misses -system.cpu.dcache.demand_misses::total 3452373 # number of demand (read+write) misses -system.cpu.dcache.overall_misses::cpu.data 3452373 # number of overall misses -system.cpu.dcache.overall_misses::total 3452373 # number of overall misses -system.cpu.dcache.ReadReq_miss_latency::cpu.data 37718879500 # number of ReadReq miss cycles -system.cpu.dcache.ReadReq_miss_latency::total 37718879500 # number of ReadReq miss cycles -system.cpu.dcache.WriteReq_miss_latency::cpu.data 47736374000 # number of WriteReq miss cycles -system.cpu.dcache.WriteReq_miss_latency::total 47736374000 # number of WriteReq miss cycles -system.cpu.dcache.demand_miss_latency::cpu.data 85455253500 # number of demand (read+write) miss cycles -system.cpu.dcache.demand_miss_latency::total 85455253500 # number of demand (read+write) miss cycles -system.cpu.dcache.overall_miss_latency::cpu.data 85455253500 # number of overall miss cycles -system.cpu.dcache.overall_miss_latency::total 85455253500 # number of overall miss cycles 
-system.cpu.dcache.ReadReq_accesses::cpu.data 148812767 # number of ReadReq accesses(hits+misses) -system.cpu.dcache.ReadReq_accesses::total 148812767 # number of ReadReq accesses(hits+misses) +system.cpu.dcache.tags.tag_accesses 415624619 # Number of tag accesses +system.cpu.dcache.tags.data_accesses 415624619 # Number of data accesses +system.cpu.dcache.pwrStateResidencyTicks::UNDEFINED 417309765500 # Cumulative time (in ticks) in various power states +system.cpu.dcache.ReadReq_hits::cpu.data 147521260 # number of ReadReq hits +system.cpu.dcache.ReadReq_hits::total 147521260 # number of ReadReq hits +system.cpu.dcache.WriteReq_hits::cpu.data 55666167 # number of WriteReq hits +system.cpu.dcache.WriteReq_hits::total 55666167 # number of WriteReq hits +system.cpu.dcache.demand_hits::cpu.data 203187427 # number of demand (read+write) hits +system.cpu.dcache.demand_hits::total 203187427 # number of demand (read+write) hits +system.cpu.dcache.overall_hits::cpu.data 203187427 # number of overall hits +system.cpu.dcache.overall_hits::total 203187427 # number of overall hits +system.cpu.dcache.ReadReq_misses::cpu.data 1811213 # number of ReadReq misses +system.cpu.dcache.ReadReq_misses::total 1811213 # number of ReadReq misses +system.cpu.dcache.WriteReq_misses::cpu.data 1543867 # number of WriteReq misses +system.cpu.dcache.WriteReq_misses::total 1543867 # number of WriteReq misses +system.cpu.dcache.demand_misses::cpu.data 3355080 # number of demand (read+write) misses +system.cpu.dcache.demand_misses::total 3355080 # number of demand (read+write) misses +system.cpu.dcache.overall_misses::cpu.data 3355080 # number of overall misses +system.cpu.dcache.overall_misses::total 3355080 # number of overall misses +system.cpu.dcache.ReadReq_miss_latency::cpu.data 36182187000 # number of ReadReq miss cycles +system.cpu.dcache.ReadReq_miss_latency::total 36182187000 # number of ReadReq miss cycles +system.cpu.dcache.WriteReq_miss_latency::cpu.data 47720909500 # number of WriteReq miss cycles +system.cpu.dcache.WriteReq_miss_latency::total 47720909500 # number of WriteReq miss cycles +system.cpu.dcache.demand_miss_latency::cpu.data 83903096500 # number of demand (read+write) miss cycles +system.cpu.dcache.demand_miss_latency::total 83903096500 # number of demand (read+write) miss cycles +system.cpu.dcache.overall_miss_latency::cpu.data 83903096500 # number of overall miss cycles +system.cpu.dcache.overall_miss_latency::total 83903096500 # number of overall miss cycles +system.cpu.dcache.ReadReq_accesses::cpu.data 149332473 # number of ReadReq accesses(hits+misses) +system.cpu.dcache.ReadReq_accesses::total 149332473 # number of ReadReq accesses(hits+misses) system.cpu.dcache.WriteReq_accesses::cpu.data 57210034 # number of WriteReq accesses(hits+misses) system.cpu.dcache.WriteReq_accesses::total 57210034 # number of WriteReq accesses(hits+misses) -system.cpu.dcache.demand_accesses::cpu.data 206022801 # number of demand (read+write) accesses -system.cpu.dcache.demand_accesses::total 206022801 # number of demand (read+write) accesses -system.cpu.dcache.overall_accesses::cpu.data 206022801 # number of overall (read+write) accesses -system.cpu.dcache.overall_accesses::total 206022801 # number of overall (read+write) accesses -system.cpu.dcache.ReadReq_miss_rate::cpu.data 0.012825 # miss rate for ReadReq accesses -system.cpu.dcache.ReadReq_miss_rate::total 0.012825 # miss rate for ReadReq accesses +system.cpu.dcache.demand_accesses::cpu.data 206542507 # number of demand (read+write) accesses 
+system.cpu.dcache.demand_accesses::total 206542507 # number of demand (read+write) accesses +system.cpu.dcache.overall_accesses::cpu.data 206542507 # number of overall (read+write) accesses +system.cpu.dcache.overall_accesses::total 206542507 # number of overall (read+write) accesses +system.cpu.dcache.ReadReq_miss_rate::cpu.data 0.012129 # miss rate for ReadReq accesses +system.cpu.dcache.ReadReq_miss_rate::total 0.012129 # miss rate for ReadReq accesses system.cpu.dcache.WriteReq_miss_rate::cpu.data 0.026986 # miss rate for WriteReq accesses system.cpu.dcache.WriteReq_miss_rate::total 0.026986 # miss rate for WriteReq accesses -system.cpu.dcache.demand_miss_rate::cpu.data 0.016757 # miss rate for demand accesses -system.cpu.dcache.demand_miss_rate::total 0.016757 # miss rate for demand accesses -system.cpu.dcache.overall_miss_rate::cpu.data 0.016757 # miss rate for overall accesses -system.cpu.dcache.overall_miss_rate::total 0.016757 # miss rate for overall accesses -system.cpu.dcache.ReadReq_avg_miss_latency::cpu.data 19763.646333 # average ReadReq miss latency -system.cpu.dcache.ReadReq_avg_miss_latency::total 19763.646333 # average ReadReq miss latency -system.cpu.dcache.WriteReq_avg_miss_latency::cpu.data 30919.843899 # average WriteReq miss latency -system.cpu.dcache.WriteReq_avg_miss_latency::total 30919.843899 # average WriteReq miss latency -system.cpu.dcache.demand_avg_miss_latency::cpu.data 24752.613203 # average overall miss latency -system.cpu.dcache.demand_avg_miss_latency::total 24752.613203 # average overall miss latency -system.cpu.dcache.overall_avg_miss_latency::cpu.data 24752.613203 # average overall miss latency -system.cpu.dcache.overall_avg_miss_latency::total 24752.613203 # average overall miss latency +system.cpu.dcache.demand_miss_rate::cpu.data 0.016244 # miss rate for demand accesses +system.cpu.dcache.demand_miss_rate::total 0.016244 # miss rate for demand accesses +system.cpu.dcache.overall_miss_rate::cpu.data 0.016244 # miss rate for overall accesses +system.cpu.dcache.overall_miss_rate::total 0.016244 # miss rate for overall accesses +system.cpu.dcache.ReadReq_avg_miss_latency::cpu.data 19976.770816 # average ReadReq miss latency +system.cpu.dcache.ReadReq_avg_miss_latency::total 19976.770816 # average ReadReq miss latency +system.cpu.dcache.WriteReq_avg_miss_latency::cpu.data 30909.987389 # average WriteReq miss latency +system.cpu.dcache.WriteReq_avg_miss_latency::total 30909.987389 # average WriteReq miss latency +system.cpu.dcache.demand_avg_miss_latency::cpu.data 25007.778205 # average overall miss latency +system.cpu.dcache.demand_avg_miss_latency::total 25007.778205 # average overall miss latency +system.cpu.dcache.overall_avg_miss_latency::cpu.data 25007.778205 # average overall miss latency +system.cpu.dcache.overall_avg_miss_latency::total 25007.778205 # average overall miss latency system.cpu.dcache.blocked_cycles::no_mshrs 0 # number of cycles access was blocked system.cpu.dcache.blocked_cycles::no_targets 0 # number of cycles access was blocked system.cpu.dcache.blocked::no_mshrs 0 # number of cycles access was blocked system.cpu.dcache.blocked::no_targets 0 # number of cycles access was blocked system.cpu.dcache.avg_blocked_cycles::no_mshrs nan # average number of cycles each access was blocked system.cpu.dcache.avg_blocked_cycles::no_targets nan # average number of cycles each access was blocked -system.cpu.dcache.writebacks::writebacks 2339413 # number of writebacks -system.cpu.dcache.writebacks::total 2339413 # number of writebacks 
-system.cpu.dcache.ReadReq_mshr_hits::cpu.data 143957 # number of ReadReq MSHR hits -system.cpu.dcache.ReadReq_mshr_hits::total 143957 # number of ReadReq MSHR hits -system.cpu.dcache.WriteReq_mshr_hits::cpu.data 769052 # number of WriteReq MSHR hits -system.cpu.dcache.WriteReq_mshr_hits::total 769052 # number of WriteReq MSHR hits -system.cpu.dcache.demand_mshr_hits::cpu.data 913009 # number of demand (read+write) MSHR hits -system.cpu.dcache.demand_mshr_hits::total 913009 # number of demand (read+write) MSHR hits -system.cpu.dcache.overall_mshr_hits::cpu.data 913009 # number of overall MSHR hits -system.cpu.dcache.overall_mshr_hits::total 913009 # number of overall MSHR hits -system.cpu.dcache.ReadReq_mshr_misses::cpu.data 1764541 # number of ReadReq MSHR misses -system.cpu.dcache.ReadReq_mshr_misses::total 1764541 # number of ReadReq MSHR misses -system.cpu.dcache.WriteReq_mshr_misses::cpu.data 774823 # number of WriteReq MSHR misses -system.cpu.dcache.WriteReq_mshr_misses::total 774823 # number of WriteReq MSHR misses -system.cpu.dcache.demand_mshr_misses::cpu.data 2539364 # number of demand (read+write) MSHR misses -system.cpu.dcache.demand_mshr_misses::total 2539364 # number of demand (read+write) MSHR misses -system.cpu.dcache.overall_mshr_misses::cpu.data 2539364 # number of overall MSHR misses -system.cpu.dcache.overall_mshr_misses::total 2539364 # number of overall MSHR misses -system.cpu.dcache.ReadReq_mshr_miss_latency::cpu.data 33202779000 # number of ReadReq MSHR miss cycles -system.cpu.dcache.ReadReq_mshr_miss_latency::total 33202779000 # number of ReadReq MSHR miss cycles -system.cpu.dcache.WriteReq_mshr_miss_latency::cpu.data 23350926000 # number of WriteReq MSHR miss cycles -system.cpu.dcache.WriteReq_mshr_miss_latency::total 23350926000 # number of WriteReq MSHR miss cycles -system.cpu.dcache.demand_mshr_miss_latency::cpu.data 56553705000 # number of demand (read+write) MSHR miss cycles -system.cpu.dcache.demand_mshr_miss_latency::total 56553705000 # number of demand (read+write) MSHR miss cycles -system.cpu.dcache.overall_mshr_miss_latency::cpu.data 56553705000 # number of overall MSHR miss cycles -system.cpu.dcache.overall_mshr_miss_latency::total 56553705000 # number of overall MSHR miss cycles -system.cpu.dcache.ReadReq_mshr_miss_rate::cpu.data 0.011857 # mshr miss rate for ReadReq accesses -system.cpu.dcache.ReadReq_mshr_miss_rate::total 0.011857 # mshr miss rate for ReadReq accesses +system.cpu.dcache.writebacks::writebacks 2339608 # number of writebacks +system.cpu.dcache.writebacks::total 2339608 # number of writebacks +system.cpu.dcache.ReadReq_mshr_hits::cpu.data 46417 # number of ReadReq MSHR hits +system.cpu.dcache.ReadReq_mshr_hits::total 46417 # number of ReadReq MSHR hits +system.cpu.dcache.WriteReq_mshr_hits::cpu.data 769058 # number of WriteReq MSHR hits +system.cpu.dcache.WriteReq_mshr_hits::total 769058 # number of WriteReq MSHR hits +system.cpu.dcache.demand_mshr_hits::cpu.data 815475 # number of demand (read+write) MSHR hits +system.cpu.dcache.demand_mshr_hits::total 815475 # number of demand (read+write) MSHR hits +system.cpu.dcache.overall_mshr_hits::cpu.data 815475 # number of overall MSHR hits +system.cpu.dcache.overall_mshr_hits::total 815475 # number of overall MSHR hits +system.cpu.dcache.ReadReq_mshr_misses::cpu.data 1764796 # number of ReadReq MSHR misses +system.cpu.dcache.ReadReq_mshr_misses::total 1764796 # number of ReadReq MSHR misses +system.cpu.dcache.WriteReq_mshr_misses::cpu.data 774809 # number of WriteReq MSHR misses 
+system.cpu.dcache.WriteReq_mshr_misses::total 774809 # number of WriteReq MSHR misses +system.cpu.dcache.demand_mshr_misses::cpu.data 2539605 # number of demand (read+write) MSHR misses +system.cpu.dcache.demand_mshr_misses::total 2539605 # number of demand (read+write) MSHR misses +system.cpu.dcache.overall_mshr_misses::cpu.data 2539605 # number of overall MSHR misses +system.cpu.dcache.overall_mshr_misses::total 2539605 # number of overall MSHR misses +system.cpu.dcache.ReadReq_mshr_miss_latency::cpu.data 33173534500 # number of ReadReq MSHR miss cycles +system.cpu.dcache.ReadReq_mshr_miss_latency::total 33173534500 # number of ReadReq MSHR miss cycles +system.cpu.dcache.WriteReq_mshr_miss_latency::cpu.data 23341678000 # number of WriteReq MSHR miss cycles +system.cpu.dcache.WriteReq_mshr_miss_latency::total 23341678000 # number of WriteReq MSHR miss cycles +system.cpu.dcache.demand_mshr_miss_latency::cpu.data 56515212500 # number of demand (read+write) MSHR miss cycles +system.cpu.dcache.demand_mshr_miss_latency::total 56515212500 # number of demand (read+write) MSHR miss cycles +system.cpu.dcache.overall_mshr_miss_latency::cpu.data 56515212500 # number of overall MSHR miss cycles +system.cpu.dcache.overall_mshr_miss_latency::total 56515212500 # number of overall MSHR miss cycles +system.cpu.dcache.ReadReq_mshr_miss_rate::cpu.data 0.011818 # mshr miss rate for ReadReq accesses +system.cpu.dcache.ReadReq_mshr_miss_rate::total 0.011818 # mshr miss rate for ReadReq accesses system.cpu.dcache.WriteReq_mshr_miss_rate::cpu.data 0.013543 # mshr miss rate for WriteReq accesses system.cpu.dcache.WriteReq_mshr_miss_rate::total 0.013543 # mshr miss rate for WriteReq accesses -system.cpu.dcache.demand_mshr_miss_rate::cpu.data 0.012326 # mshr miss rate for demand accesses -system.cpu.dcache.demand_mshr_miss_rate::total 0.012326 # mshr miss rate for demand accesses -system.cpu.dcache.overall_mshr_miss_rate::cpu.data 0.012326 # mshr miss rate for overall accesses -system.cpu.dcache.overall_mshr_miss_rate::total 0.012326 # mshr miss rate for overall accesses -system.cpu.dcache.ReadReq_avg_mshr_miss_latency::cpu.data 18816.666204 # average ReadReq mshr miss latency -system.cpu.dcache.ReadReq_avg_mshr_miss_latency::total 18816.666204 # average ReadReq mshr miss latency -system.cpu.dcache.WriteReq_avg_mshr_miss_latency::cpu.data 30137.110024 # average WriteReq mshr miss latency -system.cpu.dcache.WriteReq_avg_mshr_miss_latency::total 30137.110024 # average WriteReq mshr miss latency -system.cpu.dcache.demand_avg_mshr_miss_latency::cpu.data 22270.814661 # average overall mshr miss latency -system.cpu.dcache.demand_avg_mshr_miss_latency::total 22270.814661 # average overall mshr miss latency -system.cpu.dcache.overall_avg_mshr_miss_latency::cpu.data 22270.814661 # average overall mshr miss latency -system.cpu.dcache.overall_avg_mshr_miss_latency::total 22270.814661 # average overall mshr miss latency -system.cpu.icache.tags.pwrStateResidencyTicks::UNDEFINED 412079966500 # Cumulative time (in ticks) in various power states -system.cpu.icache.tags.replacements 3158 # number of replacements -system.cpu.icache.tags.tagsinuse 1117.678366 # Cycle average of tags in use -system.cpu.icache.tags.total_refs 226045682 # Total number of references to valid blocks. -system.cpu.icache.tags.sampled_refs 4986 # Sample count of references to valid blocks. -system.cpu.icache.tags.avg_refs 45336.077417 # Average number of references to valid blocks. 
+system.cpu.dcache.demand_mshr_miss_rate::cpu.data 0.012296 # mshr miss rate for demand accesses +system.cpu.dcache.demand_mshr_miss_rate::total 0.012296 # mshr miss rate for demand accesses +system.cpu.dcache.overall_mshr_miss_rate::cpu.data 0.012296 # mshr miss rate for overall accesses +system.cpu.dcache.overall_mshr_miss_rate::total 0.012296 # mshr miss rate for overall accesses +system.cpu.dcache.ReadReq_avg_mshr_miss_latency::cpu.data 18797.376297 # average ReadReq mshr miss latency +system.cpu.dcache.ReadReq_avg_mshr_miss_latency::total 18797.376297 # average ReadReq mshr miss latency +system.cpu.dcache.WriteReq_avg_mshr_miss_latency::cpu.data 30125.718726 # average WriteReq mshr miss latency +system.cpu.dcache.WriteReq_avg_mshr_miss_latency::total 30125.718726 # average WriteReq mshr miss latency +system.cpu.dcache.demand_avg_mshr_miss_latency::cpu.data 22253.544350 # average overall mshr miss latency +system.cpu.dcache.demand_avg_mshr_miss_latency::total 22253.544350 # average overall mshr miss latency +system.cpu.dcache.overall_avg_mshr_miss_latency::cpu.data 22253.544350 # average overall mshr miss latency +system.cpu.dcache.overall_avg_mshr_miss_latency::total 22253.544350 # average overall mshr miss latency +system.cpu.icache.tags.pwrStateResidencyTicks::UNDEFINED 417309765500 # Cumulative time (in ticks) in various power states +system.cpu.icache.tags.replacements 3176 # number of replacements +system.cpu.icache.tags.tagsinuse 1116.866766 # Cycle average of tags in use +system.cpu.icache.tags.total_refs 227952177 # Total number of references to valid blocks. +system.cpu.icache.tags.sampled_refs 5005 # Sample count of references to valid blocks. +system.cpu.icache.tags.avg_refs 45544.890509 # Average number of references to valid blocks. system.cpu.icache.tags.warmup_cycle 0 # Cycle when the warmup percentage was hit. 
-system.cpu.icache.tags.occ_blocks::cpu.inst 1117.678366 # Average occupied blocks per requestor -system.cpu.icache.tags.occ_percent::cpu.inst 0.545741 # Average percentage of cache occupancy -system.cpu.icache.tags.occ_percent::total 0.545741 # Average percentage of cache occupancy -system.cpu.icache.tags.occ_task_id_blocks::1024 1828 # Occupied blocks per task id -system.cpu.icache.tags.age_task_id_blocks_1024::0 66 # Occupied blocks per task id +system.cpu.icache.tags.occ_blocks::cpu.inst 1116.866766 # Average occupied blocks per requestor +system.cpu.icache.tags.occ_percent::cpu.inst 0.545345 # Average percentage of cache occupancy +system.cpu.icache.tags.occ_percent::total 0.545345 # Average percentage of cache occupancy +system.cpu.icache.tags.occ_task_id_blocks::1024 1829 # Occupied blocks per task id +system.cpu.icache.tags.age_task_id_blocks_1024::0 65 # Occupied blocks per task id system.cpu.icache.tags.age_task_id_blocks_1024::1 80 # Occupied blocks per task id system.cpu.icache.tags.age_task_id_blocks_1024::2 17 # Occupied blocks per task id system.cpu.icache.tags.age_task_id_blocks_1024::3 75 # Occupied blocks per task id -system.cpu.icache.tags.age_task_id_blocks_1024::4 1590 # Occupied blocks per task id -system.cpu.icache.tags.occ_task_id_percent::1024 0.892578 # Percentage of cache occupancy per task id -system.cpu.icache.tags.tag_accesses 452106322 # Number of tag accesses -system.cpu.icache.tags.data_accesses 452106322 # Number of data accesses -system.cpu.icache.pwrStateResidencyTicks::UNDEFINED 412079966500 # Cumulative time (in ticks) in various power states -system.cpu.icache.ReadReq_hits::cpu.inst 226045682 # number of ReadReq hits -system.cpu.icache.ReadReq_hits::total 226045682 # number of ReadReq hits -system.cpu.icache.demand_hits::cpu.inst 226045682 # number of demand (read+write) hits -system.cpu.icache.demand_hits::total 226045682 # number of demand (read+write) hits -system.cpu.icache.overall_hits::cpu.inst 226045682 # number of overall hits -system.cpu.icache.overall_hits::total 226045682 # number of overall hits -system.cpu.icache.ReadReq_misses::cpu.inst 4986 # number of ReadReq misses -system.cpu.icache.ReadReq_misses::total 4986 # number of ReadReq misses -system.cpu.icache.demand_misses::cpu.inst 4986 # number of demand (read+write) misses -system.cpu.icache.demand_misses::total 4986 # number of demand (read+write) misses -system.cpu.icache.overall_misses::cpu.inst 4986 # number of overall misses -system.cpu.icache.overall_misses::total 4986 # number of overall misses -system.cpu.icache.ReadReq_miss_latency::cpu.inst 233628500 # number of ReadReq miss cycles -system.cpu.icache.ReadReq_miss_latency::total 233628500 # number of ReadReq miss cycles -system.cpu.icache.demand_miss_latency::cpu.inst 233628500 # number of demand (read+write) miss cycles -system.cpu.icache.demand_miss_latency::total 233628500 # number of demand (read+write) miss cycles -system.cpu.icache.overall_miss_latency::cpu.inst 233628500 # number of overall miss cycles -system.cpu.icache.overall_miss_latency::total 233628500 # number of overall miss cycles -system.cpu.icache.ReadReq_accesses::cpu.inst 226050668 # number of ReadReq accesses(hits+misses) -system.cpu.icache.ReadReq_accesses::total 226050668 # number of ReadReq accesses(hits+misses) -system.cpu.icache.demand_accesses::cpu.inst 226050668 # number of demand (read+write) accesses -system.cpu.icache.demand_accesses::total 226050668 # number of demand (read+write) accesses -system.cpu.icache.overall_accesses::cpu.inst 226050668 
# number of overall (read+write) accesses -system.cpu.icache.overall_accesses::total 226050668 # number of overall (read+write) accesses +system.cpu.icache.tags.age_task_id_blocks_1024::4 1592 # Occupied blocks per task id +system.cpu.icache.tags.occ_task_id_percent::1024 0.893066 # Percentage of cache occupancy per task id +system.cpu.icache.tags.tag_accesses 455919369 # Number of tag accesses +system.cpu.icache.tags.data_accesses 455919369 # Number of data accesses +system.cpu.icache.pwrStateResidencyTicks::UNDEFINED 417309765500 # Cumulative time (in ticks) in various power states +system.cpu.icache.ReadReq_hits::cpu.inst 227952177 # number of ReadReq hits +system.cpu.icache.ReadReq_hits::total 227952177 # number of ReadReq hits +system.cpu.icache.demand_hits::cpu.inst 227952177 # number of demand (read+write) hits +system.cpu.icache.demand_hits::total 227952177 # number of demand (read+write) hits +system.cpu.icache.overall_hits::cpu.inst 227952177 # number of overall hits +system.cpu.icache.overall_hits::total 227952177 # number of overall hits +system.cpu.icache.ReadReq_misses::cpu.inst 5005 # number of ReadReq misses +system.cpu.icache.ReadReq_misses::total 5005 # number of ReadReq misses +system.cpu.icache.demand_misses::cpu.inst 5005 # number of demand (read+write) misses +system.cpu.icache.demand_misses::total 5005 # number of demand (read+write) misses +system.cpu.icache.overall_misses::cpu.inst 5005 # number of overall misses +system.cpu.icache.overall_misses::total 5005 # number of overall misses +system.cpu.icache.ReadReq_miss_latency::cpu.inst 230776000 # number of ReadReq miss cycles +system.cpu.icache.ReadReq_miss_latency::total 230776000 # number of ReadReq miss cycles +system.cpu.icache.demand_miss_latency::cpu.inst 230776000 # number of demand (read+write) miss cycles +system.cpu.icache.demand_miss_latency::total 230776000 # number of demand (read+write) miss cycles +system.cpu.icache.overall_miss_latency::cpu.inst 230776000 # number of overall miss cycles +system.cpu.icache.overall_miss_latency::total 230776000 # number of overall miss cycles +system.cpu.icache.ReadReq_accesses::cpu.inst 227957182 # number of ReadReq accesses(hits+misses) +system.cpu.icache.ReadReq_accesses::total 227957182 # number of ReadReq accesses(hits+misses) +system.cpu.icache.demand_accesses::cpu.inst 227957182 # number of demand (read+write) accesses +system.cpu.icache.demand_accesses::total 227957182 # number of demand (read+write) accesses +system.cpu.icache.overall_accesses::cpu.inst 227957182 # number of overall (read+write) accesses +system.cpu.icache.overall_accesses::total 227957182 # number of overall (read+write) accesses system.cpu.icache.ReadReq_miss_rate::cpu.inst 0.000022 # miss rate for ReadReq accesses system.cpu.icache.ReadReq_miss_rate::total 0.000022 # miss rate for ReadReq accesses system.cpu.icache.demand_miss_rate::cpu.inst 0.000022 # miss rate for demand accesses system.cpu.icache.demand_miss_rate::total 0.000022 # miss rate for demand accesses system.cpu.icache.overall_miss_rate::cpu.inst 0.000022 # miss rate for overall accesses system.cpu.icache.overall_miss_rate::total 0.000022 # miss rate for overall accesses -system.cpu.icache.ReadReq_avg_miss_latency::cpu.inst 46856.899318 # average ReadReq miss latency -system.cpu.icache.ReadReq_avg_miss_latency::total 46856.899318 # average ReadReq miss latency -system.cpu.icache.demand_avg_miss_latency::cpu.inst 46856.899318 # average overall miss latency -system.cpu.icache.demand_avg_miss_latency::total 46856.899318 # average 
overall miss latency -system.cpu.icache.overall_avg_miss_latency::cpu.inst 46856.899318 # average overall miss latency -system.cpu.icache.overall_avg_miss_latency::total 46856.899318 # average overall miss latency +system.cpu.icache.ReadReq_avg_miss_latency::cpu.inst 46109.090909 # average ReadReq miss latency +system.cpu.icache.ReadReq_avg_miss_latency::total 46109.090909 # average ReadReq miss latency +system.cpu.icache.demand_avg_miss_latency::cpu.inst 46109.090909 # average overall miss latency +system.cpu.icache.demand_avg_miss_latency::total 46109.090909 # average overall miss latency +system.cpu.icache.overall_avg_miss_latency::cpu.inst 46109.090909 # average overall miss latency +system.cpu.icache.overall_avg_miss_latency::total 46109.090909 # average overall miss latency system.cpu.icache.blocked_cycles::no_mshrs 0 # number of cycles access was blocked system.cpu.icache.blocked_cycles::no_targets 0 # number of cycles access was blocked system.cpu.icache.blocked::no_mshrs 0 # number of cycles access was blocked system.cpu.icache.blocked::no_targets 0 # number of cycles access was blocked system.cpu.icache.avg_blocked_cycles::no_mshrs nan # average number of cycles each access was blocked system.cpu.icache.avg_blocked_cycles::no_targets nan # average number of cycles each access was blocked -system.cpu.icache.writebacks::writebacks 3158 # number of writebacks -system.cpu.icache.writebacks::total 3158 # number of writebacks -system.cpu.icache.ReadReq_mshr_misses::cpu.inst 4986 # number of ReadReq MSHR misses -system.cpu.icache.ReadReq_mshr_misses::total 4986 # number of ReadReq MSHR misses -system.cpu.icache.demand_mshr_misses::cpu.inst 4986 # number of demand (read+write) MSHR misses -system.cpu.icache.demand_mshr_misses::total 4986 # number of demand (read+write) MSHR misses -system.cpu.icache.overall_mshr_misses::cpu.inst 4986 # number of overall MSHR misses -system.cpu.icache.overall_mshr_misses::total 4986 # number of overall MSHR misses -system.cpu.icache.ReadReq_mshr_miss_latency::cpu.inst 228642500 # number of ReadReq MSHR miss cycles -system.cpu.icache.ReadReq_mshr_miss_latency::total 228642500 # number of ReadReq MSHR miss cycles -system.cpu.icache.demand_mshr_miss_latency::cpu.inst 228642500 # number of demand (read+write) MSHR miss cycles -system.cpu.icache.demand_mshr_miss_latency::total 228642500 # number of demand (read+write) MSHR miss cycles -system.cpu.icache.overall_mshr_miss_latency::cpu.inst 228642500 # number of overall MSHR miss cycles -system.cpu.icache.overall_mshr_miss_latency::total 228642500 # number of overall MSHR miss cycles +system.cpu.icache.writebacks::writebacks 3176 # number of writebacks +system.cpu.icache.writebacks::total 3176 # number of writebacks +system.cpu.icache.ReadReq_mshr_misses::cpu.inst 5005 # number of ReadReq MSHR misses +system.cpu.icache.ReadReq_mshr_misses::total 5005 # number of ReadReq MSHR misses +system.cpu.icache.demand_mshr_misses::cpu.inst 5005 # number of demand (read+write) MSHR misses +system.cpu.icache.demand_mshr_misses::total 5005 # number of demand (read+write) MSHR misses +system.cpu.icache.overall_mshr_misses::cpu.inst 5005 # number of overall MSHR misses +system.cpu.icache.overall_mshr_misses::total 5005 # number of overall MSHR misses +system.cpu.icache.ReadReq_mshr_miss_latency::cpu.inst 225771000 # number of ReadReq MSHR miss cycles +system.cpu.icache.ReadReq_mshr_miss_latency::total 225771000 # number of ReadReq MSHR miss cycles +system.cpu.icache.demand_mshr_miss_latency::cpu.inst 225771000 # number of 
demand (read+write) MSHR miss cycles +system.cpu.icache.demand_mshr_miss_latency::total 225771000 # number of demand (read+write) MSHR miss cycles +system.cpu.icache.overall_mshr_miss_latency::cpu.inst 225771000 # number of overall MSHR miss cycles +system.cpu.icache.overall_mshr_miss_latency::total 225771000 # number of overall MSHR miss cycles system.cpu.icache.ReadReq_mshr_miss_rate::cpu.inst 0.000022 # mshr miss rate for ReadReq accesses system.cpu.icache.ReadReq_mshr_miss_rate::total 0.000022 # mshr miss rate for ReadReq accesses system.cpu.icache.demand_mshr_miss_rate::cpu.inst 0.000022 # mshr miss rate for demand accesses system.cpu.icache.demand_mshr_miss_rate::total 0.000022 # mshr miss rate for demand accesses system.cpu.icache.overall_mshr_miss_rate::cpu.inst 0.000022 # mshr miss rate for overall accesses system.cpu.icache.overall_mshr_miss_rate::total 0.000022 # mshr miss rate for overall accesses -system.cpu.icache.ReadReq_avg_mshr_miss_latency::cpu.inst 45856.899318 # average ReadReq mshr miss latency -system.cpu.icache.ReadReq_avg_mshr_miss_latency::total 45856.899318 # average ReadReq mshr miss latency -system.cpu.icache.demand_avg_mshr_miss_latency::cpu.inst 45856.899318 # average overall mshr miss latency -system.cpu.icache.demand_avg_mshr_miss_latency::total 45856.899318 # average overall mshr miss latency -system.cpu.icache.overall_avg_mshr_miss_latency::cpu.inst 45856.899318 # average overall mshr miss latency -system.cpu.icache.overall_avg_mshr_miss_latency::total 45856.899318 # average overall mshr miss latency -system.cpu.l2cache.tags.pwrStateResidencyTicks::UNDEFINED 412079966500 # Cumulative time (in ticks) in various power states -system.cpu.l2cache.tags.replacements 347705 # number of replacements -system.cpu.l2cache.tags.tagsinuse 29504.977164 # Cycle average of tags in use -system.cpu.l2cache.tags.total_refs 3908748 # Total number of references to valid blocks. -system.cpu.l2cache.tags.sampled_refs 380135 # Sample count of references to valid blocks. -system.cpu.l2cache.tags.avg_refs 10.282526 # Average number of references to valid blocks. -system.cpu.l2cache.tags.warmup_cycle 189119343500 # Cycle when the warmup percentage was hit. 
-system.cpu.l2cache.tags.occ_blocks::writebacks 21322.016390 # Average occupied blocks per requestor -system.cpu.l2cache.tags.occ_blocks::cpu.inst 160.931124 # Average occupied blocks per requestor -system.cpu.l2cache.tags.occ_blocks::cpu.data 8022.029650 # Average occupied blocks per requestor -system.cpu.l2cache.tags.occ_percent::writebacks 0.650696 # Average percentage of cache occupancy +system.cpu.icache.ReadReq_avg_mshr_miss_latency::cpu.inst 45109.090909 # average ReadReq mshr miss latency +system.cpu.icache.ReadReq_avg_mshr_miss_latency::total 45109.090909 # average ReadReq mshr miss latency +system.cpu.icache.demand_avg_mshr_miss_latency::cpu.inst 45109.090909 # average overall mshr miss latency +system.cpu.icache.demand_avg_mshr_miss_latency::total 45109.090909 # average overall mshr miss latency +system.cpu.icache.overall_avg_mshr_miss_latency::cpu.inst 45109.090909 # average overall mshr miss latency +system.cpu.icache.overall_avg_mshr_miss_latency::total 45109.090909 # average overall mshr miss latency +system.cpu.l2cache.tags.pwrStateResidencyTicks::UNDEFINED 417309765500 # Cumulative time (in ticks) in various power states +system.cpu.l2cache.tags.replacements 347716 # number of replacements +system.cpu.l2cache.tags.tagsinuse 29508.447379 # Cycle average of tags in use +system.cpu.l2cache.tags.total_refs 3909297 # Total number of references to valid blocks. +system.cpu.l2cache.tags.sampled_refs 380147 # Sample count of references to valid blocks. +system.cpu.l2cache.tags.avg_refs 10.283646 # Average number of references to valid blocks. +system.cpu.l2cache.tags.warmup_cycle 191524989500 # Cycle when the warmup percentage was hit. +system.cpu.l2cache.tags.occ_blocks::writebacks 21334.159610 # Average occupied blocks per requestor +system.cpu.l2cache.tags.occ_blocks::cpu.inst 160.927719 # Average occupied blocks per requestor +system.cpu.l2cache.tags.occ_blocks::cpu.data 8013.360050 # Average occupied blocks per requestor +system.cpu.l2cache.tags.occ_percent::writebacks 0.651067 # Average percentage of cache occupancy system.cpu.l2cache.tags.occ_percent::cpu.inst 0.004911 # Average percentage of cache occupancy -system.cpu.l2cache.tags.occ_percent::cpu.data 0.244813 # Average percentage of cache occupancy -system.cpu.l2cache.tags.occ_percent::total 0.900420 # Average percentage of cache occupancy -system.cpu.l2cache.tags.occ_task_id_blocks::1024 32430 # Occupied blocks per task id -system.cpu.l2cache.tags.age_task_id_blocks_1024::0 157 # Occupied blocks per task id +system.cpu.l2cache.tags.occ_percent::cpu.data 0.244548 # Average percentage of cache occupancy +system.cpu.l2cache.tags.occ_percent::total 0.900526 # Average percentage of cache occupancy +system.cpu.l2cache.tags.occ_task_id_blocks::1024 32431 # Occupied blocks per task id +system.cpu.l2cache.tags.age_task_id_blocks_1024::0 155 # Occupied blocks per task id system.cpu.l2cache.tags.age_task_id_blocks_1024::1 122 # Occupied blocks per task id system.cpu.l2cache.tags.age_task_id_blocks_1024::2 223 # Occupied blocks per task id system.cpu.l2cache.tags.age_task_id_blocks_1024::3 13172 # Occupied blocks per task id -system.cpu.l2cache.tags.age_task_id_blocks_1024::4 18756 # Occupied blocks per task id -system.cpu.l2cache.tags.occ_task_id_percent::1024 0.989685 # Percentage of cache occupancy per task id -system.cpu.l2cache.tags.tag_accesses 41820503 # Number of tag accesses -system.cpu.l2cache.tags.data_accesses 41820503 # Number of data accesses -system.cpu.l2cache.pwrStateResidencyTicks::UNDEFINED 412079966500 # 
Cumulative time (in ticks) in various power states -system.cpu.l2cache.WritebackDirty_hits::writebacks 2339413 # number of WritebackDirty hits -system.cpu.l2cache.WritebackDirty_hits::total 2339413 # number of WritebackDirty hits -system.cpu.l2cache.WritebackClean_hits::writebacks 3158 # number of WritebackClean hits -system.cpu.l2cache.WritebackClean_hits::total 3158 # number of WritebackClean hits -system.cpu.l2cache.ReadExReq_hits::cpu.data 571852 # number of ReadExReq hits -system.cpu.l2cache.ReadExReq_hits::total 571852 # number of ReadExReq hits -system.cpu.l2cache.ReadCleanReq_hits::cpu.inst 2539 # number of ReadCleanReq hits -system.cpu.l2cache.ReadCleanReq_hits::total 2539 # number of ReadCleanReq hits -system.cpu.l2cache.ReadSharedReq_hits::cpu.data 1590273 # number of ReadSharedReq hits -system.cpu.l2cache.ReadSharedReq_hits::total 1590273 # number of ReadSharedReq hits -system.cpu.l2cache.demand_hits::cpu.inst 2539 # number of demand (read+write) hits -system.cpu.l2cache.demand_hits::cpu.data 2162125 # number of demand (read+write) hits -system.cpu.l2cache.demand_hits::total 2164664 # number of demand (read+write) hits -system.cpu.l2cache.overall_hits::cpu.inst 2539 # number of overall hits -system.cpu.l2cache.overall_hits::cpu.data 2162125 # number of overall hits -system.cpu.l2cache.overall_hits::total 2164664 # number of overall hits -system.cpu.l2cache.ReadExReq_misses::cpu.data 206308 # number of ReadExReq misses -system.cpu.l2cache.ReadExReq_misses::total 206308 # number of ReadExReq misses -system.cpu.l2cache.ReadCleanReq_misses::cpu.inst 2447 # number of ReadCleanReq misses -system.cpu.l2cache.ReadCleanReq_misses::total 2447 # number of ReadCleanReq misses -system.cpu.l2cache.ReadSharedReq_misses::cpu.data 170931 # number of ReadSharedReq misses -system.cpu.l2cache.ReadSharedReq_misses::total 170931 # number of ReadSharedReq misses -system.cpu.l2cache.demand_misses::cpu.inst 2447 # number of demand (read+write) misses -system.cpu.l2cache.demand_misses::cpu.data 377239 # number of demand (read+write) misses -system.cpu.l2cache.demand_misses::total 379686 # number of demand (read+write) misses -system.cpu.l2cache.overall_misses::cpu.inst 2447 # number of overall misses -system.cpu.l2cache.overall_misses::cpu.data 377239 # number of overall misses -system.cpu.l2cache.overall_misses::total 379686 # number of overall misses -system.cpu.l2cache.ReadExReq_miss_latency::cpu.data 16226611500 # number of ReadExReq miss cycles -system.cpu.l2cache.ReadExReq_miss_latency::total 16226611500 # number of ReadExReq miss cycles -system.cpu.l2cache.ReadCleanReq_miss_latency::cpu.inst 194481500 # number of ReadCleanReq miss cycles -system.cpu.l2cache.ReadCleanReq_miss_latency::total 194481500 # number of ReadCleanReq miss cycles -system.cpu.l2cache.ReadSharedReq_miss_latency::cpu.data 13777909500 # number of ReadSharedReq miss cycles -system.cpu.l2cache.ReadSharedReq_miss_latency::total 13777909500 # number of ReadSharedReq miss cycles -system.cpu.l2cache.demand_miss_latency::cpu.inst 194481500 # number of demand (read+write) miss cycles -system.cpu.l2cache.demand_miss_latency::cpu.data 30004521000 # number of demand (read+write) miss cycles -system.cpu.l2cache.demand_miss_latency::total 30199002500 # number of demand (read+write) miss cycles -system.cpu.l2cache.overall_miss_latency::cpu.inst 194481500 # number of overall miss cycles -system.cpu.l2cache.overall_miss_latency::cpu.data 30004521000 # number of overall miss cycles -system.cpu.l2cache.overall_miss_latency::total 30199002500 # 
number of overall miss cycles -system.cpu.l2cache.WritebackDirty_accesses::writebacks 2339413 # number of WritebackDirty accesses(hits+misses) -system.cpu.l2cache.WritebackDirty_accesses::total 2339413 # number of WritebackDirty accesses(hits+misses) -system.cpu.l2cache.WritebackClean_accesses::writebacks 3158 # number of WritebackClean accesses(hits+misses) -system.cpu.l2cache.WritebackClean_accesses::total 3158 # number of WritebackClean accesses(hits+misses) -system.cpu.l2cache.ReadExReq_accesses::cpu.data 778160 # number of ReadExReq accesses(hits+misses) -system.cpu.l2cache.ReadExReq_accesses::total 778160 # number of ReadExReq accesses(hits+misses) -system.cpu.l2cache.ReadCleanReq_accesses::cpu.inst 4986 # number of ReadCleanReq accesses(hits+misses) -system.cpu.l2cache.ReadCleanReq_accesses::total 4986 # number of ReadCleanReq accesses(hits+misses) -system.cpu.l2cache.ReadSharedReq_accesses::cpu.data 1761204 # number of ReadSharedReq accesses(hits+misses) -system.cpu.l2cache.ReadSharedReq_accesses::total 1761204 # number of ReadSharedReq accesses(hits+misses) -system.cpu.l2cache.demand_accesses::cpu.inst 4986 # number of demand (read+write) accesses -system.cpu.l2cache.demand_accesses::cpu.data 2539364 # number of demand (read+write) accesses -system.cpu.l2cache.demand_accesses::total 2544350 # number of demand (read+write) accesses -system.cpu.l2cache.overall_accesses::cpu.inst 4986 # number of overall (read+write) accesses -system.cpu.l2cache.overall_accesses::cpu.data 2539364 # number of overall (read+write) accesses -system.cpu.l2cache.overall_accesses::total 2544350 # number of overall (read+write) accesses -system.cpu.l2cache.ReadExReq_miss_rate::cpu.data 0.265123 # miss rate for ReadExReq accesses -system.cpu.l2cache.ReadExReq_miss_rate::total 0.265123 # miss rate for ReadExReq accesses -system.cpu.l2cache.ReadCleanReq_miss_rate::cpu.inst 0.490774 # miss rate for ReadCleanReq accesses -system.cpu.l2cache.ReadCleanReq_miss_rate::total 0.490774 # miss rate for ReadCleanReq accesses -system.cpu.l2cache.ReadSharedReq_miss_rate::cpu.data 0.097053 # miss rate for ReadSharedReq accesses -system.cpu.l2cache.ReadSharedReq_miss_rate::total 0.097053 # miss rate for ReadSharedReq accesses -system.cpu.l2cache.demand_miss_rate::cpu.inst 0.490774 # miss rate for demand accesses -system.cpu.l2cache.demand_miss_rate::cpu.data 0.148556 # miss rate for demand accesses -system.cpu.l2cache.demand_miss_rate::total 0.149227 # miss rate for demand accesses -system.cpu.l2cache.overall_miss_rate::cpu.inst 0.490774 # miss rate for overall accesses -system.cpu.l2cache.overall_miss_rate::cpu.data 0.148556 # miss rate for overall accesses -system.cpu.l2cache.overall_miss_rate::total 0.149227 # miss rate for overall accesses -system.cpu.l2cache.ReadExReq_avg_miss_latency::cpu.data 78652.362002 # average ReadExReq miss latency -system.cpu.l2cache.ReadExReq_avg_miss_latency::total 78652.362002 # average ReadExReq miss latency -system.cpu.l2cache.ReadCleanReq_avg_miss_latency::cpu.inst 79477.523498 # average ReadCleanReq miss latency -system.cpu.l2cache.ReadCleanReq_avg_miss_latency::total 79477.523498 # average ReadCleanReq miss latency -system.cpu.l2cache.ReadSharedReq_avg_miss_latency::cpu.data 80605.095038 # average ReadSharedReq miss latency -system.cpu.l2cache.ReadSharedReq_avg_miss_latency::total 80605.095038 # average ReadSharedReq miss latency -system.cpu.l2cache.demand_avg_miss_latency::cpu.inst 79477.523498 # average overall miss latency -system.cpu.l2cache.demand_avg_miss_latency::cpu.data 
79537.166094 # average overall miss latency -system.cpu.l2cache.demand_avg_miss_latency::total 79536.781709 # average overall miss latency -system.cpu.l2cache.overall_avg_miss_latency::cpu.inst 79477.523498 # average overall miss latency -system.cpu.l2cache.overall_avg_miss_latency::cpu.data 79537.166094 # average overall miss latency -system.cpu.l2cache.overall_avg_miss_latency::total 79536.781709 # average overall miss latency +system.cpu.l2cache.tags.age_task_id_blocks_1024::4 18759 # Occupied blocks per task id +system.cpu.l2cache.tags.occ_task_id_percent::1024 0.989716 # Percentage of cache occupancy per task id +system.cpu.l2cache.tags.tag_accesses 41824659 # Number of tag accesses +system.cpu.l2cache.tags.data_accesses 41824659 # Number of data accesses +system.cpu.l2cache.pwrStateResidencyTicks::UNDEFINED 417309765500 # Cumulative time (in ticks) in various power states +system.cpu.l2cache.WritebackDirty_hits::writebacks 2339608 # number of WritebackDirty hits +system.cpu.l2cache.WritebackDirty_hits::total 2339608 # number of WritebackDirty hits +system.cpu.l2cache.WritebackClean_hits::writebacks 3176 # number of WritebackClean hits +system.cpu.l2cache.WritebackClean_hits::total 3176 # number of WritebackClean hits +system.cpu.l2cache.ReadExReq_hits::cpu.data 571847 # number of ReadExReq hits +system.cpu.l2cache.ReadExReq_hits::total 571847 # number of ReadExReq hits +system.cpu.l2cache.ReadCleanReq_hits::cpu.inst 2559 # number of ReadCleanReq hits +system.cpu.l2cache.ReadCleanReq_hits::total 2559 # number of ReadCleanReq hits +system.cpu.l2cache.ReadSharedReq_hits::cpu.data 1590506 # number of ReadSharedReq hits +system.cpu.l2cache.ReadSharedReq_hits::total 1590506 # number of ReadSharedReq hits +system.cpu.l2cache.demand_hits::cpu.inst 2559 # number of demand (read+write) hits +system.cpu.l2cache.demand_hits::cpu.data 2162353 # number of demand (read+write) hits +system.cpu.l2cache.demand_hits::total 2164912 # number of demand (read+write) hits +system.cpu.l2cache.overall_hits::cpu.inst 2559 # number of overall hits +system.cpu.l2cache.overall_hits::cpu.data 2162353 # number of overall hits +system.cpu.l2cache.overall_hits::total 2164912 # number of overall hits +system.cpu.l2cache.ReadExReq_misses::cpu.data 206305 # number of ReadExReq misses +system.cpu.l2cache.ReadExReq_misses::total 206305 # number of ReadExReq misses +system.cpu.l2cache.ReadCleanReq_misses::cpu.inst 2446 # number of ReadCleanReq misses +system.cpu.l2cache.ReadCleanReq_misses::total 2446 # number of ReadCleanReq misses +system.cpu.l2cache.ReadSharedReq_misses::cpu.data 170947 # number of ReadSharedReq misses +system.cpu.l2cache.ReadSharedReq_misses::total 170947 # number of ReadSharedReq misses +system.cpu.l2cache.demand_misses::cpu.inst 2446 # number of demand (read+write) misses +system.cpu.l2cache.demand_misses::cpu.data 377252 # number of demand (read+write) misses +system.cpu.l2cache.demand_misses::total 379698 # number of demand (read+write) misses +system.cpu.l2cache.overall_misses::cpu.inst 2446 # number of overall misses +system.cpu.l2cache.overall_misses::cpu.data 377252 # number of overall misses +system.cpu.l2cache.overall_misses::total 379698 # number of overall misses +system.cpu.l2cache.ReadExReq_miss_latency::cpu.data 16217980000 # number of ReadExReq miss cycles +system.cpu.l2cache.ReadExReq_miss_latency::total 16217980000 # number of ReadExReq miss cycles +system.cpu.l2cache.ReadCleanReq_miss_latency::cpu.inst 191375500 # number of ReadCleanReq miss cycles 
+system.cpu.l2cache.ReadCleanReq_miss_latency::total 191375500 # number of ReadCleanReq miss cycles +system.cpu.l2cache.ReadSharedReq_miss_latency::cpu.data 13768542000 # number of ReadSharedReq miss cycles +system.cpu.l2cache.ReadSharedReq_miss_latency::total 13768542000 # number of ReadSharedReq miss cycles +system.cpu.l2cache.demand_miss_latency::cpu.inst 191375500 # number of demand (read+write) miss cycles +system.cpu.l2cache.demand_miss_latency::cpu.data 29986522000 # number of demand (read+write) miss cycles +system.cpu.l2cache.demand_miss_latency::total 30177897500 # number of demand (read+write) miss cycles +system.cpu.l2cache.overall_miss_latency::cpu.inst 191375500 # number of overall miss cycles +system.cpu.l2cache.overall_miss_latency::cpu.data 29986522000 # number of overall miss cycles +system.cpu.l2cache.overall_miss_latency::total 30177897500 # number of overall miss cycles +system.cpu.l2cache.WritebackDirty_accesses::writebacks 2339608 # number of WritebackDirty accesses(hits+misses) +system.cpu.l2cache.WritebackDirty_accesses::total 2339608 # number of WritebackDirty accesses(hits+misses) +system.cpu.l2cache.WritebackClean_accesses::writebacks 3176 # number of WritebackClean accesses(hits+misses) +system.cpu.l2cache.WritebackClean_accesses::total 3176 # number of WritebackClean accesses(hits+misses) +system.cpu.l2cache.ReadExReq_accesses::cpu.data 778152 # number of ReadExReq accesses(hits+misses) +system.cpu.l2cache.ReadExReq_accesses::total 778152 # number of ReadExReq accesses(hits+misses) +system.cpu.l2cache.ReadCleanReq_accesses::cpu.inst 5005 # number of ReadCleanReq accesses(hits+misses) +system.cpu.l2cache.ReadCleanReq_accesses::total 5005 # number of ReadCleanReq accesses(hits+misses) +system.cpu.l2cache.ReadSharedReq_accesses::cpu.data 1761453 # number of ReadSharedReq accesses(hits+misses) +system.cpu.l2cache.ReadSharedReq_accesses::total 1761453 # number of ReadSharedReq accesses(hits+misses) +system.cpu.l2cache.demand_accesses::cpu.inst 5005 # number of demand (read+write) accesses +system.cpu.l2cache.demand_accesses::cpu.data 2539605 # number of demand (read+write) accesses +system.cpu.l2cache.demand_accesses::total 2544610 # number of demand (read+write) accesses +system.cpu.l2cache.overall_accesses::cpu.inst 5005 # number of overall (read+write) accesses +system.cpu.l2cache.overall_accesses::cpu.data 2539605 # number of overall (read+write) accesses +system.cpu.l2cache.overall_accesses::total 2544610 # number of overall (read+write) accesses +system.cpu.l2cache.ReadExReq_miss_rate::cpu.data 0.265122 # miss rate for ReadExReq accesses +system.cpu.l2cache.ReadExReq_miss_rate::total 0.265122 # miss rate for ReadExReq accesses +system.cpu.l2cache.ReadCleanReq_miss_rate::cpu.inst 0.488711 # miss rate for ReadCleanReq accesses +system.cpu.l2cache.ReadCleanReq_miss_rate::total 0.488711 # miss rate for ReadCleanReq accesses +system.cpu.l2cache.ReadSharedReq_miss_rate::cpu.data 0.097049 # miss rate for ReadSharedReq accesses +system.cpu.l2cache.ReadSharedReq_miss_rate::total 0.097049 # miss rate for ReadSharedReq accesses +system.cpu.l2cache.demand_miss_rate::cpu.inst 0.488711 # miss rate for demand accesses +system.cpu.l2cache.demand_miss_rate::cpu.data 0.148548 # miss rate for demand accesses +system.cpu.l2cache.demand_miss_rate::total 0.149217 # miss rate for demand accesses +system.cpu.l2cache.overall_miss_rate::cpu.inst 0.488711 # miss rate for overall accesses +system.cpu.l2cache.overall_miss_rate::cpu.data 0.148548 # miss rate for overall accesses 
+system.cpu.l2cache.overall_miss_rate::total 0.149217 # miss rate for overall accesses +system.cpu.l2cache.ReadExReq_avg_miss_latency::cpu.data 78611.667192 # average ReadExReq miss latency +system.cpu.l2cache.ReadExReq_avg_miss_latency::total 78611.667192 # average ReadExReq miss latency +system.cpu.l2cache.ReadCleanReq_avg_miss_latency::cpu.inst 78240.188062 # average ReadCleanReq miss latency +system.cpu.l2cache.ReadCleanReq_avg_miss_latency::total 78240.188062 # average ReadCleanReq miss latency +system.cpu.l2cache.ReadSharedReq_avg_miss_latency::cpu.data 80542.753017 # average ReadSharedReq miss latency +system.cpu.l2cache.ReadSharedReq_avg_miss_latency::total 80542.753017 # average ReadSharedReq miss latency +system.cpu.l2cache.demand_avg_miss_latency::cpu.inst 78240.188062 # average overall miss latency +system.cpu.l2cache.demand_avg_miss_latency::cpu.data 79486.714451 # average overall miss latency +system.cpu.l2cache.demand_avg_miss_latency::total 79478.684375 # average overall miss latency +system.cpu.l2cache.overall_avg_miss_latency::cpu.inst 78240.188062 # average overall miss latency +system.cpu.l2cache.overall_avg_miss_latency::cpu.data 79486.714451 # average overall miss latency +system.cpu.l2cache.overall_avg_miss_latency::total 79478.684375 # average overall miss latency system.cpu.l2cache.blocked_cycles::no_mshrs 0 # number of cycles access was blocked system.cpu.l2cache.blocked_cycles::no_targets 0 # number of cycles access was blocked system.cpu.l2cache.blocked::no_mshrs 0 # number of cycles access was blocked @@ -695,120 +693,122 @@ system.cpu.l2cache.writebacks::writebacks 293607 # n system.cpu.l2cache.writebacks::total 293607 # number of writebacks system.cpu.l2cache.CleanEvict_mshr_misses::writebacks 5 # number of CleanEvict MSHR misses system.cpu.l2cache.CleanEvict_mshr_misses::total 5 # number of CleanEvict MSHR misses -system.cpu.l2cache.ReadExReq_mshr_misses::cpu.data 206308 # number of ReadExReq MSHR misses -system.cpu.l2cache.ReadExReq_mshr_misses::total 206308 # number of ReadExReq MSHR misses -system.cpu.l2cache.ReadCleanReq_mshr_misses::cpu.inst 2447 # number of ReadCleanReq MSHR misses -system.cpu.l2cache.ReadCleanReq_mshr_misses::total 2447 # number of ReadCleanReq MSHR misses -system.cpu.l2cache.ReadSharedReq_mshr_misses::cpu.data 170931 # number of ReadSharedReq MSHR misses -system.cpu.l2cache.ReadSharedReq_mshr_misses::total 170931 # number of ReadSharedReq MSHR misses -system.cpu.l2cache.demand_mshr_misses::cpu.inst 2447 # number of demand (read+write) MSHR misses -system.cpu.l2cache.demand_mshr_misses::cpu.data 377239 # number of demand (read+write) MSHR misses -system.cpu.l2cache.demand_mshr_misses::total 379686 # number of demand (read+write) MSHR misses -system.cpu.l2cache.overall_mshr_misses::cpu.inst 2447 # number of overall MSHR misses -system.cpu.l2cache.overall_mshr_misses::cpu.data 377239 # number of overall MSHR misses -system.cpu.l2cache.overall_mshr_misses::total 379686 # number of overall MSHR misses -system.cpu.l2cache.ReadExReq_mshr_miss_latency::cpu.data 14163531500 # number of ReadExReq MSHR miss cycles -system.cpu.l2cache.ReadExReq_mshr_miss_latency::total 14163531500 # number of ReadExReq MSHR miss cycles -system.cpu.l2cache.ReadCleanReq_mshr_miss_latency::cpu.inst 170011500 # number of ReadCleanReq MSHR miss cycles -system.cpu.l2cache.ReadCleanReq_mshr_miss_latency::total 170011500 # number of ReadCleanReq MSHR miss cycles -system.cpu.l2cache.ReadSharedReq_mshr_miss_latency::cpu.data 12068599500 # number of ReadSharedReq MSHR miss 
cycles -system.cpu.l2cache.ReadSharedReq_mshr_miss_latency::total 12068599500 # number of ReadSharedReq MSHR miss cycles -system.cpu.l2cache.demand_mshr_miss_latency::cpu.inst 170011500 # number of demand (read+write) MSHR miss cycles -system.cpu.l2cache.demand_mshr_miss_latency::cpu.data 26232131000 # number of demand (read+write) MSHR miss cycles -system.cpu.l2cache.demand_mshr_miss_latency::total 26402142500 # number of demand (read+write) MSHR miss cycles -system.cpu.l2cache.overall_mshr_miss_latency::cpu.inst 170011500 # number of overall MSHR miss cycles -system.cpu.l2cache.overall_mshr_miss_latency::cpu.data 26232131000 # number of overall MSHR miss cycles -system.cpu.l2cache.overall_mshr_miss_latency::total 26402142500 # number of overall MSHR miss cycles +system.cpu.l2cache.ReadExReq_mshr_misses::cpu.data 206305 # number of ReadExReq MSHR misses +system.cpu.l2cache.ReadExReq_mshr_misses::total 206305 # number of ReadExReq MSHR misses +system.cpu.l2cache.ReadCleanReq_mshr_misses::cpu.inst 2446 # number of ReadCleanReq MSHR misses +system.cpu.l2cache.ReadCleanReq_mshr_misses::total 2446 # number of ReadCleanReq MSHR misses +system.cpu.l2cache.ReadSharedReq_mshr_misses::cpu.data 170947 # number of ReadSharedReq MSHR misses +system.cpu.l2cache.ReadSharedReq_mshr_misses::total 170947 # number of ReadSharedReq MSHR misses +system.cpu.l2cache.demand_mshr_misses::cpu.inst 2446 # number of demand (read+write) MSHR misses +system.cpu.l2cache.demand_mshr_misses::cpu.data 377252 # number of demand (read+write) MSHR misses +system.cpu.l2cache.demand_mshr_misses::total 379698 # number of demand (read+write) MSHR misses +system.cpu.l2cache.overall_mshr_misses::cpu.inst 2446 # number of overall MSHR misses +system.cpu.l2cache.overall_mshr_misses::cpu.data 377252 # number of overall MSHR misses +system.cpu.l2cache.overall_mshr_misses::total 379698 # number of overall MSHR misses +system.cpu.l2cache.ReadExReq_mshr_miss_latency::cpu.data 14154930000 # number of ReadExReq MSHR miss cycles +system.cpu.l2cache.ReadExReq_mshr_miss_latency::total 14154930000 # number of ReadExReq MSHR miss cycles +system.cpu.l2cache.ReadCleanReq_mshr_miss_latency::cpu.inst 166915500 # number of ReadCleanReq MSHR miss cycles +system.cpu.l2cache.ReadCleanReq_mshr_miss_latency::total 166915500 # number of ReadCleanReq MSHR miss cycles +system.cpu.l2cache.ReadSharedReq_mshr_miss_latency::cpu.data 12059072000 # number of ReadSharedReq MSHR miss cycles +system.cpu.l2cache.ReadSharedReq_mshr_miss_latency::total 12059072000 # number of ReadSharedReq MSHR miss cycles +system.cpu.l2cache.demand_mshr_miss_latency::cpu.inst 166915500 # number of demand (read+write) MSHR miss cycles +system.cpu.l2cache.demand_mshr_miss_latency::cpu.data 26214002000 # number of demand (read+write) MSHR miss cycles +system.cpu.l2cache.demand_mshr_miss_latency::total 26380917500 # number of demand (read+write) MSHR miss cycles +system.cpu.l2cache.overall_mshr_miss_latency::cpu.inst 166915500 # number of overall MSHR miss cycles +system.cpu.l2cache.overall_mshr_miss_latency::cpu.data 26214002000 # number of overall MSHR miss cycles +system.cpu.l2cache.overall_mshr_miss_latency::total 26380917500 # number of overall MSHR miss cycles system.cpu.l2cache.CleanEvict_mshr_miss_rate::writebacks inf # mshr miss rate for CleanEvict accesses system.cpu.l2cache.CleanEvict_mshr_miss_rate::total inf # mshr miss rate for CleanEvict accesses -system.cpu.l2cache.ReadExReq_mshr_miss_rate::cpu.data 0.265123 # mshr miss rate for ReadExReq accesses 
-system.cpu.l2cache.ReadExReq_mshr_miss_rate::total 0.265123 # mshr miss rate for ReadExReq accesses -system.cpu.l2cache.ReadCleanReq_mshr_miss_rate::cpu.inst 0.490774 # mshr miss rate for ReadCleanReq accesses -system.cpu.l2cache.ReadCleanReq_mshr_miss_rate::total 0.490774 # mshr miss rate for ReadCleanReq accesses -system.cpu.l2cache.ReadSharedReq_mshr_miss_rate::cpu.data 0.097053 # mshr miss rate for ReadSharedReq accesses -system.cpu.l2cache.ReadSharedReq_mshr_miss_rate::total 0.097053 # mshr miss rate for ReadSharedReq accesses -system.cpu.l2cache.demand_mshr_miss_rate::cpu.inst 0.490774 # mshr miss rate for demand accesses -system.cpu.l2cache.demand_mshr_miss_rate::cpu.data 0.148556 # mshr miss rate for demand accesses -system.cpu.l2cache.demand_mshr_miss_rate::total 0.149227 # mshr miss rate for demand accesses -system.cpu.l2cache.overall_mshr_miss_rate::cpu.inst 0.490774 # mshr miss rate for overall accesses -system.cpu.l2cache.overall_mshr_miss_rate::cpu.data 0.148556 # mshr miss rate for overall accesses -system.cpu.l2cache.overall_mshr_miss_rate::total 0.149227 # mshr miss rate for overall accesses -system.cpu.l2cache.ReadExReq_avg_mshr_miss_latency::cpu.data 68652.362002 # average ReadExReq mshr miss latency -system.cpu.l2cache.ReadExReq_avg_mshr_miss_latency::total 68652.362002 # average ReadExReq mshr miss latency -system.cpu.l2cache.ReadCleanReq_avg_mshr_miss_latency::cpu.inst 69477.523498 # average ReadCleanReq mshr miss latency -system.cpu.l2cache.ReadCleanReq_avg_mshr_miss_latency::total 69477.523498 # average ReadCleanReq mshr miss latency -system.cpu.l2cache.ReadSharedReq_avg_mshr_miss_latency::cpu.data 70605.095038 # average ReadSharedReq mshr miss latency -system.cpu.l2cache.ReadSharedReq_avg_mshr_miss_latency::total 70605.095038 # average ReadSharedReq mshr miss latency -system.cpu.l2cache.demand_avg_mshr_miss_latency::cpu.inst 69477.523498 # average overall mshr miss latency -system.cpu.l2cache.demand_avg_mshr_miss_latency::cpu.data 69537.166094 # average overall mshr miss latency -system.cpu.l2cache.demand_avg_mshr_miss_latency::total 69536.781709 # average overall mshr miss latency -system.cpu.l2cache.overall_avg_mshr_miss_latency::cpu.inst 69477.523498 # average overall mshr miss latency -system.cpu.l2cache.overall_avg_mshr_miss_latency::cpu.data 69537.166094 # average overall mshr miss latency -system.cpu.l2cache.overall_avg_mshr_miss_latency::total 69536.781709 # average overall mshr miss latency -system.cpu.toL2Bus.snoop_filter.tot_requests 5082776 # Total number of requests made to the snoop filter. -system.cpu.toL2Bus.snoop_filter.hit_single_requests 2538426 # Number of requests hitting in the snoop filter with a single holder of the requested data. 
+system.cpu.l2cache.ReadExReq_mshr_miss_rate::cpu.data 0.265122 # mshr miss rate for ReadExReq accesses +system.cpu.l2cache.ReadExReq_mshr_miss_rate::total 0.265122 # mshr miss rate for ReadExReq accesses +system.cpu.l2cache.ReadCleanReq_mshr_miss_rate::cpu.inst 0.488711 # mshr miss rate for ReadCleanReq accesses +system.cpu.l2cache.ReadCleanReq_mshr_miss_rate::total 0.488711 # mshr miss rate for ReadCleanReq accesses +system.cpu.l2cache.ReadSharedReq_mshr_miss_rate::cpu.data 0.097049 # mshr miss rate for ReadSharedReq accesses +system.cpu.l2cache.ReadSharedReq_mshr_miss_rate::total 0.097049 # mshr miss rate for ReadSharedReq accesses +system.cpu.l2cache.demand_mshr_miss_rate::cpu.inst 0.488711 # mshr miss rate for demand accesses +system.cpu.l2cache.demand_mshr_miss_rate::cpu.data 0.148548 # mshr miss rate for demand accesses +system.cpu.l2cache.demand_mshr_miss_rate::total 0.149217 # mshr miss rate for demand accesses +system.cpu.l2cache.overall_mshr_miss_rate::cpu.inst 0.488711 # mshr miss rate for overall accesses +system.cpu.l2cache.overall_mshr_miss_rate::cpu.data 0.148548 # mshr miss rate for overall accesses +system.cpu.l2cache.overall_mshr_miss_rate::total 0.149217 # mshr miss rate for overall accesses +system.cpu.l2cache.ReadExReq_avg_mshr_miss_latency::cpu.data 68611.667192 # average ReadExReq mshr miss latency +system.cpu.l2cache.ReadExReq_avg_mshr_miss_latency::total 68611.667192 # average ReadExReq mshr miss latency +system.cpu.l2cache.ReadCleanReq_avg_mshr_miss_latency::cpu.inst 68240.188062 # average ReadCleanReq mshr miss latency +system.cpu.l2cache.ReadCleanReq_avg_mshr_miss_latency::total 68240.188062 # average ReadCleanReq mshr miss latency +system.cpu.l2cache.ReadSharedReq_avg_mshr_miss_latency::cpu.data 70542.753017 # average ReadSharedReq mshr miss latency +system.cpu.l2cache.ReadSharedReq_avg_mshr_miss_latency::total 70542.753017 # average ReadSharedReq mshr miss latency +system.cpu.l2cache.demand_avg_mshr_miss_latency::cpu.inst 68240.188062 # average overall mshr miss latency +system.cpu.l2cache.demand_avg_mshr_miss_latency::cpu.data 69486.714451 # average overall mshr miss latency +system.cpu.l2cache.demand_avg_mshr_miss_latency::total 69478.684375 # average overall mshr miss latency +system.cpu.l2cache.overall_avg_mshr_miss_latency::cpu.inst 68240.188062 # average overall mshr miss latency +system.cpu.l2cache.overall_avg_mshr_miss_latency::cpu.data 69486.714451 # average overall mshr miss latency +system.cpu.l2cache.overall_avg_mshr_miss_latency::total 69478.684375 # average overall mshr miss latency +system.cpu.toL2Bus.snoop_filter.tot_requests 5083295 # Total number of requests made to the snoop filter. +system.cpu.toL2Bus.snoop_filter.hit_single_requests 2538685 # Number of requests hitting in the snoop filter with a single holder of the requested data. system.cpu.toL2Bus.snoop_filter.hit_multi_requests 0 # Number of requests hitting in the snoop filter with multiple (>1) holders of the requested data. -system.cpu.toL2Bus.snoop_filter.tot_snoops 2394 # Total number of snoops made to the snoop filter. -system.cpu.toL2Bus.snoop_filter.hit_single_snoops 2394 # Number of snoops hitting in the snoop filter with a single holder of the requested data. +system.cpu.toL2Bus.snoop_filter.tot_snoops 2395 # Total number of snoops made to the snoop filter. +system.cpu.toL2Bus.snoop_filter.hit_single_snoops 2395 # Number of snoops hitting in the snoop filter with a single holder of the requested data. 
system.cpu.toL2Bus.snoop_filter.hit_multi_snoops 0 # Number of snoops hitting in the snoop filter with multiple (>1) holders of the requested data. -system.cpu.toL2Bus.pwrStateResidencyTicks::UNDEFINED 412079966500 # Cumulative time (in ticks) in various power states -system.cpu.toL2Bus.trans_dist::ReadResp 1766190 # Transaction distribution -system.cpu.toL2Bus.trans_dist::WritebackDirty 2633020 # Transaction distribution -system.cpu.toL2Bus.trans_dist::WritebackClean 3158 # Transaction distribution -system.cpu.toL2Bus.trans_dist::CleanEvict 249953 # Transaction distribution -system.cpu.toL2Bus.trans_dist::ReadExReq 778160 # Transaction distribution -system.cpu.toL2Bus.trans_dist::ReadExResp 778160 # Transaction distribution -system.cpu.toL2Bus.trans_dist::ReadCleanReq 4986 # Transaction distribution -system.cpu.toL2Bus.trans_dist::ReadSharedReq 1761204 # Transaction distribution -system.cpu.toL2Bus.pkt_count_system.cpu.icache.mem_side::system.cpu.l2cache.cpu_side 13130 # Packet count per connected master and slave (bytes) -system.cpu.toL2Bus.pkt_count_system.cpu.dcache.mem_side::system.cpu.l2cache.cpu_side 7613996 # Packet count per connected master and slave (bytes) -system.cpu.toL2Bus.pkt_count::total 7627126 # Packet count per connected master and slave (bytes) -system.cpu.toL2Bus.pkt_size_system.cpu.icache.mem_side::system.cpu.l2cache.cpu_side 521216 # Cumulative packet size per connected master and slave (bytes) -system.cpu.toL2Bus.pkt_size_system.cpu.dcache.mem_side::system.cpu.l2cache.cpu_side 312241728 # Cumulative packet size per connected master and slave (bytes) -system.cpu.toL2Bus.pkt_size::total 312762944 # Cumulative packet size per connected master and slave (bytes) -system.cpu.toL2Bus.snoops 347705 # Total snoops (count) -system.cpu.toL2Bus.snoop_fanout::samples 2892055 # Request fanout histogram +system.cpu.toL2Bus.pwrStateResidencyTicks::UNDEFINED 417309765500 # Cumulative time (in ticks) in various power states +system.cpu.toL2Bus.trans_dist::ReadResp 1766458 # Transaction distribution +system.cpu.toL2Bus.trans_dist::WritebackDirty 2633215 # Transaction distribution +system.cpu.toL2Bus.trans_dist::WritebackClean 3176 # Transaction distribution +system.cpu.toL2Bus.trans_dist::CleanEvict 250010 # Transaction distribution +system.cpu.toL2Bus.trans_dist::ReadExReq 778152 # Transaction distribution +system.cpu.toL2Bus.trans_dist::ReadExResp 778152 # Transaction distribution +system.cpu.toL2Bus.trans_dist::ReadCleanReq 5005 # Transaction distribution +system.cpu.toL2Bus.trans_dist::ReadSharedReq 1761453 # Transaction distribution +system.cpu.toL2Bus.pkt_count_system.cpu.icache.mem_side::system.cpu.l2cache.cpu_side 13186 # Packet count per connected master and slave (bytes) +system.cpu.toL2Bus.pkt_count_system.cpu.dcache.mem_side::system.cpu.l2cache.cpu_side 7614719 # Packet count per connected master and slave (bytes) +system.cpu.toL2Bus.pkt_count::total 7627905 # Packet count per connected master and slave (bytes) +system.cpu.toL2Bus.pkt_size_system.cpu.icache.mem_side::system.cpu.l2cache.cpu_side 523584 # Cumulative packet size per connected master and slave (bytes) +system.cpu.toL2Bus.pkt_size_system.cpu.dcache.mem_side::system.cpu.l2cache.cpu_side 312269632 # Cumulative packet size per connected master and slave (bytes) +system.cpu.toL2Bus.pkt_size::total 312793216 # Cumulative packet size per connected master and slave (bytes) +system.cpu.toL2Bus.snoops 347716 # Total snoops (count) +system.cpu.toL2Bus.snoopTraffic 18790848 # Total snoop traffic (bytes) 
+system.cpu.toL2Bus.snoop_fanout::samples 2892326 # Request fanout histogram system.cpu.toL2Bus.snoop_fanout::mean 0.000828 # Request fanout histogram -system.cpu.toL2Bus.snoop_fanout::stdev 0.028759 # Request fanout histogram +system.cpu.toL2Bus.snoop_fanout::stdev 0.028764 # Request fanout histogram system.cpu.toL2Bus.snoop_fanout::underflows 0 0.00% 0.00% # Request fanout histogram -system.cpu.toL2Bus.snoop_fanout::0 2889661 99.92% 99.92% # Request fanout histogram -system.cpu.toL2Bus.snoop_fanout::1 2394 0.08% 100.00% # Request fanout histogram +system.cpu.toL2Bus.snoop_fanout::0 2889931 99.92% 99.92% # Request fanout histogram +system.cpu.toL2Bus.snoop_fanout::1 2395 0.08% 100.00% # Request fanout histogram system.cpu.toL2Bus.snoop_fanout::2 0 0.00% 100.00% # Request fanout histogram system.cpu.toL2Bus.snoop_fanout::overflows 0 0.00% 100.00% # Request fanout histogram system.cpu.toL2Bus.snoop_fanout::min_value 0 # Request fanout histogram system.cpu.toL2Bus.snoop_fanout::max_value 1 # Request fanout histogram -system.cpu.toL2Bus.snoop_fanout::total 2892055 # Request fanout histogram -system.cpu.toL2Bus.reqLayer0.occupancy 4883959000 # Layer occupancy (ticks) +system.cpu.toL2Bus.snoop_fanout::total 2892326 # Request fanout histogram +system.cpu.toL2Bus.reqLayer0.occupancy 4884431500 # Layer occupancy (ticks) system.cpu.toL2Bus.reqLayer0.utilization 1.2 # Layer utilization (%) -system.cpu.toL2Bus.respLayer0.occupancy 7479000 # Layer occupancy (ticks) +system.cpu.toL2Bus.respLayer0.occupancy 7507500 # Layer occupancy (ticks) system.cpu.toL2Bus.respLayer0.utilization 0.0 # Layer utilization (%) -system.cpu.toL2Bus.respLayer1.occupancy 3809046000 # Layer occupancy (ticks) +system.cpu.toL2Bus.respLayer1.occupancy 3809407500 # Layer occupancy (ticks) system.cpu.toL2Bus.respLayer1.utilization 0.9 # Layer utilization (%) -system.membus.pwrStateResidencyTicks::UNDEFINED 412079966500 # Cumulative time (in ticks) in various power states -system.membus.trans_dist::ReadResp 173378 # Transaction distribution +system.membus.pwrStateResidencyTicks::UNDEFINED 417309765500 # Cumulative time (in ticks) in various power states +system.membus.trans_dist::ReadResp 173393 # Transaction distribution system.membus.trans_dist::WritebackDirty 293607 # Transaction distribution -system.membus.trans_dist::CleanEvict 51709 # Transaction distribution -system.membus.trans_dist::ReadExReq 206308 # Transaction distribution -system.membus.trans_dist::ReadExResp 206308 # Transaction distribution -system.membus.trans_dist::ReadSharedReq 173378 # Transaction distribution -system.membus.pkt_count_system.cpu.l2cache.mem_side::system.physmem.port 1104688 # Packet count per connected master and slave (bytes) -system.membus.pkt_count::total 1104688 # Packet count per connected master and slave (bytes) -system.membus.pkt_size_system.cpu.l2cache.mem_side::system.physmem.port 43090752 # Cumulative packet size per connected master and slave (bytes) -system.membus.pkt_size::total 43090752 # Cumulative packet size per connected master and slave (bytes) +system.membus.trans_dist::CleanEvict 51719 # Transaction distribution +system.membus.trans_dist::ReadExReq 206305 # Transaction distribution +system.membus.trans_dist::ReadExResp 206305 # Transaction distribution +system.membus.trans_dist::ReadSharedReq 173393 # Transaction distribution +system.membus.pkt_count_system.cpu.l2cache.mem_side::system.physmem.port 1104722 # Packet count per connected master and slave (bytes) +system.membus.pkt_count::total 1104722 # Packet count per 
connected master and slave (bytes) +system.membus.pkt_size_system.cpu.l2cache.mem_side::system.physmem.port 43091520 # Cumulative packet size per connected master and slave (bytes) +system.membus.pkt_size::total 43091520 # Cumulative packet size per connected master and slave (bytes) system.membus.snoops 0 # Total snoops (count) -system.membus.snoop_fanout::samples 725002 # Request fanout histogram +system.membus.snoopTraffic 0 # Total snoop traffic (bytes) +system.membus.snoop_fanout::samples 725024 # Request fanout histogram system.membus.snoop_fanout::mean 0 # Request fanout histogram system.membus.snoop_fanout::stdev 0 # Request fanout histogram system.membus.snoop_fanout::underflows 0 0.00% 0.00% # Request fanout histogram -system.membus.snoop_fanout::0 725002 100.00% 100.00% # Request fanout histogram +system.membus.snoop_fanout::0 725024 100.00% 100.00% # Request fanout histogram system.membus.snoop_fanout::1 0 0.00% 100.00% # Request fanout histogram system.membus.snoop_fanout::overflows 0 0.00% 100.00% # Request fanout histogram system.membus.snoop_fanout::min_value 0 # Request fanout histogram system.membus.snoop_fanout::max_value 0 # Request fanout histogram -system.membus.snoop_fanout::total 725002 # Request fanout histogram -system.membus.reqLayer0.occupancy 2021006000 # Layer occupancy (ticks) +system.membus.snoop_fanout::total 725024 # Request fanout histogram +system.membus.reqLayer0.occupancy 2021857500 # Layer occupancy (ticks) system.membus.reqLayer0.utilization 0.5 # Layer utilization (%) -system.membus.respLayer1.occupancy 2009290500 # Layer occupancy (ticks) +system.membus.respLayer1.occupancy 2009466000 # Layer occupancy (ticks) system.membus.respLayer1.utilization 0.5 # Layer utilization (%) ---------- End Simulation Statistics ---------- diff --git a/tests/long/se/20.parser/ref/arm/linux/minor-timing/config.ini b/tests/long/se/20.parser/ref/arm/linux/minor-timing/config.ini index e8f37d0a8..9fc640f03 100644 --- a/tests/long/se/20.parser/ref/arm/linux/minor-timing/config.ini +++ b/tests/long/se/20.parser/ref/arm/linux/minor-timing/config.ini @@ -14,6 +14,7 @@ children=clk_domain cpu cpu_clk_domain dvfs_handler membus physmem voltage_domai boot_osflags=a cache_line_size=64 clk_domain=system.clk_domain +default_p_state=UNDEFINED eventq_index=0 exit_on_work_items=false init_param=0 @@ -27,8 +28,14 @@ memories=system.physmem mmap_using_noreserve=false multi_thread=false num_work_ids=16 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null readfile= symbolfile= +thermal_components= +thermal_model=Null work_begin_ckpt_count=0 work_begin_cpu_id_exit=-1 work_begin_exit_count=0 @@ -57,6 +64,7 @@ decodeCycleInput=true decodeInputBufferSize=3 decodeInputWidth=2 decodeToExecuteForwardDelay=1 +default_p_state=UNDEFINED do_checkpoint_insts=true do_quiesce=true do_statistics_insts=true @@ -101,12 +109,17 @@ max_insts_any_thread=0 max_loads_all_threads=0 max_loads_any_thread=0 numThreads=1 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null profile=0 progress_interval=0 simpoint_start_insts= socket_id=0 switched_out=false system=system +threadPolicy=RoundRobin tracer=system.cpu.tracer workload=system.cpu.workload dcache_port=system.cpu.dcache.cpu_side @@ -122,11 +135,18 @@ choicePredictorSize=8192 eventq_index=0 globalCtrBits=2 globalPredictorSize=8192 +indirectHashGHR=true +indirectHashTargets=true +indirectPathLength=3 +indirectSets=256 +indirectTagSize=16 +indirectWays=2 
instShiftAmt=2 localCtrBits=2 localHistoryTableSize=2048 localPredictorSize=2048 numThreads=1 +useIndirect=true [system.cpu.dcache] type=Cache @@ -135,12 +155,17 @@ addr_ranges=0:18446744073709551615 assoc=2 clk_domain=system.cpu_clk_domain clusivity=mostly_incl +default_p_state=UNDEFINED demand_mshr_reserve=1 eventq_index=0 hit_latency=2 is_read_only=false max_miss_count=0 mshrs=4 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null prefetch_on_access=false prefetcher=Null response_latency=2 @@ -159,8 +184,13 @@ type=LRU assoc=2 block_size=64 clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 hit_latency=2 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sequential_access=false size=262144 @@ -183,9 +213,14 @@ walker=system.cpu.dstage2_mmu.stage2_tlb.walker [system.cpu.dstage2_mmu.stage2_tlb.walker] type=ArmTableWalker clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 is_stage2=true num_squash_per_cycle=2 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sys=system [system.cpu.dtb] @@ -199,9 +234,14 @@ walker=system.cpu.dtb.walker [system.cpu.dtb.walker] type=ArmTableWalker clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 is_stage2=false num_squash_per_cycle=2 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sys=system port=system.cpu.toL2Bus.slave[3] @@ -595,12 +635,17 @@ addr_ranges=0:18446744073709551615 assoc=2 clk_domain=system.cpu_clk_domain clusivity=mostly_incl +default_p_state=UNDEFINED demand_mshr_reserve=1 eventq_index=0 hit_latency=2 is_read_only=true max_miss_count=0 mshrs=4 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null prefetch_on_access=false prefetcher=Null response_latency=2 @@ -619,8 +664,13 @@ type=LRU assoc=2 block_size=64 clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 hit_latency=2 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sequential_access=false size=131072 @@ -678,9 +728,14 @@ walker=system.cpu.istage2_mmu.stage2_tlb.walker [system.cpu.istage2_mmu.stage2_tlb.walker] type=ArmTableWalker clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 is_stage2=true num_squash_per_cycle=2 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sys=system [system.cpu.itb] @@ -694,9 +749,14 @@ walker=system.cpu.itb.walker [system.cpu.itb.walker] type=ArmTableWalker clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 is_stage2=false num_squash_per_cycle=2 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sys=system port=system.cpu.toL2Bus.slave[2] @@ -707,12 +767,17 @@ addr_ranges=0:18446744073709551615 assoc=8 clk_domain=system.cpu_clk_domain clusivity=mostly_incl +default_p_state=UNDEFINED demand_mshr_reserve=1 eventq_index=0 hit_latency=20 is_read_only=false max_miss_count=0 mshrs=20 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null prefetch_on_access=false prefetcher=Null response_latency=20 @@ -731,8 +796,13 @@ type=LRU assoc=8 block_size=64 clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 
hit_latency=20 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sequential_access=false size=2097152 @@ -740,10 +810,15 @@ size=2097152 type=CoherentXBar children=snoop_filter clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 forward_latency=0 frontend_latency=1 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 point_of_coherency=false +power_model=Null response_latency=1 snoop_filter=system.cpu.toL2Bus.snoop_filter snoop_response_latency=1 @@ -774,9 +849,9 @@ env= errout=cerr euid=100 eventq_index=0 -executable=/dist/m5/cpu2000/binaries/arm/linux/parser +executable=/arm/projectscratch/randd/systems/dist/cpu2000/binaries/arm/linux/parser gid=100 -input=/dist/m5/cpu2000/data/parser/mdred/input/parser.in +input=/arm/projectscratch/randd/systems/dist/cpu2000/data/parser/mdred/input/parser.in kvmInSE=false max_stack_size=67108864 output=cout @@ -806,10 +881,15 @@ transition_latency=100000000 [system.membus] type=CoherentXBar clk_domain=system.clk_domain +default_p_state=UNDEFINED eventq_index=0 forward_latency=4 frontend_latency=3 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 point_of_coherency=true +power_model=Null response_latency=2 snoop_filter=Null snoop_response_latency=4 @@ -853,6 +933,7 @@ burst_length=8 channels=1 clk_domain=system.clk_domain conf_table_reported=true +default_p_state=UNDEFINED device_bus_width=8 device_rowbuffer_size=1024 device_size=536870912 @@ -864,7 +945,11 @@ max_accesses_per_row=16 mem_sched_policy=frfcfs min_writes_per_switch=16 null=false +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 page_policy=open_adaptive +power_model=Null range=0:134217727 ranks_per_channel=2 read_buffer_size=32 diff --git a/tests/long/se/20.parser/ref/arm/linux/minor-timing/simerr b/tests/long/se/20.parser/ref/arm/linux/minor-timing/simerr index eeb19437b..caeab8324 100755 --- a/tests/long/se/20.parser/ref/arm/linux/minor-timing/simerr +++ b/tests/long/se/20.parser/ref/arm/linux/minor-timing/simerr @@ -1,2 +1,4 @@ warn: DRAM device capacity (8192 Mbytes) does not match the address range assigned (128 Mbytes) +warn: Sockets disabled, not accepting gdb connections +warn: ClockedObject: More than one power state change request encountered within the same simulation tick warn: CP14 unimplemented crn[8], opc1[2], crm[9], opc2[4] diff --git a/tests/long/se/20.parser/ref/arm/linux/minor-timing/simout b/tests/long/se/20.parser/ref/arm/linux/minor-timing/simout index 73f574cb5..0165cf685 100755 --- a/tests/long/se/20.parser/ref/arm/linux/minor-timing/simout +++ b/tests/long/se/20.parser/ref/arm/linux/minor-timing/simout @@ -3,10 +3,10 @@ Redirecting stderr to build/ARM/tests/opt/long/se/20.parser/arm/linux/minor-timi gem5 Simulator System. http://gem5.org gem5 is copyrighted software; use the --copyright option for details. 
-gem5 compiled Mar 16 2016 15:51:04 -gem5 started Mar 16 2016 15:55:43 -gem5 executing on dinar2c11, pid 15340 -command line: build/ARM/gem5.opt -d build/ARM/tests/opt/long/se/20.parser/arm/linux/minor-timing -re /home/stever/gem5-public/tests/run.py build/ARM/tests/opt/long/se/20.parser/arm/linux/minor-timing +gem5 compiled Jul 21 2016 14:37:41 +gem5 started Jul 21 2016 14:38:21 +gem5 executing on e108600-lin, pid 23072 +command line: /work/curdun01/gem5-external.hg/build/ARM/gem5.opt -d build/ARM/tests/opt/long/se/20.parser/arm/linux/minor-timing -re /work/curdun01/gem5-external.hg/tests/testing/../run.py long/se/20.parser/arm/linux/minor-timing Global frequency set at 1000000000000 ticks per second info: Entering event queue @ 0. Starting simulation... @@ -70,4 +70,4 @@ info: Increasing stack size by one page. about 2 million people attended the five best costumes got prizes No errors! -Exiting @ tick 363608804500 because target called exit() +Exiting @ tick 366439129500 because target called exit() diff --git a/tests/long/se/20.parser/ref/arm/linux/minor-timing/stats.txt b/tests/long/se/20.parser/ref/arm/linux/minor-timing/stats.txt index 4d23ca501..55f9db9e0 100644 --- a/tests/long/se/20.parser/ref/arm/linux/minor-timing/stats.txt +++ b/tests/long/se/20.parser/ref/arm/linux/minor-timing/stats.txt @@ -1,106 +1,106 @@ ---------- Begin Simulation Statistics ---------- -sim_seconds 0.362632 # Number of seconds simulated -sim_ticks 362631828500 # Number of ticks simulated -final_tick 362631828500 # Number of ticks from beginning of simulation (restored from checkpoints and never reset) +sim_seconds 0.366439 # Number of seconds simulated +sim_ticks 366439129500 # Number of ticks simulated +final_tick 366439129500 # Number of ticks from beginning of simulation (restored from checkpoints and never reset) sim_freq 1000000000000 # Frequency of simulated ticks -host_inst_rate 379372 # Simulator instruction rate (inst/s) -host_op_rate 410911 # Simulator op (including micro ops) rate (op/s) -host_tick_rate 271571493 # Simulator tick rate (ticks/s) -host_mem_usage 317732 # Number of bytes of host memory used -host_seconds 1335.31 # Real time elapsed on the host +host_inst_rate 188596 # Simulator instruction rate (inst/s) +host_op_rate 204275 # Simulator op (including micro ops) rate (op/s) +host_tick_rate 136422977 # Simulator tick rate (ticks/s) +host_mem_usage 271112 # Number of bytes of host memory used +host_seconds 2686.05 # Real time elapsed on the host sim_insts 506579366 # Number of instructions simulated sim_ops 548692589 # Number of ops (including micro ops) simulated system.voltage_domain.voltage 1 # Voltage in Volts system.clk_domain.clock 1000 # Clock period in ticks -system.physmem.pwrStateResidencyTicks::UNDEFINED 362631828500 # Cumulative time (in ticks) in various power states -system.physmem.bytes_read::cpu.inst 179456 # Number of bytes read from this memory -system.physmem.bytes_read::cpu.data 9032064 # Number of bytes read from this memory -system.physmem.bytes_read::total 9211520 # Number of bytes read from this memory -system.physmem.bytes_inst_read::cpu.inst 179456 # Number of instructions bytes read from this memory -system.physmem.bytes_inst_read::total 179456 # Number of instructions bytes read from this memory -system.physmem.bytes_written::writebacks 6221440 # Number of bytes written to this memory -system.physmem.bytes_written::total 6221440 # Number of bytes written to this memory -system.physmem.num_reads::cpu.inst 2804 # Number of read requests responded to by this 
memory -system.physmem.num_reads::cpu.data 141126 # Number of read requests responded to by this memory -system.physmem.num_reads::total 143930 # Number of read requests responded to by this memory -system.physmem.num_writes::writebacks 97210 # Number of write requests responded to by this memory -system.physmem.num_writes::total 97210 # Number of write requests responded to by this memory -system.physmem.bw_read::cpu.inst 494871 # Total read bandwidth from this memory (bytes/s) -system.physmem.bw_read::cpu.data 24906981 # Total read bandwidth from this memory (bytes/s) -system.physmem.bw_read::total 25401852 # Total read bandwidth from this memory (bytes/s) -system.physmem.bw_inst_read::cpu.inst 494871 # Instruction read bandwidth from this memory (bytes/s) -system.physmem.bw_inst_read::total 494871 # Instruction read bandwidth from this memory (bytes/s) -system.physmem.bw_write::writebacks 17156354 # Write bandwidth from this memory (bytes/s) -system.physmem.bw_write::total 17156354 # Write bandwidth from this memory (bytes/s) -system.physmem.bw_total::writebacks 17156354 # Total bandwidth to/from this memory (bytes/s) -system.physmem.bw_total::cpu.inst 494871 # Total bandwidth to/from this memory (bytes/s) -system.physmem.bw_total::cpu.data 24906981 # Total bandwidth to/from this memory (bytes/s) -system.physmem.bw_total::total 42558206 # Total bandwidth to/from this memory (bytes/s) -system.physmem.readReqs 143930 # Number of read requests accepted -system.physmem.writeReqs 97210 # Number of write requests accepted -system.physmem.readBursts 143930 # Number of DRAM read bursts, including those serviced by the write queue -system.physmem.writeBursts 97210 # Number of DRAM write bursts, including those merged in the write queue -system.physmem.bytesReadDRAM 9204736 # Total number of bytes read from DRAM -system.physmem.bytesReadWrQ 6784 # Total number of bytes read from write queue -system.physmem.bytesWritten 6219456 # Total number of bytes written to DRAM -system.physmem.bytesReadSys 9211520 # Total read bytes from the system interface side -system.physmem.bytesWrittenSys 6221440 # Total written bytes from the system interface side -system.physmem.servicedByWrQ 106 # Number of DRAM read bursts serviced by the write queue +system.physmem.pwrStateResidencyTicks::UNDEFINED 366439129500 # Cumulative time (in ticks) in various power states +system.physmem.bytes_read::cpu.inst 179840 # Number of bytes read from this memory +system.physmem.bytes_read::cpu.data 9028544 # Number of bytes read from this memory +system.physmem.bytes_read::total 9208384 # Number of bytes read from this memory +system.physmem.bytes_inst_read::cpu.inst 179840 # Number of instructions bytes read from this memory +system.physmem.bytes_inst_read::total 179840 # Number of instructions bytes read from this memory +system.physmem.bytes_written::writebacks 6219648 # Number of bytes written to this memory +system.physmem.bytes_written::total 6219648 # Number of bytes written to this memory +system.physmem.num_reads::cpu.inst 2810 # Number of read requests responded to by this memory +system.physmem.num_reads::cpu.data 141071 # Number of read requests responded to by this memory +system.physmem.num_reads::total 143881 # Number of read requests responded to by this memory +system.physmem.num_writes::writebacks 97182 # Number of write requests responded to by this memory +system.physmem.num_writes::total 97182 # Number of write requests responded to by this memory +system.physmem.bw_read::cpu.inst 490777 # Total read bandwidth 
from this memory (bytes/s) +system.physmem.bw_read::cpu.data 24638591 # Total read bandwidth from this memory (bytes/s) +system.physmem.bw_read::total 25129369 # Total read bandwidth from this memory (bytes/s) +system.physmem.bw_inst_read::cpu.inst 490777 # Instruction read bandwidth from this memory (bytes/s) +system.physmem.bw_inst_read::total 490777 # Instruction read bandwidth from this memory (bytes/s) +system.physmem.bw_write::writebacks 16973209 # Write bandwidth from this memory (bytes/s) +system.physmem.bw_write::total 16973209 # Write bandwidth from this memory (bytes/s) +system.physmem.bw_total::writebacks 16973209 # Total bandwidth to/from this memory (bytes/s) +system.physmem.bw_total::cpu.inst 490777 # Total bandwidth to/from this memory (bytes/s) +system.physmem.bw_total::cpu.data 24638591 # Total bandwidth to/from this memory (bytes/s) +system.physmem.bw_total::total 42102578 # Total bandwidth to/from this memory (bytes/s) +system.physmem.readReqs 143881 # Number of read requests accepted +system.physmem.writeReqs 97182 # Number of write requests accepted +system.physmem.readBursts 143881 # Number of DRAM read bursts, including those serviced by the write queue +system.physmem.writeBursts 97182 # Number of DRAM write bursts, including those merged in the write queue +system.physmem.bytesReadDRAM 9201344 # Total number of bytes read from DRAM +system.physmem.bytesReadWrQ 7040 # Total number of bytes read from write queue +system.physmem.bytesWritten 6217600 # Total number of bytes written to DRAM +system.physmem.bytesReadSys 9208384 # Total read bytes from the system interface side +system.physmem.bytesWrittenSys 6219648 # Total written bytes from the system interface side +system.physmem.servicedByWrQ 110 # Number of DRAM read bursts serviced by the write queue system.physmem.mergedWrBursts 0 # Number of DRAM write bursts merged with an existing one system.physmem.neitherReadNorWriteReqs 0 # Number of requests that are neither read nor write -system.physmem.perBankRdBursts::0 9406 # Per bank write bursts -system.physmem.perBankRdBursts::1 8921 # Per bank write bursts +system.physmem.perBankRdBursts::0 9364 # Per bank write bursts +system.physmem.perBankRdBursts::1 8912 # Per bank write bursts system.physmem.perBankRdBursts::2 8949 # Per bank write bursts -system.physmem.perBankRdBursts::3 8657 # Per bank write bursts -system.physmem.perBankRdBursts::4 9384 # Per bank write bursts +system.physmem.perBankRdBursts::3 8655 # Per bank write bursts +system.physmem.perBankRdBursts::4 9392 # Per bank write bursts system.physmem.perBankRdBursts::5 9355 # Per bank write bursts -system.physmem.perBankRdBursts::6 8962 # Per bank write bursts -system.physmem.perBankRdBursts::7 8101 # Per bank write bursts +system.physmem.perBankRdBursts::6 8959 # Per bank write bursts +system.physmem.perBankRdBursts::7 8100 # Per bank write bursts system.physmem.perBankRdBursts::8 8596 # Per bank write bursts -system.physmem.perBankRdBursts::9 8628 # Per bank write bursts -system.physmem.perBankRdBursts::10 8740 # Per bank write bursts -system.physmem.perBankRdBursts::11 9454 # Per bank write bursts -system.physmem.perBankRdBursts::12 9340 # Per bank write bursts -system.physmem.perBankRdBursts::13 9510 # Per bank write bursts -system.physmem.perBankRdBursts::14 8709 # Per bank write bursts -system.physmem.perBankRdBursts::15 9112 # Per bank write bursts -system.physmem.perBankWrBursts::0 6249 # Per bank write bursts -system.physmem.perBankWrBursts::1 6105 # Per bank write bursts 
-system.physmem.perBankWrBursts::2 6032 # Per bank write bursts -system.physmem.perBankWrBursts::3 5882 # Per bank write bursts -system.physmem.perBankWrBursts::4 6237 # Per bank write bursts -system.physmem.perBankWrBursts::5 6240 # Per bank write bursts -system.physmem.perBankWrBursts::6 6051 # Per bank write bursts -system.physmem.perBankWrBursts::7 5508 # Per bank write bursts -system.physmem.perBankWrBursts::8 5781 # Per bank write bursts -system.physmem.perBankWrBursts::9 5861 # Per bank write bursts +system.physmem.perBankRdBursts::9 8629 # Per bank write bursts +system.physmem.perBankRdBursts::10 8739 # Per bank write bursts +system.physmem.perBankRdBursts::11 9451 # Per bank write bursts +system.physmem.perBankRdBursts::12 9334 # Per bank write bursts +system.physmem.perBankRdBursts::13 9512 # Per bank write bursts +system.physmem.perBankRdBursts::14 8707 # Per bank write bursts +system.physmem.perBankRdBursts::15 9117 # Per bank write bursts +system.physmem.perBankWrBursts::0 6231 # Per bank write bursts +system.physmem.perBankWrBursts::1 6102 # Per bank write bursts +system.physmem.perBankWrBursts::2 6028 # Per bank write bursts +system.physmem.perBankWrBursts::3 5879 # Per bank write bursts +system.physmem.perBankWrBursts::4 6243 # Per bank write bursts +system.physmem.perBankWrBursts::5 6239 # Per bank write bursts +system.physmem.perBankWrBursts::6 6050 # Per bank write bursts +system.physmem.perBankWrBursts::7 5507 # Per bank write bursts +system.physmem.perBankWrBursts::8 5786 # Per bank write bursts +system.physmem.perBankWrBursts::9 5859 # Per bank write bursts system.physmem.perBankWrBursts::10 5978 # Per bank write bursts -system.physmem.perBankWrBursts::11 6494 # Per bank write bursts -system.physmem.perBankWrBursts::12 6355 # Per bank write bursts -system.physmem.perBankWrBursts::13 6320 # Per bank write bursts -system.physmem.perBankWrBursts::14 6000 # Per bank write bursts -system.physmem.perBankWrBursts::15 6086 # Per bank write bursts +system.physmem.perBankWrBursts::11 6493 # Per bank write bursts +system.physmem.perBankWrBursts::12 6351 # Per bank write bursts +system.physmem.perBankWrBursts::13 6319 # Per bank write bursts +system.physmem.perBankWrBursts::14 5995 # Per bank write bursts +system.physmem.perBankWrBursts::15 6090 # Per bank write bursts system.physmem.numRdRetry 0 # Number of times read queue was full causing retry system.physmem.numWrRetry 0 # Number of times write queue was full causing retry -system.physmem.totGap 362631802500 # Total gap between requests +system.physmem.totGap 366439104000 # Total gap between requests system.physmem.readPktSize::0 0 # Read request sizes (log2) system.physmem.readPktSize::1 0 # Read request sizes (log2) system.physmem.readPktSize::2 0 # Read request sizes (log2) system.physmem.readPktSize::3 0 # Read request sizes (log2) system.physmem.readPktSize::4 0 # Read request sizes (log2) system.physmem.readPktSize::5 0 # Read request sizes (log2) -system.physmem.readPktSize::6 143930 # Read request sizes (log2) +system.physmem.readPktSize::6 143881 # Read request sizes (log2) system.physmem.writePktSize::0 0 # Write request sizes (log2) system.physmem.writePktSize::1 0 # Write request sizes (log2) system.physmem.writePktSize::2 0 # Write request sizes (log2) system.physmem.writePktSize::3 0 # Write request sizes (log2) system.physmem.writePktSize::4 0 # Write request sizes (log2) system.physmem.writePktSize::5 0 # Write request sizes (log2) -system.physmem.writePktSize::6 97210 # Write request sizes (log2) 
-system.physmem.rdQLenPdf::0 143484 # What read queue length does an incoming req see -system.physmem.rdQLenPdf::1 320 # What read queue length does an incoming req see -system.physmem.rdQLenPdf::2 20 # What read queue length does an incoming req see +system.physmem.writePktSize::6 97182 # Write request sizes (log2) +system.physmem.rdQLenPdf::0 143447 # What read queue length does an incoming req see +system.physmem.rdQLenPdf::1 307 # What read queue length does an incoming req see +system.physmem.rdQLenPdf::2 17 # What read queue length does an incoming req see system.physmem.rdQLenPdf::3 0 # What read queue length does an incoming req see system.physmem.rdQLenPdf::4 0 # What read queue length does an incoming req see system.physmem.rdQLenPdf::5 0 # What read queue length does an incoming req see @@ -145,34 +145,34 @@ system.physmem.wrQLenPdf::11 1 # Wh system.physmem.wrQLenPdf::12 1 # What write queue length does an incoming req see system.physmem.wrQLenPdf::13 1 # What write queue length does an incoming req see system.physmem.wrQLenPdf::14 1 # What write queue length does an incoming req see -system.physmem.wrQLenPdf::15 2964 # What write queue length does an incoming req see -system.physmem.wrQLenPdf::16 3137 # What write queue length does an incoming req see -system.physmem.wrQLenPdf::17 5566 # What write queue length does an incoming req see -system.physmem.wrQLenPdf::18 5692 # What write queue length does an incoming req see -system.physmem.wrQLenPdf::19 5699 # What write queue length does an incoming req see -system.physmem.wrQLenPdf::20 5705 # What write queue length does an incoming req see -system.physmem.wrQLenPdf::21 5705 # What write queue length does an incoming req see -system.physmem.wrQLenPdf::22 5703 # What write queue length does an incoming req see -system.physmem.wrQLenPdf::23 5710 # What write queue length does an incoming req see -system.physmem.wrQLenPdf::24 5731 # What write queue length does an incoming req see -system.physmem.wrQLenPdf::25 5739 # What write queue length does an incoming req see -system.physmem.wrQLenPdf::26 5733 # What write queue length does an incoming req see -system.physmem.wrQLenPdf::27 5740 # What write queue length does an incoming req see -system.physmem.wrQLenPdf::28 5717 # What write queue length does an incoming req see -system.physmem.wrQLenPdf::29 5686 # What write queue length does an incoming req see -system.physmem.wrQLenPdf::30 5684 # What write queue length does an incoming req see -system.physmem.wrQLenPdf::31 5636 # What write queue length does an incoming req see -system.physmem.wrQLenPdf::32 5622 # What write queue length does an incoming req see -system.physmem.wrQLenPdf::33 14 # What write queue length does an incoming req see -system.physmem.wrQLenPdf::34 6 # What write queue length does an incoming req see -system.physmem.wrQLenPdf::35 3 # What write queue length does an incoming req see -system.physmem.wrQLenPdf::36 1 # What write queue length does an incoming req see -system.physmem.wrQLenPdf::37 1 # What write queue length does an incoming req see -system.physmem.wrQLenPdf::38 1 # What write queue length does an incoming req see -system.physmem.wrQLenPdf::39 0 # What write queue length does an incoming req see -system.physmem.wrQLenPdf::40 0 # What write queue length does an incoming req see -system.physmem.wrQLenPdf::41 0 # What write queue length does an incoming req see -system.physmem.wrQLenPdf::42 0 # What write queue length does an incoming req see +system.physmem.wrQLenPdf::15 2945 # What write queue length 
does an incoming req see +system.physmem.wrQLenPdf::16 3139 # What write queue length does an incoming req see +system.physmem.wrQLenPdf::17 5546 # What write queue length does an incoming req see +system.physmem.wrQLenPdf::18 5695 # What write queue length does an incoming req see +system.physmem.wrQLenPdf::19 5698 # What write queue length does an incoming req see +system.physmem.wrQLenPdf::20 5688 # What write queue length does an incoming req see +system.physmem.wrQLenPdf::21 5708 # What write queue length does an incoming req see +system.physmem.wrQLenPdf::22 5717 # What write queue length does an incoming req see +system.physmem.wrQLenPdf::23 5734 # What write queue length does an incoming req see +system.physmem.wrQLenPdf::24 5737 # What write queue length does an incoming req see +system.physmem.wrQLenPdf::25 5721 # What write queue length does an incoming req see +system.physmem.wrQLenPdf::26 5718 # What write queue length does an incoming req see +system.physmem.wrQLenPdf::27 5715 # What write queue length does an incoming req see +system.physmem.wrQLenPdf::28 5728 # What write queue length does an incoming req see +system.physmem.wrQLenPdf::29 5685 # What write queue length does an incoming req see +system.physmem.wrQLenPdf::30 5691 # What write queue length does an incoming req see +system.physmem.wrQLenPdf::31 5629 # What write queue length does an incoming req see +system.physmem.wrQLenPdf::32 5618 # What write queue length does an incoming req see +system.physmem.wrQLenPdf::33 18 # What write queue length does an incoming req see +system.physmem.wrQLenPdf::34 11 # What write queue length does an incoming req see +system.physmem.wrQLenPdf::35 4 # What write queue length does an incoming req see +system.physmem.wrQLenPdf::36 4 # What write queue length does an incoming req see +system.physmem.wrQLenPdf::37 4 # What write queue length does an incoming req see +system.physmem.wrQLenPdf::38 6 # What write queue length does an incoming req see +system.physmem.wrQLenPdf::39 3 # What write queue length does an incoming req see +system.physmem.wrQLenPdf::40 2 # What write queue length does an incoming req see +system.physmem.wrQLenPdf::41 2 # What write queue length does an incoming req see +system.physmem.wrQLenPdf::42 1 # What write queue length does an incoming req see system.physmem.wrQLenPdf::43 0 # What write queue length does an incoming req see system.physmem.wrQLenPdf::44 0 # What write queue length does an incoming req see system.physmem.wrQLenPdf::45 0 # What write queue length does an incoming req see @@ -194,115 +194,118 @@ system.physmem.wrQLenPdf::60 0 # Wh system.physmem.wrQLenPdf::61 0 # What write queue length does an incoming req see system.physmem.wrQLenPdf::62 0 # What write queue length does an incoming req see system.physmem.wrQLenPdf::63 0 # What write queue length does an incoming req see -system.physmem.bytesPerActivate::samples 65461 # Bytes accessed per row activation -system.physmem.bytesPerActivate::mean 235.617299 # Bytes accessed per row activation -system.physmem.bytesPerActivate::gmean 156.242018 # Bytes accessed per row activation -system.physmem.bytesPerActivate::stdev 241.589954 # Bytes accessed per row activation -system.physmem.bytesPerActivate::0-127 24858 37.97% 37.97% # Bytes accessed per row activation -system.physmem.bytesPerActivate::128-255 18413 28.13% 66.10% # Bytes accessed per row activation -system.physmem.bytesPerActivate::256-383 6961 10.63% 76.74% # Bytes accessed per row activation -system.physmem.bytesPerActivate::384-511 7914 
12.09% 88.83% # Bytes accessed per row activation -system.physmem.bytesPerActivate::512-639 2009 3.07% 91.89% # Bytes accessed per row activation -system.physmem.bytesPerActivate::640-767 1136 1.74% 93.63% # Bytes accessed per row activation -system.physmem.bytesPerActivate::768-895 792 1.21% 94.84% # Bytes accessed per row activation -system.physmem.bytesPerActivate::896-1023 657 1.00% 95.84% # Bytes accessed per row activation -system.physmem.bytesPerActivate::1024-1151 2721 4.16% 100.00% # Bytes accessed per row activation -system.physmem.bytesPerActivate::total 65461 # Bytes accessed per row activation +system.physmem.bytesPerActivate::samples 65604 # Bytes accessed per row activation +system.physmem.bytesPerActivate::mean 235.015914 # Bytes accessed per row activation +system.physmem.bytesPerActivate::gmean 156.088937 # Bytes accessed per row activation +system.physmem.bytesPerActivate::stdev 241.071665 # Bytes accessed per row activation +system.physmem.bytesPerActivate::0-127 24900 37.96% 37.96% # Bytes accessed per row activation +system.physmem.bytesPerActivate::128-255 18453 28.13% 66.08% # Bytes accessed per row activation +system.physmem.bytesPerActivate::256-383 7121 10.85% 76.94% # Bytes accessed per row activation +system.physmem.bytesPerActivate::384-511 7867 11.99% 88.93% # Bytes accessed per row activation +system.physmem.bytesPerActivate::512-639 1977 3.01% 91.94% # Bytes accessed per row activation +system.physmem.bytesPerActivate::640-767 1093 1.67% 93.61% # Bytes accessed per row activation +system.physmem.bytesPerActivate::768-895 809 1.23% 94.84% # Bytes accessed per row activation +system.physmem.bytesPerActivate::896-1023 630 0.96% 95.80% # Bytes accessed per row activation +system.physmem.bytesPerActivate::1024-1151 2754 4.20% 100.00% # Bytes accessed per row activation +system.physmem.bytesPerActivate::total 65604 # Bytes accessed per row activation system.physmem.rdPerTurnAround::samples 5611 # Reads before turning the bus around for writes -system.physmem.rdPerTurnAround::mean 25.630191 # Reads before turning the bus around for writes -system.physmem.rdPerTurnAround::stdev 380.618779 # Reads before turning the bus around for writes +system.physmem.rdPerTurnAround::mean 25.620745 # Reads before turning the bus around for writes +system.physmem.rdPerTurnAround::stdev 380.610137 # Reads before turning the bus around for writes system.physmem.rdPerTurnAround::0-1023 5609 99.96% 99.96% # Reads before turning the bus around for writes system.physmem.rdPerTurnAround::1024-2047 1 0.02% 99.98% # Reads before turning the bus around for writes system.physmem.rdPerTurnAround::27648-28671 1 0.02% 100.00% # Reads before turning the bus around for writes system.physmem.rdPerTurnAround::total 5611 # Reads before turning the bus around for writes system.physmem.wrPerTurnAround::samples 5611 # Writes before turning the bus around for reads -system.physmem.wrPerTurnAround::mean 17.319373 # Writes before turning the bus around for reads -system.physmem.wrPerTurnAround::gmean 17.223479 # Writes before turning the bus around for reads -system.physmem.wrPerTurnAround::stdev 2.351913 # Writes before turning the bus around for reads -system.physmem.wrPerTurnAround::16-17 2643 47.10% 47.10% # Writes before turning the bus around for reads -system.physmem.wrPerTurnAround::18-19 2820 50.26% 97.36% # Writes before turning the bus around for reads -system.physmem.wrPerTurnAround::20-21 52 0.93% 98.29% # Writes before turning the bus around for reads -system.physmem.wrPerTurnAround::22-23 
28 0.50% 98.79% # Writes before turning the bus around for reads -system.physmem.wrPerTurnAround::24-25 21 0.37% 99.16% # Writes before turning the bus around for reads -system.physmem.wrPerTurnAround::26-27 8 0.14% 99.30% # Writes before turning the bus around for reads -system.physmem.wrPerTurnAround::28-29 6 0.11% 99.41% # Writes before turning the bus around for reads -system.physmem.wrPerTurnAround::30-31 9 0.16% 99.57% # Writes before turning the bus around for reads -system.physmem.wrPerTurnAround::32-33 4 0.07% 99.64% # Writes before turning the bus around for reads -system.physmem.wrPerTurnAround::34-35 6 0.11% 99.75% # Writes before turning the bus around for reads -system.physmem.wrPerTurnAround::36-37 5 0.09% 99.84% # Writes before turning the bus around for reads -system.physmem.wrPerTurnAround::40-41 1 0.02% 99.86% # Writes before turning the bus around for reads -system.physmem.wrPerTurnAround::42-43 3 0.05% 99.91% # Writes before turning the bus around for reads -system.physmem.wrPerTurnAround::44-45 2 0.04% 99.95% # Writes before turning the bus around for reads -system.physmem.wrPerTurnAround::70-71 1 0.02% 99.96% # Writes before turning the bus around for reads -system.physmem.wrPerTurnAround::72-73 1 0.02% 99.98% # Writes before turning the bus around for reads -system.physmem.wrPerTurnAround::90-91 1 0.02% 100.00% # Writes before turning the bus around for reads +system.physmem.wrPerTurnAround::mean 17.314204 # Writes before turning the bus around for reads +system.physmem.wrPerTurnAround::gmean 17.219748 # Writes before turning the bus around for reads +system.physmem.wrPerTurnAround::stdev 2.335766 # Writes before turning the bus around for reads +system.physmem.wrPerTurnAround::16-17 2654 47.30% 47.30% # Writes before turning the bus around for reads +system.physmem.wrPerTurnAround::18-19 2805 49.99% 97.29% # Writes before turning the bus around for reads +system.physmem.wrPerTurnAround::20-21 62 1.10% 98.40% # Writes before turning the bus around for reads +system.physmem.wrPerTurnAround::22-23 24 0.43% 98.82% # Writes before turning the bus around for reads +system.physmem.wrPerTurnAround::24-25 17 0.30% 99.13% # Writes before turning the bus around for reads +system.physmem.wrPerTurnAround::26-27 10 0.18% 99.30% # Writes before turning the bus around for reads +system.physmem.wrPerTurnAround::28-29 10 0.18% 99.48% # Writes before turning the bus around for reads +system.physmem.wrPerTurnAround::30-31 10 0.18% 99.66% # Writes before turning the bus around for reads +system.physmem.wrPerTurnAround::32-33 2 0.04% 99.70% # Writes before turning the bus around for reads +system.physmem.wrPerTurnAround::34-35 4 0.07% 99.77% # Writes before turning the bus around for reads +system.physmem.wrPerTurnAround::36-37 2 0.04% 99.80% # Writes before turning the bus around for reads +system.physmem.wrPerTurnAround::38-39 1 0.02% 99.82% # Writes before turning the bus around for reads +system.physmem.wrPerTurnAround::40-41 2 0.04% 99.86% # Writes before turning the bus around for reads +system.physmem.wrPerTurnAround::44-45 1 0.02% 99.88% # Writes before turning the bus around for reads +system.physmem.wrPerTurnAround::46-47 1 0.02% 99.89% # Writes before turning the bus around for reads +system.physmem.wrPerTurnAround::48-49 1 0.02% 99.91% # Writes before turning the bus around for reads +system.physmem.wrPerTurnAround::50-51 1 0.02% 99.93% # Writes before turning the bus around for reads +system.physmem.wrPerTurnAround::52-53 2 0.04% 99.96% # Writes before turning the bus 
around for reads +system.physmem.wrPerTurnAround::62-63 1 0.02% 99.98% # Writes before turning the bus around for reads +system.physmem.wrPerTurnAround::98-99 1 0.02% 100.00% # Writes before turning the bus around for reads system.physmem.wrPerTurnAround::total 5611 # Writes before turning the bus around for reads -system.physmem.totQLat 1538291500 # Total ticks spent queuing -system.physmem.totMemAccLat 4234991500 # Total ticks spent from burst creation until serviced by the DRAM -system.physmem.totBusLat 719120000 # Total ticks spent in databus transfers -system.physmem.avgQLat 10695.65 # Average queueing delay per DRAM burst +system.physmem.totQLat 1554447250 # Total ticks spent queuing +system.physmem.totMemAccLat 4250153500 # Total ticks spent from burst creation until serviced by the DRAM +system.physmem.totBusLat 718855000 # Total ticks spent in databus transfers +system.physmem.avgQLat 10811.97 # Average queueing delay per DRAM burst system.physmem.avgBusLat 5000.00 # Average bus latency per DRAM burst -system.physmem.avgMemAccLat 29445.65 # Average memory access latency per DRAM burst -system.physmem.avgRdBW 25.38 # Average DRAM read bandwidth in MiByte/s -system.physmem.avgWrBW 17.15 # Average achieved write bandwidth in MiByte/s -system.physmem.avgRdBWSys 25.40 # Average system read bandwidth in MiByte/s -system.physmem.avgWrBWSys 17.16 # Average system write bandwidth in MiByte/s +system.physmem.avgMemAccLat 29561.97 # Average memory access latency per DRAM burst +system.physmem.avgRdBW 25.11 # Average DRAM read bandwidth in MiByte/s +system.physmem.avgWrBW 16.97 # Average achieved write bandwidth in MiByte/s +system.physmem.avgRdBWSys 25.13 # Average system read bandwidth in MiByte/s +system.physmem.avgWrBWSys 16.97 # Average system write bandwidth in MiByte/s system.physmem.peakBW 12800.00 # Theoretical peak bandwidth in MiByte/s system.physmem.busUtil 0.33 # Data bus utilization in percentage system.physmem.busUtilRead 0.20 # Data bus utilization in percentage for reads system.physmem.busUtilWrite 0.13 # Data bus utilization in percentage for writes system.physmem.avgRdQLen 1.03 # Average read queue length when enqueuing -system.physmem.avgWrQLen 19.56 # Average write queue length when enqueuing -system.physmem.readRowHits 110801 # Number of row buffer hits during reads -system.physmem.writeRowHits 64737 # Number of row buffer hits during writes -system.physmem.readRowHitRate 77.04 # Row buffer hit rate for reads -system.physmem.writeRowHitRate 66.60 # Row buffer hit rate for writes -system.physmem.avgGap 1503822.69 # Average gap between requests -system.physmem.pageHitRate 72.83 # Row buffer hit rate, read and write combined -system.physmem_0.actEnergy 249185160 # Energy for activate commands per rank (pJ) -system.physmem_0.preEnergy 135964125 # Energy for precharge commands per rank (pJ) -system.physmem_0.readEnergy 559455000 # Energy for read commands per rank (pJ) -system.physmem_0.writeEnergy 312906240 # Energy for write commands per rank (pJ) -system.physmem_0.refreshEnergy 23685164880 # Energy for refresh commands per rank (pJ) -system.physmem_0.actBackEnergy 47417547600 # Energy for active background per rank (pJ) -system.physmem_0.preBackEnergy 175983265500 # Energy for precharge background per rank (pJ) -system.physmem_0.totalEnergy 248343488505 # Total energy per rank (pJ) -system.physmem_0.averagePower 684.841129 # Core power per rank (mW) -system.physmem_0.memoryStateTime::IDLE 292457177000 # Time in different power states -system.physmem_0.memoryStateTime::REF 
12108980000 # Time in different power states +system.physmem.avgWrQLen 19.60 # Average write queue length when enqueuing +system.physmem.readRowHits 110522 # Number of row buffer hits during reads +system.physmem.writeRowHits 64789 # Number of row buffer hits during writes +system.physmem.readRowHitRate 76.87 # Row buffer hit rate for reads +system.physmem.writeRowHitRate 66.67 # Row buffer hit rate for writes +system.physmem.avgGap 1520096.84 # Average gap between requests +system.physmem.pageHitRate 72.76 # Row buffer hit rate, read and write combined +system.physmem_0.actEnergy 249842880 # Energy for activate commands per rank (pJ) +system.physmem_0.preEnergy 136323000 # Energy for precharge commands per rank (pJ) +system.physmem_0.readEnergy 559080600 # Energy for read commands per rank (pJ) +system.physmem_0.writeEnergy 312783120 # Energy for write commands per rank (pJ) +system.physmem_0.refreshEnergy 23933850720 # Energy for refresh commands per rank (pJ) +system.physmem_0.actBackEnergy 47987220420 # Energy for active background per rank (pJ) +system.physmem_0.preBackEnergy 177768013500 # Energy for precharge background per rank (pJ) +system.physmem_0.totalEnergy 250947114240 # Total energy per rank (pJ) +system.physmem_0.averagePower 684.830589 # Core power per rank (mW) +system.physmem_0.memoryStateTime::IDLE 295423376000 # Time in different power states +system.physmem_0.memoryStateTime::REF 12236120000 # Time in different power states system.physmem_0.memoryStateTime::PRE_PDN 0 # Time in different power states -system.physmem_0.memoryStateTime::ACT 58063198250 # Time in different power states +system.physmem_0.memoryStateTime::ACT 58777294250 # Time in different power states system.physmem_0.memoryStateTime::ACT_PDN 0 # Time in different power states -system.physmem_1.actEnergy 245586600 # Energy for activate commands per rank (pJ) -system.physmem_1.preEnergy 134000625 # Energy for precharge commands per rank (pJ) -system.physmem_1.readEnergy 562138200 # Energy for read commands per rank (pJ) -system.physmem_1.writeEnergy 316684080 # Energy for write commands per rank (pJ) -system.physmem_1.refreshEnergy 23685164880 # Energy for refresh commands per rank (pJ) -system.physmem_1.actBackEnergy 46768401675 # Energy for active background per rank (pJ) -system.physmem_1.preBackEnergy 176552684250 # Energy for precharge background per rank (pJ) -system.physmem_1.totalEnergy 248264660310 # Total energy per rank (pJ) -system.physmem_1.averagePower 684.623774 # Core power per rank (mW) -system.physmem_1.memoryStateTime::IDLE 293406599500 # Time in different power states -system.physmem_1.memoryStateTime::REF 12108980000 # Time in different power states +system.physmem_1.actEnergy 246017520 # Energy for activate commands per rank (pJ) +system.physmem_1.preEnergy 134235750 # Energy for precharge commands per rank (pJ) +system.physmem_1.readEnergy 562114800 # Energy for read commands per rank (pJ) +system.physmem_1.writeEnergy 316645200 # Energy for write commands per rank (pJ) +system.physmem_1.refreshEnergy 23933850720 # Energy for refresh commands per rank (pJ) +system.physmem_1.actBackEnergy 47395195335 # Energy for active background per rank (pJ) +system.physmem_1.preBackEnergy 178287321750 # Energy for precharge background per rank (pJ) +system.physmem_1.totalEnergy 250875381075 # Total energy per rank (pJ) +system.physmem_1.averagePower 684.634868 # Core power per rank (mW) +system.physmem_1.memoryStateTime::IDLE 296291389000 # Time in different power states 
+system.physmem_1.memoryStateTime::REF 12236120000 # Time in different power states system.physmem_1.memoryStateTime::PRE_PDN 0 # Time in different power states -system.physmem_1.memoryStateTime::ACT 57113763250 # Time in different power states +system.physmem_1.memoryStateTime::ACT 57909758500 # Time in different power states system.physmem_1.memoryStateTime::ACT_PDN 0 # Time in different power states -system.pwrStateResidencyTicks::UNDEFINED 362631828500 # Cumulative time (in ticks) in various power states -system.cpu.branchPred.lookups 131880511 # Number of BP lookups -system.cpu.branchPred.condPredicted 98032974 # Number of conditional branches predicted -system.cpu.branchPred.condIncorrect 5909980 # Number of conditional branches incorrect -system.cpu.branchPred.BTBLookups 68420287 # Number of BTB lookups -system.cpu.branchPred.BTBHits 60518878 # Number of BTB hits +system.pwrStateResidencyTicks::UNDEFINED 366439129500 # Cumulative time (in ticks) in various power states +system.cpu.branchPred.lookups 132103761 # Number of BP lookups +system.cpu.branchPred.condPredicted 98193255 # Number of conditional branches predicted +system.cpu.branchPred.condIncorrect 5910050 # Number of conditional branches incorrect +system.cpu.branchPred.BTBLookups 68601566 # Number of BTB lookups +system.cpu.branchPred.BTBHits 60590451 # Number of BTB hits system.cpu.branchPred.BTBCorrect 0 # Number of correct BTB predictions (this stat may not work properly. -system.cpu.branchPred.BTBHitPct 88.451658 # BTB Hit Percentage -system.cpu.branchPred.usedRAS 9982385 # Number of times the RAS was used to get a target. -system.cpu.branchPred.RASInCorrect 18500 # Number of incorrect RAS predictions. -system.cpu.branchPred.indirectLookups 3889648 # Number of indirect predictor lookups. -system.cpu.branchPred.indirectHits 3881527 # Number of indirect target hits. -system.cpu.branchPred.indirectMisses 8121 # Number of indirect misses. -system.cpu.branchPredindirectMispredicted 53795 # Number of mispredicted indirect branches. +system.cpu.branchPred.BTBHitPct 88.322256 # BTB Hit Percentage +system.cpu.branchPred.usedRAS 10017120 # Number of times the RAS was used to get a target. +system.cpu.branchPred.RASInCorrect 18743 # Number of incorrect RAS predictions. +system.cpu.branchPred.indirectLookups 3891572 # Number of indirect predictor lookups. +system.cpu.branchPred.indirectHits 3883027 # Number of indirect target hits. +system.cpu.branchPred.indirectMisses 8545 # Number of indirect misses. +system.cpu.branchPredindirectMispredicted 54138 # Number of mispredicted indirect branches. 
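Aside (not part of the generated stats diff): the regenerated branch-predictor reference values above are internally consistent. A minimal Python sketch, assuming the usual definitions (BTB hit percentage as hits over lookups, indirect misses as lookups minus hits); the constants are copied from the '+' lines above and the variable names are illustrative only.

    # Branch-predictor figures from the updated reference stats above.
    btb_lookups      = 68_601_566   # system.cpu.branchPred.BTBLookups
    btb_hits         = 60_590_451   # system.cpu.branchPred.BTBHits
    indirect_lookups = 3_891_572    # system.cpu.branchPred.indirectLookups
    indirect_hits    = 3_883_027    # system.cpu.branchPred.indirectHits

    btb_hit_pct = 100.0 * btb_hits / btb_lookups        # ~88.322256, matches BTBHitPct
    indirect_misses = indirect_lookups - indirect_hits  # 8545, matches indirectMisses
    print(round(btb_hit_pct, 6), indirect_misses)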
system.cpu_clk_domain.clock 500 # Clock period in ticks -system.cpu.dstage2_mmu.stage2_tlb.walker.pwrStateResidencyTicks::UNDEFINED 362631828500 # Cumulative time (in ticks) in various power states +system.cpu.dstage2_mmu.stage2_tlb.walker.pwrStateResidencyTicks::UNDEFINED 366439129500 # Cumulative time (in ticks) in various power states system.cpu.dstage2_mmu.stage2_tlb.walker.walks 0 # Table walker walks requested system.cpu.dstage2_mmu.stage2_tlb.walker.walkRequestOrigin_Requested::Data 0 # Table walker requests started/completed, data/inst system.cpu.dstage2_mmu.stage2_tlb.walker.walkRequestOrigin_Requested::Inst 0 # Table walker requests started/completed, data/inst @@ -332,7 +335,7 @@ system.cpu.dstage2_mmu.stage2_tlb.inst_accesses 0 system.cpu.dstage2_mmu.stage2_tlb.hits 0 # DTB hits system.cpu.dstage2_mmu.stage2_tlb.misses 0 # DTB misses system.cpu.dstage2_mmu.stage2_tlb.accesses 0 # DTB accesses -system.cpu.dtb.walker.pwrStateResidencyTicks::UNDEFINED 362631828500 # Cumulative time (in ticks) in various power states +system.cpu.dtb.walker.pwrStateResidencyTicks::UNDEFINED 366439129500 # Cumulative time (in ticks) in various power states system.cpu.dtb.walker.walks 0 # Table walker walks requested system.cpu.dtb.walker.walkRequestOrigin_Requested::Data 0 # Table walker requests started/completed, data/inst system.cpu.dtb.walker.walkRequestOrigin_Requested::Inst 0 # Table walker requests started/completed, data/inst @@ -362,7 +365,7 @@ system.cpu.dtb.inst_accesses 0 # IT system.cpu.dtb.hits 0 # DTB hits system.cpu.dtb.misses 0 # DTB misses system.cpu.dtb.accesses 0 # DTB accesses -system.cpu.istage2_mmu.stage2_tlb.walker.pwrStateResidencyTicks::UNDEFINED 362631828500 # Cumulative time (in ticks) in various power states +system.cpu.istage2_mmu.stage2_tlb.walker.pwrStateResidencyTicks::UNDEFINED 366439129500 # Cumulative time (in ticks) in various power states system.cpu.istage2_mmu.stage2_tlb.walker.walks 0 # Table walker walks requested system.cpu.istage2_mmu.stage2_tlb.walker.walkRequestOrigin_Requested::Data 0 # Table walker requests started/completed, data/inst system.cpu.istage2_mmu.stage2_tlb.walker.walkRequestOrigin_Requested::Inst 0 # Table walker requests started/completed, data/inst @@ -392,7 +395,7 @@ system.cpu.istage2_mmu.stage2_tlb.inst_accesses 0 system.cpu.istage2_mmu.stage2_tlb.hits 0 # DTB hits system.cpu.istage2_mmu.stage2_tlb.misses 0 # DTB misses system.cpu.istage2_mmu.stage2_tlb.accesses 0 # DTB accesses -system.cpu.itb.walker.pwrStateResidencyTicks::UNDEFINED 362631828500 # Cumulative time (in ticks) in various power states +system.cpu.itb.walker.pwrStateResidencyTicks::UNDEFINED 366439129500 # Cumulative time (in ticks) in various power states system.cpu.itb.walker.walks 0 # Table walker walks requested system.cpu.itb.walker.walkRequestOrigin_Requested::Data 0 # Table walker requests started/completed, data/inst system.cpu.itb.walker.walkRequestOrigin_Requested::Inst 0 # Table walker requests started/completed, data/inst @@ -423,16 +426,16 @@ system.cpu.itb.hits 0 # DT system.cpu.itb.misses 0 # DTB misses system.cpu.itb.accesses 0 # DTB accesses system.cpu.workload.num_syscalls 548 # Number of system calls -system.cpu.pwrStateResidencyTicks::ON 362631828500 # Cumulative time (in ticks) in various power states -system.cpu.numCycles 725263657 # number of cpu cycles simulated +system.cpu.pwrStateResidencyTicks::ON 366439129500 # Cumulative time (in ticks) in various power states +system.cpu.numCycles 732878259 # number of cpu cycles simulated 
system.cpu.numWorkItemsStarted 0 # number of work items this cpu started system.cpu.numWorkItemsCompleted 0 # number of work items this cpu completed system.cpu.committedInsts 506579366 # Number of instructions committed system.cpu.committedOps 548692589 # Number of ops (including micro ops) committed -system.cpu.discardedOps 12911806 # Number of ops (including micro ops) which were discarded before commit +system.cpu.discardedOps 12939743 # Number of ops (including micro ops) which were discarded before commit system.cpu.numFetchSuspends 0 # Number of times Execute suspended instruction fetching -system.cpu.cpi 1.431688 # CPI: cycles per instruction -system.cpu.ipc 0.698476 # IPC: instructions per cycle +system.cpu.cpi 1.446720 # CPI: cycles per instruction +system.cpu.ipc 0.691219 # IPC: instructions per cycle system.cpu.op_class_0::No_OpClass 0 0.00% 0.00% # Class of committed instruction system.cpu.op_class_0::IntAlu 375609862 68.46% 68.46% # Class of committed instruction system.cpu.op_class_0::IntMult 339219 0.06% 68.52% # Class of committed instruction @@ -468,469 +471,470 @@ system.cpu.op_class_0::MemWrite 56860222 10.36% 100.00% # Cl system.cpu.op_class_0::IprAccess 0 0.00% 100.00% # Class of committed instruction system.cpu.op_class_0::InstPrefetch 0 0.00% 100.00% # Class of committed instruction system.cpu.op_class_0::total 548692589 # Class of committed instruction -system.cpu.tickCycles 688919604 # Number of cycles that the object actually ticked -system.cpu.idleCycles 36344053 # Total number of cycles that the object has spent stopped -system.cpu.dcache.tags.pwrStateResidencyTicks::UNDEFINED 362631828500 # Cumulative time (in ticks) in various power states -system.cpu.dcache.tags.replacements 1141477 # number of replacements -system.cpu.dcache.tags.tagsinuse 4070.722142 # Cycle average of tags in use -system.cpu.dcache.tags.total_refs 170992714 # Total number of references to valid blocks. -system.cpu.dcache.tags.sampled_refs 1145573 # Sample count of references to valid blocks. -system.cpu.dcache.tags.avg_refs 149.263918 # Average number of references to valid blocks. -system.cpu.dcache.tags.warmup_cycle 4896334500 # Cycle when the warmup percentage was hit. -system.cpu.dcache.tags.occ_blocks::cpu.data 4070.722142 # Average occupied blocks per requestor -system.cpu.dcache.tags.occ_percent::cpu.data 0.993829 # Average percentage of cache occupancy -system.cpu.dcache.tags.occ_percent::total 0.993829 # Average percentage of cache occupancy +system.cpu.tickCycles 694071941 # Number of cycles that the object actually ticked +system.cpu.idleCycles 38806318 # Total number of cycles that the object has spent stopped +system.cpu.dcache.tags.pwrStateResidencyTicks::UNDEFINED 366439129500 # Cumulative time (in ticks) in various power states +system.cpu.dcache.tags.replacements 1141337 # number of replacements +system.cpu.dcache.tags.tagsinuse 4070.313641 # Cycle average of tags in use +system.cpu.dcache.tags.total_refs 171083825 # Total number of references to valid blocks. +system.cpu.dcache.tags.sampled_refs 1145433 # Sample count of references to valid blocks. +system.cpu.dcache.tags.avg_refs 149.361704 # Average number of references to valid blocks. +system.cpu.dcache.tags.warmup_cycle 5033914500 # Cycle when the warmup percentage was hit. 
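Aside (not part of the generated stats diff): the MinorCPU throughput numbers above hang together. A short Python sketch, assuming numCycles is tickCycles plus idleCycles and that CPI/IPC are taken over committed instructions (not micro-ops); the constants are copied from the stats above and the names are illustrative only.

    # CPU cycle and throughput figures from the updated reference stats above.
    tick_cycles     = 694_071_941   # system.cpu.tickCycles
    idle_cycles     = 38_806_318    # system.cpu.idleCycles
    committed_insts = 506_579_366   # system.cpu.committedInsts

    num_cycles = tick_cycles + idle_cycles   # 732878259, matches system.cpu.numCycles
    cpi = num_cycles / committed_insts       # ~1.446720, matches system.cpu.cpi
    ipc = committed_insts / num_cycles       # ~0.691219, matches system.cpu.ipc
    print(num_cycles, round(cpi, 6), round(ipc, 6))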
+system.cpu.dcache.tags.occ_blocks::cpu.data 4070.313641 # Average occupied blocks per requestor +system.cpu.dcache.tags.occ_percent::cpu.data 0.993729 # Average percentage of cache occupancy +system.cpu.dcache.tags.occ_percent::total 0.993729 # Average percentage of cache occupancy system.cpu.dcache.tags.occ_task_id_blocks::1024 4096 # Occupied blocks per task id -system.cpu.dcache.tags.age_task_id_blocks_1024::0 27 # Occupied blocks per task id -system.cpu.dcache.tags.age_task_id_blocks_1024::1 19 # Occupied blocks per task id -system.cpu.dcache.tags.age_task_id_blocks_1024::2 553 # Occupied blocks per task id -system.cpu.dcache.tags.age_task_id_blocks_1024::3 3497 # Occupied blocks per task id +system.cpu.dcache.tags.age_task_id_blocks_1024::0 28 # Occupied blocks per task id +system.cpu.dcache.tags.age_task_id_blocks_1024::1 18 # Occupied blocks per task id +system.cpu.dcache.tags.age_task_id_blocks_1024::2 549 # Occupied blocks per task id +system.cpu.dcache.tags.age_task_id_blocks_1024::3 3501 # Occupied blocks per task id system.cpu.dcache.tags.occ_task_id_percent::1024 1 # Percentage of cache occupancy per task id -system.cpu.dcache.tags.tag_accesses 346245015 # Number of tag accesses -system.cpu.dcache.tags.data_accesses 346245015 # Number of data accesses -system.cpu.dcache.pwrStateResidencyTicks::UNDEFINED 362631828500 # Cumulative time (in ticks) in various power states -system.cpu.dcache.ReadReq_hits::cpu.data 114475063 # number of ReadReq hits -system.cpu.dcache.ReadReq_hits::total 114475063 # number of ReadReq hits -system.cpu.dcache.WriteReq_hits::cpu.data 53537828 # number of WriteReq hits -system.cpu.dcache.WriteReq_hits::total 53537828 # number of WriteReq hits -system.cpu.dcache.SoftPFReq_hits::cpu.data 2741 # number of SoftPFReq hits -system.cpu.dcache.SoftPFReq_hits::total 2741 # number of SoftPFReq hits +system.cpu.dcache.tags.tag_accesses 346338115 # Number of tag accesses +system.cpu.dcache.tags.data_accesses 346338115 # Number of data accesses +system.cpu.dcache.pwrStateResidencyTicks::UNDEFINED 366439129500 # Cumulative time (in ticks) in various power states +system.cpu.dcache.ReadReq_hits::cpu.data 114566020 # number of ReadReq hits +system.cpu.dcache.ReadReq_hits::total 114566020 # number of ReadReq hits +system.cpu.dcache.WriteReq_hits::cpu.data 53537929 # number of WriteReq hits +system.cpu.dcache.WriteReq_hits::total 53537929 # number of WriteReq hits +system.cpu.dcache.SoftPFReq_hits::cpu.data 2794 # number of SoftPFReq hits +system.cpu.dcache.SoftPFReq_hits::total 2794 # number of SoftPFReq hits system.cpu.dcache.LoadLockedReq_hits::cpu.data 1488541 # number of LoadLockedReq hits system.cpu.dcache.LoadLockedReq_hits::total 1488541 # number of LoadLockedReq hits system.cpu.dcache.StoreCondReq_hits::cpu.data 1488541 # number of StoreCondReq hits system.cpu.dcache.StoreCondReq_hits::total 1488541 # number of StoreCondReq hits -system.cpu.dcache.demand_hits::cpu.data 168012891 # number of demand (read+write) hits -system.cpu.dcache.demand_hits::total 168012891 # number of demand (read+write) hits -system.cpu.dcache.overall_hits::cpu.data 168015632 # number of overall hits -system.cpu.dcache.overall_hits::total 168015632 # number of overall hits -system.cpu.dcache.ReadReq_misses::cpu.data 855770 # number of ReadReq misses -system.cpu.dcache.ReadReq_misses::total 855770 # number of ReadReq misses -system.cpu.dcache.WriteReq_misses::cpu.data 701221 # number of WriteReq misses -system.cpu.dcache.WriteReq_misses::total 701221 # number of WriteReq misses 
-system.cpu.dcache.SoftPFReq_misses::cpu.data 16 # number of SoftPFReq misses -system.cpu.dcache.SoftPFReq_misses::total 16 # number of SoftPFReq misses -system.cpu.dcache.demand_misses::cpu.data 1556991 # number of demand (read+write) misses -system.cpu.dcache.demand_misses::total 1556991 # number of demand (read+write) misses -system.cpu.dcache.overall_misses::cpu.data 1557007 # number of overall misses -system.cpu.dcache.overall_misses::total 1557007 # number of overall misses -system.cpu.dcache.ReadReq_miss_latency::cpu.data 14058873500 # number of ReadReq miss cycles -system.cpu.dcache.ReadReq_miss_latency::total 14058873500 # number of ReadReq miss cycles -system.cpu.dcache.WriteReq_miss_latency::cpu.data 21921294000 # number of WriteReq miss cycles -system.cpu.dcache.WriteReq_miss_latency::total 21921294000 # number of WriteReq miss cycles -system.cpu.dcache.demand_miss_latency::cpu.data 35980167500 # number of demand (read+write) miss cycles -system.cpu.dcache.demand_miss_latency::total 35980167500 # number of demand (read+write) miss cycles -system.cpu.dcache.overall_miss_latency::cpu.data 35980167500 # number of overall miss cycles -system.cpu.dcache.overall_miss_latency::total 35980167500 # number of overall miss cycles -system.cpu.dcache.ReadReq_accesses::cpu.data 115330833 # number of ReadReq accesses(hits+misses) -system.cpu.dcache.ReadReq_accesses::total 115330833 # number of ReadReq accesses(hits+misses) +system.cpu.dcache.demand_hits::cpu.data 168103949 # number of demand (read+write) hits +system.cpu.dcache.demand_hits::total 168103949 # number of demand (read+write) hits +system.cpu.dcache.overall_hits::cpu.data 168106743 # number of overall hits +system.cpu.dcache.overall_hits::total 168106743 # number of overall hits +system.cpu.dcache.ReadReq_misses::cpu.data 811381 # number of ReadReq misses +system.cpu.dcache.ReadReq_misses::total 811381 # number of ReadReq misses +system.cpu.dcache.WriteReq_misses::cpu.data 701120 # number of WriteReq misses +system.cpu.dcache.WriteReq_misses::total 701120 # number of WriteReq misses +system.cpu.dcache.SoftPFReq_misses::cpu.data 15 # number of SoftPFReq misses +system.cpu.dcache.SoftPFReq_misses::total 15 # number of SoftPFReq misses +system.cpu.dcache.demand_misses::cpu.data 1512501 # number of demand (read+write) misses +system.cpu.dcache.demand_misses::total 1512501 # number of demand (read+write) misses +system.cpu.dcache.overall_misses::cpu.data 1512516 # number of overall misses +system.cpu.dcache.overall_misses::total 1512516 # number of overall misses +system.cpu.dcache.ReadReq_miss_latency::cpu.data 13462011000 # number of ReadReq miss cycles +system.cpu.dcache.ReadReq_miss_latency::total 13462011000 # number of ReadReq miss cycles +system.cpu.dcache.WriteReq_miss_latency::cpu.data 21943272000 # number of WriteReq miss cycles +system.cpu.dcache.WriteReq_miss_latency::total 21943272000 # number of WriteReq miss cycles +system.cpu.dcache.demand_miss_latency::cpu.data 35405283000 # number of demand (read+write) miss cycles +system.cpu.dcache.demand_miss_latency::total 35405283000 # number of demand (read+write) miss cycles +system.cpu.dcache.overall_miss_latency::cpu.data 35405283000 # number of overall miss cycles +system.cpu.dcache.overall_miss_latency::total 35405283000 # number of overall miss cycles +system.cpu.dcache.ReadReq_accesses::cpu.data 115377401 # number of ReadReq accesses(hits+misses) +system.cpu.dcache.ReadReq_accesses::total 115377401 # number of ReadReq accesses(hits+misses) 
system.cpu.dcache.WriteReq_accesses::cpu.data 54239049 # number of WriteReq accesses(hits+misses) system.cpu.dcache.WriteReq_accesses::total 54239049 # number of WriteReq accesses(hits+misses) -system.cpu.dcache.SoftPFReq_accesses::cpu.data 2757 # number of SoftPFReq accesses(hits+misses) -system.cpu.dcache.SoftPFReq_accesses::total 2757 # number of SoftPFReq accesses(hits+misses) +system.cpu.dcache.SoftPFReq_accesses::cpu.data 2809 # number of SoftPFReq accesses(hits+misses) +system.cpu.dcache.SoftPFReq_accesses::total 2809 # number of SoftPFReq accesses(hits+misses) system.cpu.dcache.LoadLockedReq_accesses::cpu.data 1488541 # number of LoadLockedReq accesses(hits+misses) system.cpu.dcache.LoadLockedReq_accesses::total 1488541 # number of LoadLockedReq accesses(hits+misses) system.cpu.dcache.StoreCondReq_accesses::cpu.data 1488541 # number of StoreCondReq accesses(hits+misses) system.cpu.dcache.StoreCondReq_accesses::total 1488541 # number of StoreCondReq accesses(hits+misses) -system.cpu.dcache.demand_accesses::cpu.data 169569882 # number of demand (read+write) accesses -system.cpu.dcache.demand_accesses::total 169569882 # number of demand (read+write) accesses -system.cpu.dcache.overall_accesses::cpu.data 169572639 # number of overall (read+write) accesses -system.cpu.dcache.overall_accesses::total 169572639 # number of overall (read+write) accesses -system.cpu.dcache.ReadReq_miss_rate::cpu.data 0.007420 # miss rate for ReadReq accesses -system.cpu.dcache.ReadReq_miss_rate::total 0.007420 # miss rate for ReadReq accesses -system.cpu.dcache.WriteReq_miss_rate::cpu.data 0.012928 # miss rate for WriteReq accesses -system.cpu.dcache.WriteReq_miss_rate::total 0.012928 # miss rate for WriteReq accesses -system.cpu.dcache.SoftPFReq_miss_rate::cpu.data 0.005803 # miss rate for SoftPFReq accesses -system.cpu.dcache.SoftPFReq_miss_rate::total 0.005803 # miss rate for SoftPFReq accesses -system.cpu.dcache.demand_miss_rate::cpu.data 0.009182 # miss rate for demand accesses -system.cpu.dcache.demand_miss_rate::total 0.009182 # miss rate for demand accesses -system.cpu.dcache.overall_miss_rate::cpu.data 0.009182 # miss rate for overall accesses -system.cpu.dcache.overall_miss_rate::total 0.009182 # miss rate for overall accesses -system.cpu.dcache.ReadReq_avg_miss_latency::cpu.data 16428.331795 # average ReadReq miss latency -system.cpu.dcache.ReadReq_avg_miss_latency::total 16428.331795 # average ReadReq miss latency -system.cpu.dcache.WriteReq_avg_miss_latency::cpu.data 31261.605115 # average WriteReq miss latency -system.cpu.dcache.WriteReq_avg_miss_latency::total 31261.605115 # average WriteReq miss latency -system.cpu.dcache.demand_avg_miss_latency::cpu.data 23108.783224 # average overall miss latency -system.cpu.dcache.demand_avg_miss_latency::total 23108.783224 # average overall miss latency -system.cpu.dcache.overall_avg_miss_latency::cpu.data 23108.545755 # average overall miss latency -system.cpu.dcache.overall_avg_miss_latency::total 23108.545755 # average overall miss latency +system.cpu.dcache.demand_accesses::cpu.data 169616450 # number of demand (read+write) accesses +system.cpu.dcache.demand_accesses::total 169616450 # number of demand (read+write) accesses +system.cpu.dcache.overall_accesses::cpu.data 169619259 # number of overall (read+write) accesses +system.cpu.dcache.overall_accesses::total 169619259 # number of overall (read+write) accesses +system.cpu.dcache.ReadReq_miss_rate::cpu.data 0.007032 # miss rate for ReadReq accesses +system.cpu.dcache.ReadReq_miss_rate::total 
0.007032 # miss rate for ReadReq accesses +system.cpu.dcache.WriteReq_miss_rate::cpu.data 0.012926 # miss rate for WriteReq accesses +system.cpu.dcache.WriteReq_miss_rate::total 0.012926 # miss rate for WriteReq accesses +system.cpu.dcache.SoftPFReq_miss_rate::cpu.data 0.005340 # miss rate for SoftPFReq accesses +system.cpu.dcache.SoftPFReq_miss_rate::total 0.005340 # miss rate for SoftPFReq accesses +system.cpu.dcache.demand_miss_rate::cpu.data 0.008917 # miss rate for demand accesses +system.cpu.dcache.demand_miss_rate::total 0.008917 # miss rate for demand accesses +system.cpu.dcache.overall_miss_rate::cpu.data 0.008917 # miss rate for overall accesses +system.cpu.dcache.overall_miss_rate::total 0.008917 # miss rate for overall accesses +system.cpu.dcache.ReadReq_avg_miss_latency::cpu.data 16591.479219 # average ReadReq miss latency +system.cpu.dcache.ReadReq_avg_miss_latency::total 16591.479219 # average ReadReq miss latency +system.cpu.dcache.WriteReq_avg_miss_latency::cpu.data 31297.455500 # average WriteReq miss latency +system.cpu.dcache.WriteReq_avg_miss_latency::total 31297.455500 # average WriteReq miss latency +system.cpu.dcache.demand_avg_miss_latency::cpu.data 23408.436094 # average overall miss latency +system.cpu.dcache.demand_avg_miss_latency::total 23408.436094 # average overall miss latency +system.cpu.dcache.overall_avg_miss_latency::cpu.data 23408.203946 # average overall miss latency +system.cpu.dcache.overall_avg_miss_latency::total 23408.203946 # average overall miss latency system.cpu.dcache.blocked_cycles::no_mshrs 0 # number of cycles access was blocked system.cpu.dcache.blocked_cycles::no_targets 0 # number of cycles access was blocked system.cpu.dcache.blocked::no_mshrs 0 # number of cycles access was blocked system.cpu.dcache.blocked::no_targets 0 # number of cycles access was blocked system.cpu.dcache.avg_blocked_cycles::no_mshrs nan # average number of cycles each access was blocked system.cpu.dcache.avg_blocked_cycles::no_targets nan # average number of cycles each access was blocked -system.cpu.dcache.writebacks::writebacks 1069336 # number of writebacks -system.cpu.dcache.writebacks::total 1069336 # number of writebacks -system.cpu.dcache.ReadReq_mshr_hits::cpu.data 66650 # number of ReadReq MSHR hits -system.cpu.dcache.ReadReq_mshr_hits::total 66650 # number of ReadReq MSHR hits -system.cpu.dcache.WriteReq_mshr_hits::cpu.data 344781 # number of WriteReq MSHR hits -system.cpu.dcache.WriteReq_mshr_hits::total 344781 # number of WriteReq MSHR hits -system.cpu.dcache.demand_mshr_hits::cpu.data 411431 # number of demand (read+write) MSHR hits -system.cpu.dcache.demand_mshr_hits::total 411431 # number of demand (read+write) MSHR hits -system.cpu.dcache.overall_mshr_hits::cpu.data 411431 # number of overall MSHR hits -system.cpu.dcache.overall_mshr_hits::total 411431 # number of overall MSHR hits -system.cpu.dcache.ReadReq_mshr_misses::cpu.data 789120 # number of ReadReq MSHR misses -system.cpu.dcache.ReadReq_mshr_misses::total 789120 # number of ReadReq MSHR misses -system.cpu.dcache.WriteReq_mshr_misses::cpu.data 356440 # number of WriteReq MSHR misses -system.cpu.dcache.WriteReq_mshr_misses::total 356440 # number of WriteReq MSHR misses -system.cpu.dcache.SoftPFReq_mshr_misses::cpu.data 13 # number of SoftPFReq MSHR misses -system.cpu.dcache.SoftPFReq_mshr_misses::total 13 # number of SoftPFReq MSHR misses -system.cpu.dcache.demand_mshr_misses::cpu.data 1145560 # number of demand (read+write) MSHR misses -system.cpu.dcache.demand_mshr_misses::total 1145560 # 
number of demand (read+write) MSHR misses -system.cpu.dcache.overall_mshr_misses::cpu.data 1145573 # number of overall MSHR misses -system.cpu.dcache.overall_mshr_misses::total 1145573 # number of overall MSHR misses -system.cpu.dcache.ReadReq_mshr_miss_latency::cpu.data 12372328000 # number of ReadReq MSHR miss cycles -system.cpu.dcache.ReadReq_mshr_miss_latency::total 12372328000 # number of ReadReq MSHR miss cycles -system.cpu.dcache.WriteReq_mshr_miss_latency::cpu.data 11135047500 # number of WriteReq MSHR miss cycles -system.cpu.dcache.WriteReq_mshr_miss_latency::total 11135047500 # number of WriteReq MSHR miss cycles -system.cpu.dcache.SoftPFReq_mshr_miss_latency::cpu.data 1042000 # number of SoftPFReq MSHR miss cycles -system.cpu.dcache.SoftPFReq_mshr_miss_latency::total 1042000 # number of SoftPFReq MSHR miss cycles -system.cpu.dcache.demand_mshr_miss_latency::cpu.data 23507375500 # number of demand (read+write) MSHR miss cycles -system.cpu.dcache.demand_mshr_miss_latency::total 23507375500 # number of demand (read+write) MSHR miss cycles -system.cpu.dcache.overall_mshr_miss_latency::cpu.data 23508417500 # number of overall MSHR miss cycles -system.cpu.dcache.overall_mshr_miss_latency::total 23508417500 # number of overall MSHR miss cycles -system.cpu.dcache.ReadReq_mshr_miss_rate::cpu.data 0.006842 # mshr miss rate for ReadReq accesses -system.cpu.dcache.ReadReq_mshr_miss_rate::total 0.006842 # mshr miss rate for ReadReq accesses -system.cpu.dcache.WriteReq_mshr_miss_rate::cpu.data 0.006572 # mshr miss rate for WriteReq accesses -system.cpu.dcache.WriteReq_mshr_miss_rate::total 0.006572 # mshr miss rate for WriteReq accesses -system.cpu.dcache.SoftPFReq_mshr_miss_rate::cpu.data 0.004715 # mshr miss rate for SoftPFReq accesses -system.cpu.dcache.SoftPFReq_mshr_miss_rate::total 0.004715 # mshr miss rate for SoftPFReq accesses -system.cpu.dcache.demand_mshr_miss_rate::cpu.data 0.006756 # mshr miss rate for demand accesses -system.cpu.dcache.demand_mshr_miss_rate::total 0.006756 # mshr miss rate for demand accesses -system.cpu.dcache.overall_mshr_miss_rate::cpu.data 0.006756 # mshr miss rate for overall accesses -system.cpu.dcache.overall_mshr_miss_rate::total 0.006756 # mshr miss rate for overall accesses -system.cpu.dcache.ReadReq_avg_mshr_miss_latency::cpu.data 15678.639497 # average ReadReq mshr miss latency -system.cpu.dcache.ReadReq_avg_mshr_miss_latency::total 15678.639497 # average ReadReq mshr miss latency -system.cpu.dcache.WriteReq_avg_mshr_miss_latency::cpu.data 31239.612558 # average WriteReq mshr miss latency -system.cpu.dcache.WriteReq_avg_mshr_miss_latency::total 31239.612558 # average WriteReq mshr miss latency -system.cpu.dcache.SoftPFReq_avg_mshr_miss_latency::cpu.data 80153.846154 # average SoftPFReq mshr miss latency -system.cpu.dcache.SoftPFReq_avg_mshr_miss_latency::total 80153.846154 # average SoftPFReq mshr miss latency -system.cpu.dcache.demand_avg_mshr_miss_latency::cpu.data 20520.422763 # average overall mshr miss latency -system.cpu.dcache.demand_avg_mshr_miss_latency::total 20520.422763 # average overall mshr miss latency -system.cpu.dcache.overall_avg_mshr_miss_latency::cpu.data 20521.099485 # average overall mshr miss latency -system.cpu.dcache.overall_avg_mshr_miss_latency::total 20521.099485 # average overall mshr miss latency -system.cpu.icache.tags.pwrStateResidencyTicks::UNDEFINED 362631828500 # Cumulative time (in ticks) in various power states -system.cpu.icache.tags.replacements 18130 # number of replacements -system.cpu.icache.tags.tagsinuse 
1186.413401 # Cycle average of tags in use -system.cpu.icache.tags.total_refs 198770599 # Total number of references to valid blocks. -system.cpu.icache.tags.sampled_refs 20001 # Sample count of references to valid blocks. -system.cpu.icache.tags.avg_refs 9938.033048 # Average number of references to valid blocks. +system.cpu.dcache.writebacks::writebacks 1069267 # number of writebacks +system.cpu.dcache.writebacks::total 1069267 # number of writebacks +system.cpu.dcache.ReadReq_mshr_hits::cpu.data 22348 # number of ReadReq MSHR hits +system.cpu.dcache.ReadReq_mshr_hits::total 22348 # number of ReadReq MSHR hits +system.cpu.dcache.WriteReq_mshr_hits::cpu.data 344732 # number of WriteReq MSHR hits +system.cpu.dcache.WriteReq_mshr_hits::total 344732 # number of WriteReq MSHR hits +system.cpu.dcache.demand_mshr_hits::cpu.data 367080 # number of demand (read+write) MSHR hits +system.cpu.dcache.demand_mshr_hits::total 367080 # number of demand (read+write) MSHR hits +system.cpu.dcache.overall_mshr_hits::cpu.data 367080 # number of overall MSHR hits +system.cpu.dcache.overall_mshr_hits::total 367080 # number of overall MSHR hits +system.cpu.dcache.ReadReq_mshr_misses::cpu.data 789033 # number of ReadReq MSHR misses +system.cpu.dcache.ReadReq_mshr_misses::total 789033 # number of ReadReq MSHR misses +system.cpu.dcache.WriteReq_mshr_misses::cpu.data 356388 # number of WriteReq MSHR misses +system.cpu.dcache.WriteReq_mshr_misses::total 356388 # number of WriteReq MSHR misses +system.cpu.dcache.SoftPFReq_mshr_misses::cpu.data 12 # number of SoftPFReq MSHR misses +system.cpu.dcache.SoftPFReq_mshr_misses::total 12 # number of SoftPFReq MSHR misses +system.cpu.dcache.demand_mshr_misses::cpu.data 1145421 # number of demand (read+write) MSHR misses +system.cpu.dcache.demand_mshr_misses::total 1145421 # number of demand (read+write) MSHR misses +system.cpu.dcache.overall_mshr_misses::cpu.data 1145433 # number of overall MSHR misses +system.cpu.dcache.overall_mshr_misses::total 1145433 # number of overall MSHR misses +system.cpu.dcache.ReadReq_mshr_miss_latency::cpu.data 12369658000 # number of ReadReq MSHR miss cycles +system.cpu.dcache.ReadReq_mshr_miss_latency::total 12369658000 # number of ReadReq MSHR miss cycles +system.cpu.dcache.WriteReq_mshr_miss_latency::cpu.data 11145800500 # number of WriteReq MSHR miss cycles +system.cpu.dcache.WriteReq_mshr_miss_latency::total 11145800500 # number of WriteReq MSHR miss cycles +system.cpu.dcache.SoftPFReq_mshr_miss_latency::cpu.data 1093500 # number of SoftPFReq MSHR miss cycles +system.cpu.dcache.SoftPFReq_mshr_miss_latency::total 1093500 # number of SoftPFReq MSHR miss cycles +system.cpu.dcache.demand_mshr_miss_latency::cpu.data 23515458500 # number of demand (read+write) MSHR miss cycles +system.cpu.dcache.demand_mshr_miss_latency::total 23515458500 # number of demand (read+write) MSHR miss cycles +system.cpu.dcache.overall_mshr_miss_latency::cpu.data 23516552000 # number of overall MSHR miss cycles +system.cpu.dcache.overall_mshr_miss_latency::total 23516552000 # number of overall MSHR miss cycles +system.cpu.dcache.ReadReq_mshr_miss_rate::cpu.data 0.006839 # mshr miss rate for ReadReq accesses +system.cpu.dcache.ReadReq_mshr_miss_rate::total 0.006839 # mshr miss rate for ReadReq accesses +system.cpu.dcache.WriteReq_mshr_miss_rate::cpu.data 0.006571 # mshr miss rate for WriteReq accesses +system.cpu.dcache.WriteReq_mshr_miss_rate::total 0.006571 # mshr miss rate for WriteReq accesses +system.cpu.dcache.SoftPFReq_mshr_miss_rate::cpu.data 0.004272 # mshr 
miss rate for SoftPFReq accesses +system.cpu.dcache.SoftPFReq_mshr_miss_rate::total 0.004272 # mshr miss rate for SoftPFReq accesses +system.cpu.dcache.demand_mshr_miss_rate::cpu.data 0.006753 # mshr miss rate for demand accesses +system.cpu.dcache.demand_mshr_miss_rate::total 0.006753 # mshr miss rate for demand accesses +system.cpu.dcache.overall_mshr_miss_rate::cpu.data 0.006753 # mshr miss rate for overall accesses +system.cpu.dcache.overall_mshr_miss_rate::total 0.006753 # mshr miss rate for overall accesses +system.cpu.dcache.ReadReq_avg_mshr_miss_latency::cpu.data 15676.984359 # average ReadReq mshr miss latency +system.cpu.dcache.ReadReq_avg_mshr_miss_latency::total 15676.984359 # average ReadReq mshr miss latency +system.cpu.dcache.WriteReq_avg_mshr_miss_latency::cpu.data 31274.342851 # average WriteReq mshr miss latency +system.cpu.dcache.WriteReq_avg_mshr_miss_latency::total 31274.342851 # average WriteReq mshr miss latency +system.cpu.dcache.SoftPFReq_avg_mshr_miss_latency::cpu.data 91125 # average SoftPFReq mshr miss latency +system.cpu.dcache.SoftPFReq_avg_mshr_miss_latency::total 91125 # average SoftPFReq mshr miss latency +system.cpu.dcache.demand_avg_mshr_miss_latency::cpu.data 20529.969767 # average overall mshr miss latency +system.cpu.dcache.demand_avg_mshr_miss_latency::total 20529.969767 # average overall mshr miss latency +system.cpu.dcache.overall_avg_mshr_miss_latency::cpu.data 20530.709347 # average overall mshr miss latency +system.cpu.dcache.overall_avg_mshr_miss_latency::total 20530.709347 # average overall mshr miss latency +system.cpu.icache.tags.pwrStateResidencyTicks::UNDEFINED 366439129500 # Cumulative time (in ticks) in various power states +system.cpu.icache.tags.replacements 18175 # number of replacements +system.cpu.icache.tags.tagsinuse 1187.153068 # Cycle average of tags in use +system.cpu.icache.tags.total_refs 199148908 # Total number of references to valid blocks. +system.cpu.icache.tags.sampled_refs 20047 # Sample count of references to valid blocks. +system.cpu.icache.tags.avg_refs 9934.100264 # Average number of references to valid blocks. system.cpu.icache.tags.warmup_cycle 0 # Cycle when the warmup percentage was hit. 
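Aside (not part of the generated stats diff): the updated dcache demand-side counters above satisfy the usual cache identities. A small Python sketch, assuming demand accesses are hits plus misses and that MSHR hits count the misses which coalesced into an already-outstanding MSHR (so MSHR hits plus MSHR misses equals demand misses); the constants come from the '+' lines above and the names are illustrative only.

    # Demand-side dcache figures from the updated reference stats above.
    demand_hits        = 168_103_949     # demand_hits::total
    demand_misses      = 1_512_501       # demand_misses::total
    demand_mshr_hits   = 367_080         # demand_mshr_hits::total
    demand_mshr_misses = 1_145_421       # demand_mshr_misses::total
    demand_miss_ticks  = 35_405_283_000  # demand_miss_latency::total (ticks)

    demand_accesses = demand_hits + demand_misses     # 169616450, matches demand_accesses
    assert demand_mshr_hits + demand_mshr_misses == demand_misses
    miss_rate    = demand_misses / demand_accesses    # ~0.008917, matches demand_miss_rate
    avg_miss_lat = demand_miss_ticks / demand_misses  # ~23408.44 ticks, matches demand_avg_miss_latency
    print(demand_accesses, round(miss_rate, 6), round(avg_miss_lat, 2))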
-system.cpu.icache.tags.occ_blocks::cpu.inst 1186.413401 # Average occupied blocks per requestor -system.cpu.icache.tags.occ_percent::cpu.inst 0.579303 # Average percentage of cache occupancy -system.cpu.icache.tags.occ_percent::total 0.579303 # Average percentage of cache occupancy -system.cpu.icache.tags.occ_task_id_blocks::1024 1871 # Occupied blocks per task id +system.cpu.icache.tags.occ_blocks::cpu.inst 1187.153068 # Average occupied blocks per requestor +system.cpu.icache.tags.occ_percent::cpu.inst 0.579665 # Average percentage of cache occupancy +system.cpu.icache.tags.occ_percent::total 0.579665 # Average percentage of cache occupancy +system.cpu.icache.tags.occ_task_id_blocks::1024 1872 # Occupied blocks per task id system.cpu.icache.tags.age_task_id_blocks_1024::0 41 # Occupied blocks per task id system.cpu.icache.tags.age_task_id_blocks_1024::1 63 # Occupied blocks per task id -system.cpu.icache.tags.age_task_id_blocks_1024::2 58 # Occupied blocks per task id -system.cpu.icache.tags.age_task_id_blocks_1024::3 312 # Occupied blocks per task id -system.cpu.icache.tags.age_task_id_blocks_1024::4 1397 # Occupied blocks per task id -system.cpu.icache.tags.occ_task_id_percent::1024 0.913574 # Percentage of cache occupancy per task id -system.cpu.icache.tags.tag_accesses 397601201 # Number of tag accesses -system.cpu.icache.tags.data_accesses 397601201 # Number of data accesses -system.cpu.icache.pwrStateResidencyTicks::UNDEFINED 362631828500 # Cumulative time (in ticks) in various power states -system.cpu.icache.ReadReq_hits::cpu.inst 198770599 # number of ReadReq hits -system.cpu.icache.ReadReq_hits::total 198770599 # number of ReadReq hits -system.cpu.icache.demand_hits::cpu.inst 198770599 # number of demand (read+write) hits -system.cpu.icache.demand_hits::total 198770599 # number of demand (read+write) hits -system.cpu.icache.overall_hits::cpu.inst 198770599 # number of overall hits -system.cpu.icache.overall_hits::total 198770599 # number of overall hits -system.cpu.icache.ReadReq_misses::cpu.inst 20001 # number of ReadReq misses -system.cpu.icache.ReadReq_misses::total 20001 # number of ReadReq misses -system.cpu.icache.demand_misses::cpu.inst 20001 # number of demand (read+write) misses -system.cpu.icache.demand_misses::total 20001 # number of demand (read+write) misses -system.cpu.icache.overall_misses::cpu.inst 20001 # number of overall misses -system.cpu.icache.overall_misses::total 20001 # number of overall misses -system.cpu.icache.ReadReq_miss_latency::cpu.inst 455038500 # number of ReadReq miss cycles -system.cpu.icache.ReadReq_miss_latency::total 455038500 # number of ReadReq miss cycles -system.cpu.icache.demand_miss_latency::cpu.inst 455038500 # number of demand (read+write) miss cycles -system.cpu.icache.demand_miss_latency::total 455038500 # number of demand (read+write) miss cycles -system.cpu.icache.overall_miss_latency::cpu.inst 455038500 # number of overall miss cycles -system.cpu.icache.overall_miss_latency::total 455038500 # number of overall miss cycles -system.cpu.icache.ReadReq_accesses::cpu.inst 198790600 # number of ReadReq accesses(hits+misses) -system.cpu.icache.ReadReq_accesses::total 198790600 # number of ReadReq accesses(hits+misses) -system.cpu.icache.demand_accesses::cpu.inst 198790600 # number of demand (read+write) accesses -system.cpu.icache.demand_accesses::total 198790600 # number of demand (read+write) accesses -system.cpu.icache.overall_accesses::cpu.inst 198790600 # number of overall (read+write) accesses 
-system.cpu.icache.overall_accesses::total 198790600 # number of overall (read+write) accesses +system.cpu.icache.tags.age_task_id_blocks_1024::2 57 # Occupied blocks per task id +system.cpu.icache.tags.age_task_id_blocks_1024::3 311 # Occupied blocks per task id +system.cpu.icache.tags.age_task_id_blocks_1024::4 1400 # Occupied blocks per task id +system.cpu.icache.tags.occ_task_id_percent::1024 0.914062 # Percentage of cache occupancy per task id +system.cpu.icache.tags.tag_accesses 398357957 # Number of tag accesses +system.cpu.icache.tags.data_accesses 398357957 # Number of data accesses +system.cpu.icache.pwrStateResidencyTicks::UNDEFINED 366439129500 # Cumulative time (in ticks) in various power states +system.cpu.icache.ReadReq_hits::cpu.inst 199148908 # number of ReadReq hits +system.cpu.icache.ReadReq_hits::total 199148908 # number of ReadReq hits +system.cpu.icache.demand_hits::cpu.inst 199148908 # number of demand (read+write) hits +system.cpu.icache.demand_hits::total 199148908 # number of demand (read+write) hits +system.cpu.icache.overall_hits::cpu.inst 199148908 # number of overall hits +system.cpu.icache.overall_hits::total 199148908 # number of overall hits +system.cpu.icache.ReadReq_misses::cpu.inst 20047 # number of ReadReq misses +system.cpu.icache.ReadReq_misses::total 20047 # number of ReadReq misses +system.cpu.icache.demand_misses::cpu.inst 20047 # number of demand (read+write) misses +system.cpu.icache.demand_misses::total 20047 # number of demand (read+write) misses +system.cpu.icache.overall_misses::cpu.inst 20047 # number of overall misses +system.cpu.icache.overall_misses::total 20047 # number of overall misses +system.cpu.icache.ReadReq_miss_latency::cpu.inst 455856500 # number of ReadReq miss cycles +system.cpu.icache.ReadReq_miss_latency::total 455856500 # number of ReadReq miss cycles +system.cpu.icache.demand_miss_latency::cpu.inst 455856500 # number of demand (read+write) miss cycles +system.cpu.icache.demand_miss_latency::total 455856500 # number of demand (read+write) miss cycles +system.cpu.icache.overall_miss_latency::cpu.inst 455856500 # number of overall miss cycles +system.cpu.icache.overall_miss_latency::total 455856500 # number of overall miss cycles +system.cpu.icache.ReadReq_accesses::cpu.inst 199168955 # number of ReadReq accesses(hits+misses) +system.cpu.icache.ReadReq_accesses::total 199168955 # number of ReadReq accesses(hits+misses) +system.cpu.icache.demand_accesses::cpu.inst 199168955 # number of demand (read+write) accesses +system.cpu.icache.demand_accesses::total 199168955 # number of demand (read+write) accesses +system.cpu.icache.overall_accesses::cpu.inst 199168955 # number of overall (read+write) accesses +system.cpu.icache.overall_accesses::total 199168955 # number of overall (read+write) accesses system.cpu.icache.ReadReq_miss_rate::cpu.inst 0.000101 # miss rate for ReadReq accesses system.cpu.icache.ReadReq_miss_rate::total 0.000101 # miss rate for ReadReq accesses system.cpu.icache.demand_miss_rate::cpu.inst 0.000101 # miss rate for demand accesses system.cpu.icache.demand_miss_rate::total 0.000101 # miss rate for demand accesses system.cpu.icache.overall_miss_rate::cpu.inst 0.000101 # miss rate for overall accesses system.cpu.icache.overall_miss_rate::total 0.000101 # miss rate for overall accesses -system.cpu.icache.ReadReq_avg_miss_latency::cpu.inst 22750.787461 # average ReadReq miss latency -system.cpu.icache.ReadReq_avg_miss_latency::total 22750.787461 # average ReadReq miss latency 
-system.cpu.icache.demand_avg_miss_latency::cpu.inst 22750.787461 # average overall miss latency -system.cpu.icache.demand_avg_miss_latency::total 22750.787461 # average overall miss latency -system.cpu.icache.overall_avg_miss_latency::cpu.inst 22750.787461 # average overall miss latency -system.cpu.icache.overall_avg_miss_latency::total 22750.787461 # average overall miss latency +system.cpu.icache.ReadReq_avg_miss_latency::cpu.inst 22739.387440 # average ReadReq miss latency +system.cpu.icache.ReadReq_avg_miss_latency::total 22739.387440 # average ReadReq miss latency +system.cpu.icache.demand_avg_miss_latency::cpu.inst 22739.387440 # average overall miss latency +system.cpu.icache.demand_avg_miss_latency::total 22739.387440 # average overall miss latency +system.cpu.icache.overall_avg_miss_latency::cpu.inst 22739.387440 # average overall miss latency +system.cpu.icache.overall_avg_miss_latency::total 22739.387440 # average overall miss latency system.cpu.icache.blocked_cycles::no_mshrs 0 # number of cycles access was blocked system.cpu.icache.blocked_cycles::no_targets 0 # number of cycles access was blocked system.cpu.icache.blocked::no_mshrs 0 # number of cycles access was blocked system.cpu.icache.blocked::no_targets 0 # number of cycles access was blocked system.cpu.icache.avg_blocked_cycles::no_mshrs nan # average number of cycles each access was blocked system.cpu.icache.avg_blocked_cycles::no_targets nan # average number of cycles each access was blocked -system.cpu.icache.writebacks::writebacks 18130 # number of writebacks -system.cpu.icache.writebacks::total 18130 # number of writebacks -system.cpu.icache.ReadReq_mshr_misses::cpu.inst 20001 # number of ReadReq MSHR misses -system.cpu.icache.ReadReq_mshr_misses::total 20001 # number of ReadReq MSHR misses -system.cpu.icache.demand_mshr_misses::cpu.inst 20001 # number of demand (read+write) MSHR misses -system.cpu.icache.demand_mshr_misses::total 20001 # number of demand (read+write) MSHR misses -system.cpu.icache.overall_mshr_misses::cpu.inst 20001 # number of overall MSHR misses -system.cpu.icache.overall_mshr_misses::total 20001 # number of overall MSHR misses -system.cpu.icache.ReadReq_mshr_miss_latency::cpu.inst 435037500 # number of ReadReq MSHR miss cycles -system.cpu.icache.ReadReq_mshr_miss_latency::total 435037500 # number of ReadReq MSHR miss cycles -system.cpu.icache.demand_mshr_miss_latency::cpu.inst 435037500 # number of demand (read+write) MSHR miss cycles -system.cpu.icache.demand_mshr_miss_latency::total 435037500 # number of demand (read+write) MSHR miss cycles -system.cpu.icache.overall_mshr_miss_latency::cpu.inst 435037500 # number of overall MSHR miss cycles -system.cpu.icache.overall_mshr_miss_latency::total 435037500 # number of overall MSHR miss cycles +system.cpu.icache.writebacks::writebacks 18175 # number of writebacks +system.cpu.icache.writebacks::total 18175 # number of writebacks +system.cpu.icache.ReadReq_mshr_misses::cpu.inst 20047 # number of ReadReq MSHR misses +system.cpu.icache.ReadReq_mshr_misses::total 20047 # number of ReadReq MSHR misses +system.cpu.icache.demand_mshr_misses::cpu.inst 20047 # number of demand (read+write) MSHR misses +system.cpu.icache.demand_mshr_misses::total 20047 # number of demand (read+write) MSHR misses +system.cpu.icache.overall_mshr_misses::cpu.inst 20047 # number of overall MSHR misses +system.cpu.icache.overall_mshr_misses::total 20047 # number of overall MSHR misses +system.cpu.icache.ReadReq_mshr_miss_latency::cpu.inst 435809500 # number of ReadReq MSHR miss 
cycles +system.cpu.icache.ReadReq_mshr_miss_latency::total 435809500 # number of ReadReq MSHR miss cycles +system.cpu.icache.demand_mshr_miss_latency::cpu.inst 435809500 # number of demand (read+write) MSHR miss cycles +system.cpu.icache.demand_mshr_miss_latency::total 435809500 # number of demand (read+write) MSHR miss cycles +system.cpu.icache.overall_mshr_miss_latency::cpu.inst 435809500 # number of overall MSHR miss cycles +system.cpu.icache.overall_mshr_miss_latency::total 435809500 # number of overall MSHR miss cycles system.cpu.icache.ReadReq_mshr_miss_rate::cpu.inst 0.000101 # mshr miss rate for ReadReq accesses system.cpu.icache.ReadReq_mshr_miss_rate::total 0.000101 # mshr miss rate for ReadReq accesses system.cpu.icache.demand_mshr_miss_rate::cpu.inst 0.000101 # mshr miss rate for demand accesses system.cpu.icache.demand_mshr_miss_rate::total 0.000101 # mshr miss rate for demand accesses system.cpu.icache.overall_mshr_miss_rate::cpu.inst 0.000101 # mshr miss rate for overall accesses system.cpu.icache.overall_mshr_miss_rate::total 0.000101 # mshr miss rate for overall accesses -system.cpu.icache.ReadReq_avg_mshr_miss_latency::cpu.inst 21750.787461 # average ReadReq mshr miss latency -system.cpu.icache.ReadReq_avg_mshr_miss_latency::total 21750.787461 # average ReadReq mshr miss latency -system.cpu.icache.demand_avg_mshr_miss_latency::cpu.inst 21750.787461 # average overall mshr miss latency -system.cpu.icache.demand_avg_mshr_miss_latency::total 21750.787461 # average overall mshr miss latency -system.cpu.icache.overall_avg_mshr_miss_latency::cpu.inst 21750.787461 # average overall mshr miss latency -system.cpu.icache.overall_avg_mshr_miss_latency::total 21750.787461 # average overall mshr miss latency -system.cpu.l2cache.tags.pwrStateResidencyTicks::UNDEFINED 362631828500 # Cumulative time (in ticks) in various power states -system.cpu.l2cache.tags.replacements 112376 # number of replacements -system.cpu.l2cache.tags.tagsinuse 27628.930561 # Cycle average of tags in use -system.cpu.l2cache.tags.total_refs 1772118 # Total number of references to valid blocks. -system.cpu.l2cache.tags.sampled_refs 143588 # Sample count of references to valid blocks. -system.cpu.l2cache.tags.avg_refs 12.341686 # Average number of references to valid blocks. -system.cpu.l2cache.tags.warmup_cycle 163251686000 # Cycle when the warmup percentage was hit. 
-system.cpu.l2cache.tags.occ_blocks::writebacks 23500.584340 # Average occupied blocks per requestor -system.cpu.l2cache.tags.occ_blocks::cpu.inst 308.787313 # Average occupied blocks per requestor -system.cpu.l2cache.tags.occ_blocks::cpu.data 3819.558908 # Average occupied blocks per requestor -system.cpu.l2cache.tags.occ_percent::writebacks 0.717181 # Average percentage of cache occupancy -system.cpu.l2cache.tags.occ_percent::cpu.inst 0.009423 # Average percentage of cache occupancy -system.cpu.l2cache.tags.occ_percent::cpu.data 0.116564 # Average percentage of cache occupancy -system.cpu.l2cache.tags.occ_percent::total 0.843168 # Average percentage of cache occupancy -system.cpu.l2cache.tags.occ_task_id_blocks::1024 31212 # Occupied blocks per task id -system.cpu.l2cache.tags.age_task_id_blocks_1024::0 99 # Occupied blocks per task id -system.cpu.l2cache.tags.age_task_id_blocks_1024::1 1 # Occupied blocks per task id -system.cpu.l2cache.tags.age_task_id_blocks_1024::2 324 # Occupied blocks per task id -system.cpu.l2cache.tags.age_task_id_blocks_1024::3 4939 # Occupied blocks per task id -system.cpu.l2cache.tags.age_task_id_blocks_1024::4 25849 # Occupied blocks per task id -system.cpu.l2cache.tags.occ_task_id_percent::1024 0.952515 # Percentage of cache occupancy per task id -system.cpu.l2cache.tags.tag_accesses 19061751 # Number of tag accesses -system.cpu.l2cache.tags.data_accesses 19061751 # Number of data accesses -system.cpu.l2cache.pwrStateResidencyTicks::UNDEFINED 362631828500 # Cumulative time (in ticks) in various power states -system.cpu.l2cache.WritebackDirty_hits::writebacks 1069336 # number of WritebackDirty hits -system.cpu.l2cache.WritebackDirty_hits::total 1069336 # number of WritebackDirty hits -system.cpu.l2cache.WritebackClean_hits::writebacks 17893 # number of WritebackClean hits -system.cpu.l2cache.WritebackClean_hits::total 17893 # number of WritebackClean hits -system.cpu.l2cache.ReadExReq_hits::cpu.data 255742 # number of ReadExReq hits -system.cpu.l2cache.ReadExReq_hits::total 255742 # number of ReadExReq hits -system.cpu.l2cache.ReadCleanReq_hits::cpu.inst 17196 # number of ReadCleanReq hits -system.cpu.l2cache.ReadCleanReq_hits::total 17196 # number of ReadCleanReq hits -system.cpu.l2cache.ReadSharedReq_hits::cpu.data 748691 # number of ReadSharedReq hits -system.cpu.l2cache.ReadSharedReq_hits::total 748691 # number of ReadSharedReq hits -system.cpu.l2cache.demand_hits::cpu.inst 17196 # number of demand (read+write) hits -system.cpu.l2cache.demand_hits::cpu.data 1004433 # number of demand (read+write) hits -system.cpu.l2cache.demand_hits::total 1021629 # number of demand (read+write) hits -system.cpu.l2cache.overall_hits::cpu.inst 17196 # number of overall hits -system.cpu.l2cache.overall_hits::cpu.data 1004433 # number of overall hits -system.cpu.l2cache.overall_hits::total 1021629 # number of overall hits -system.cpu.l2cache.ReadExReq_misses::cpu.data 100949 # number of ReadExReq misses -system.cpu.l2cache.ReadExReq_misses::total 100949 # number of ReadExReq misses -system.cpu.l2cache.ReadCleanReq_misses::cpu.inst 2805 # number of ReadCleanReq misses -system.cpu.l2cache.ReadCleanReq_misses::total 2805 # number of ReadCleanReq misses -system.cpu.l2cache.ReadSharedReq_misses::cpu.data 40191 # number of ReadSharedReq misses -system.cpu.l2cache.ReadSharedReq_misses::total 40191 # number of ReadSharedReq misses -system.cpu.l2cache.demand_misses::cpu.inst 2805 # number of demand (read+write) misses -system.cpu.l2cache.demand_misses::cpu.data 141140 # number of 
demand (read+write) misses -system.cpu.l2cache.demand_misses::total 143945 # number of demand (read+write) misses -system.cpu.l2cache.overall_misses::cpu.inst 2805 # number of overall misses -system.cpu.l2cache.overall_misses::cpu.data 141140 # number of overall misses -system.cpu.l2cache.overall_misses::total 143945 # number of overall misses -system.cpu.l2cache.ReadExReq_miss_latency::cpu.data 7917540500 # number of ReadExReq miss cycles -system.cpu.l2cache.ReadExReq_miss_latency::total 7917540500 # number of ReadExReq miss cycles -system.cpu.l2cache.ReadCleanReq_miss_latency::cpu.inst 223778500 # number of ReadCleanReq miss cycles -system.cpu.l2cache.ReadCleanReq_miss_latency::total 223778500 # number of ReadCleanReq miss cycles -system.cpu.l2cache.ReadSharedReq_miss_latency::cpu.data 3305085000 # number of ReadSharedReq miss cycles -system.cpu.l2cache.ReadSharedReq_miss_latency::total 3305085000 # number of ReadSharedReq miss cycles -system.cpu.l2cache.demand_miss_latency::cpu.inst 223778500 # number of demand (read+write) miss cycles -system.cpu.l2cache.demand_miss_latency::cpu.data 11222625500 # number of demand (read+write) miss cycles -system.cpu.l2cache.demand_miss_latency::total 11446404000 # number of demand (read+write) miss cycles -system.cpu.l2cache.overall_miss_latency::cpu.inst 223778500 # number of overall miss cycles -system.cpu.l2cache.overall_miss_latency::cpu.data 11222625500 # number of overall miss cycles -system.cpu.l2cache.overall_miss_latency::total 11446404000 # number of overall miss cycles -system.cpu.l2cache.WritebackDirty_accesses::writebacks 1069336 # number of WritebackDirty accesses(hits+misses) -system.cpu.l2cache.WritebackDirty_accesses::total 1069336 # number of WritebackDirty accesses(hits+misses) -system.cpu.l2cache.WritebackClean_accesses::writebacks 17893 # number of WritebackClean accesses(hits+misses) -system.cpu.l2cache.WritebackClean_accesses::total 17893 # number of WritebackClean accesses(hits+misses) -system.cpu.l2cache.ReadExReq_accesses::cpu.data 356691 # number of ReadExReq accesses(hits+misses) -system.cpu.l2cache.ReadExReq_accesses::total 356691 # number of ReadExReq accesses(hits+misses) -system.cpu.l2cache.ReadCleanReq_accesses::cpu.inst 20001 # number of ReadCleanReq accesses(hits+misses) -system.cpu.l2cache.ReadCleanReq_accesses::total 20001 # number of ReadCleanReq accesses(hits+misses) -system.cpu.l2cache.ReadSharedReq_accesses::cpu.data 788882 # number of ReadSharedReq accesses(hits+misses) -system.cpu.l2cache.ReadSharedReq_accesses::total 788882 # number of ReadSharedReq accesses(hits+misses) -system.cpu.l2cache.demand_accesses::cpu.inst 20001 # number of demand (read+write) accesses -system.cpu.l2cache.demand_accesses::cpu.data 1145573 # number of demand (read+write) accesses -system.cpu.l2cache.demand_accesses::total 1165574 # number of demand (read+write) accesses -system.cpu.l2cache.overall_accesses::cpu.inst 20001 # number of overall (read+write) accesses -system.cpu.l2cache.overall_accesses::cpu.data 1145573 # number of overall (read+write) accesses -system.cpu.l2cache.overall_accesses::total 1165574 # number of overall (read+write) accesses -system.cpu.l2cache.ReadExReq_miss_rate::cpu.data 0.283015 # miss rate for ReadExReq accesses -system.cpu.l2cache.ReadExReq_miss_rate::total 0.283015 # miss rate for ReadExReq accesses -system.cpu.l2cache.ReadCleanReq_miss_rate::cpu.inst 0.140243 # miss rate for ReadCleanReq accesses -system.cpu.l2cache.ReadCleanReq_miss_rate::total 0.140243 # miss rate for ReadCleanReq accesses 
-system.cpu.l2cache.ReadSharedReq_miss_rate::cpu.data 0.050947 # miss rate for ReadSharedReq accesses -system.cpu.l2cache.ReadSharedReq_miss_rate::total 0.050947 # miss rate for ReadSharedReq accesses -system.cpu.l2cache.demand_miss_rate::cpu.inst 0.140243 # miss rate for demand accesses -system.cpu.l2cache.demand_miss_rate::cpu.data 0.123205 # miss rate for demand accesses -system.cpu.l2cache.demand_miss_rate::total 0.123497 # miss rate for demand accesses -system.cpu.l2cache.overall_miss_rate::cpu.inst 0.140243 # miss rate for overall accesses -system.cpu.l2cache.overall_miss_rate::cpu.data 0.123205 # miss rate for overall accesses -system.cpu.l2cache.overall_miss_rate::total 0.123497 # miss rate for overall accesses -system.cpu.l2cache.ReadExReq_avg_miss_latency::cpu.data 78431.093919 # average ReadExReq miss latency -system.cpu.l2cache.ReadExReq_avg_miss_latency::total 78431.093919 # average ReadExReq miss latency -system.cpu.l2cache.ReadCleanReq_avg_miss_latency::cpu.inst 79778.431373 # average ReadCleanReq miss latency -system.cpu.l2cache.ReadCleanReq_avg_miss_latency::total 79778.431373 # average ReadCleanReq miss latency -system.cpu.l2cache.ReadSharedReq_avg_miss_latency::cpu.data 82234.455475 # average ReadSharedReq miss latency -system.cpu.l2cache.ReadSharedReq_avg_miss_latency::total 82234.455475 # average ReadSharedReq miss latency -system.cpu.l2cache.demand_avg_miss_latency::cpu.inst 79778.431373 # average overall miss latency -system.cpu.l2cache.demand_avg_miss_latency::cpu.data 79514.138444 # average overall miss latency -system.cpu.l2cache.demand_avg_miss_latency::total 79519.288617 # average overall miss latency -system.cpu.l2cache.overall_avg_miss_latency::cpu.inst 79778.431373 # average overall miss latency -system.cpu.l2cache.overall_avg_miss_latency::cpu.data 79514.138444 # average overall miss latency -system.cpu.l2cache.overall_avg_miss_latency::total 79519.288617 # average overall miss latency +system.cpu.icache.ReadReq_avg_mshr_miss_latency::cpu.inst 21739.387440 # average ReadReq mshr miss latency +system.cpu.icache.ReadReq_avg_mshr_miss_latency::total 21739.387440 # average ReadReq mshr miss latency +system.cpu.icache.demand_avg_mshr_miss_latency::cpu.inst 21739.387440 # average overall mshr miss latency +system.cpu.icache.demand_avg_mshr_miss_latency::total 21739.387440 # average overall mshr miss latency +system.cpu.icache.overall_avg_mshr_miss_latency::cpu.inst 21739.387440 # average overall mshr miss latency +system.cpu.icache.overall_avg_mshr_miss_latency::total 21739.387440 # average overall mshr miss latency +system.cpu.l2cache.tags.pwrStateResidencyTicks::UNDEFINED 366439129500 # Cumulative time (in ticks) in various power states +system.cpu.l2cache.tags.replacements 112318 # number of replacements +system.cpu.l2cache.tags.tagsinuse 27616.037174 # Cycle average of tags in use +system.cpu.l2cache.tags.total_refs 1771878 # Total number of references to valid blocks. +system.cpu.l2cache.tags.sampled_refs 143528 # Sample count of references to valid blocks. +system.cpu.l2cache.tags.avg_refs 12.345173 # Average number of references to valid blocks. +system.cpu.l2cache.tags.warmup_cycle 165163715500 # Cycle when the warmup percentage was hit. 
+system.cpu.l2cache.tags.occ_blocks::writebacks 23489.264935 # Average occupied blocks per requestor +system.cpu.l2cache.tags.occ_blocks::cpu.inst 308.326790 # Average occupied blocks per requestor +system.cpu.l2cache.tags.occ_blocks::cpu.data 3818.445449 # Average occupied blocks per requestor +system.cpu.l2cache.tags.occ_percent::writebacks 0.716835 # Average percentage of cache occupancy +system.cpu.l2cache.tags.occ_percent::cpu.inst 0.009409 # Average percentage of cache occupancy +system.cpu.l2cache.tags.occ_percent::cpu.data 0.116530 # Average percentage of cache occupancy +system.cpu.l2cache.tags.occ_percent::total 0.842775 # Average percentage of cache occupancy +system.cpu.l2cache.tags.occ_task_id_blocks::1024 31210 # Occupied blocks per task id +system.cpu.l2cache.tags.age_task_id_blocks_1024::0 100 # Occupied blocks per task id +system.cpu.l2cache.tags.age_task_id_blocks_1024::2 318 # Occupied blocks per task id +system.cpu.l2cache.tags.age_task_id_blocks_1024::3 4934 # Occupied blocks per task id +system.cpu.l2cache.tags.age_task_id_blocks_1024::4 25858 # Occupied blocks per task id +system.cpu.l2cache.tags.occ_task_id_percent::1024 0.952454 # Percentage of cache occupancy per task id +system.cpu.l2cache.tags.tag_accesses 19060134 # Number of tag accesses +system.cpu.l2cache.tags.data_accesses 19060134 # Number of data accesses +system.cpu.l2cache.pwrStateResidencyTicks::UNDEFINED 366439129500 # Cumulative time (in ticks) in various power states +system.cpu.l2cache.WritebackDirty_hits::writebacks 1069267 # number of WritebackDirty hits +system.cpu.l2cache.WritebackDirty_hits::total 1069267 # number of WritebackDirty hits +system.cpu.l2cache.WritebackClean_hits::writebacks 17938 # number of WritebackClean hits +system.cpu.l2cache.WritebackClean_hits::total 17938 # number of WritebackClean hits +system.cpu.l2cache.ReadExReq_hits::cpu.data 255711 # number of ReadExReq hits +system.cpu.l2cache.ReadExReq_hits::total 255711 # number of ReadExReq hits +system.cpu.l2cache.ReadCleanReq_hits::cpu.inst 17236 # number of ReadCleanReq hits +system.cpu.l2cache.ReadCleanReq_hits::total 17236 # number of ReadCleanReq hits +system.cpu.l2cache.ReadSharedReq_hits::cpu.data 748638 # number of ReadSharedReq hits +system.cpu.l2cache.ReadSharedReq_hits::total 748638 # number of ReadSharedReq hits +system.cpu.l2cache.demand_hits::cpu.inst 17236 # number of demand (read+write) hits +system.cpu.l2cache.demand_hits::cpu.data 1004349 # number of demand (read+write) hits +system.cpu.l2cache.demand_hits::total 1021585 # number of demand (read+write) hits +system.cpu.l2cache.overall_hits::cpu.inst 17236 # number of overall hits +system.cpu.l2cache.overall_hits::cpu.data 1004349 # number of overall hits +system.cpu.l2cache.overall_hits::total 1021585 # number of overall hits +system.cpu.l2cache.ReadExReq_misses::cpu.data 100927 # number of ReadExReq misses +system.cpu.l2cache.ReadExReq_misses::total 100927 # number of ReadExReq misses +system.cpu.l2cache.ReadCleanReq_misses::cpu.inst 2811 # number of ReadCleanReq misses +system.cpu.l2cache.ReadCleanReq_misses::total 2811 # number of ReadCleanReq misses +system.cpu.l2cache.ReadSharedReq_misses::cpu.data 40157 # number of ReadSharedReq misses +system.cpu.l2cache.ReadSharedReq_misses::total 40157 # number of ReadSharedReq misses +system.cpu.l2cache.demand_misses::cpu.inst 2811 # number of demand (read+write) misses +system.cpu.l2cache.demand_misses::cpu.data 141084 # number of demand (read+write) misses +system.cpu.l2cache.demand_misses::total 143895 # number of 
demand (read+write) misses +system.cpu.l2cache.overall_misses::cpu.inst 2811 # number of overall misses +system.cpu.l2cache.overall_misses::cpu.data 141084 # number of overall misses +system.cpu.l2cache.overall_misses::total 143895 # number of overall misses +system.cpu.l2cache.ReadExReq_miss_latency::cpu.data 7928727500 # number of ReadExReq miss cycles +system.cpu.l2cache.ReadExReq_miss_latency::total 7928727500 # number of ReadExReq miss cycles +system.cpu.l2cache.ReadCleanReq_miss_latency::cpu.inst 224093000 # number of ReadCleanReq miss cycles +system.cpu.l2cache.ReadCleanReq_miss_latency::total 224093000 # number of ReadCleanReq miss cycles +system.cpu.l2cache.ReadSharedReq_miss_latency::cpu.data 3306674000 # number of ReadSharedReq miss cycles +system.cpu.l2cache.ReadSharedReq_miss_latency::total 3306674000 # number of ReadSharedReq miss cycles +system.cpu.l2cache.demand_miss_latency::cpu.inst 224093000 # number of demand (read+write) miss cycles +system.cpu.l2cache.demand_miss_latency::cpu.data 11235401500 # number of demand (read+write) miss cycles +system.cpu.l2cache.demand_miss_latency::total 11459494500 # number of demand (read+write) miss cycles +system.cpu.l2cache.overall_miss_latency::cpu.inst 224093000 # number of overall miss cycles +system.cpu.l2cache.overall_miss_latency::cpu.data 11235401500 # number of overall miss cycles +system.cpu.l2cache.overall_miss_latency::total 11459494500 # number of overall miss cycles +system.cpu.l2cache.WritebackDirty_accesses::writebacks 1069267 # number of WritebackDirty accesses(hits+misses) +system.cpu.l2cache.WritebackDirty_accesses::total 1069267 # number of WritebackDirty accesses(hits+misses) +system.cpu.l2cache.WritebackClean_accesses::writebacks 17938 # number of WritebackClean accesses(hits+misses) +system.cpu.l2cache.WritebackClean_accesses::total 17938 # number of WritebackClean accesses(hits+misses) +system.cpu.l2cache.ReadExReq_accesses::cpu.data 356638 # number of ReadExReq accesses(hits+misses) +system.cpu.l2cache.ReadExReq_accesses::total 356638 # number of ReadExReq accesses(hits+misses) +system.cpu.l2cache.ReadCleanReq_accesses::cpu.inst 20047 # number of ReadCleanReq accesses(hits+misses) +system.cpu.l2cache.ReadCleanReq_accesses::total 20047 # number of ReadCleanReq accesses(hits+misses) +system.cpu.l2cache.ReadSharedReq_accesses::cpu.data 788795 # number of ReadSharedReq accesses(hits+misses) +system.cpu.l2cache.ReadSharedReq_accesses::total 788795 # number of ReadSharedReq accesses(hits+misses) +system.cpu.l2cache.demand_accesses::cpu.inst 20047 # number of demand (read+write) accesses +system.cpu.l2cache.demand_accesses::cpu.data 1145433 # number of demand (read+write) accesses +system.cpu.l2cache.demand_accesses::total 1165480 # number of demand (read+write) accesses +system.cpu.l2cache.overall_accesses::cpu.inst 20047 # number of overall (read+write) accesses +system.cpu.l2cache.overall_accesses::cpu.data 1145433 # number of overall (read+write) accesses +system.cpu.l2cache.overall_accesses::total 1165480 # number of overall (read+write) accesses +system.cpu.l2cache.ReadExReq_miss_rate::cpu.data 0.282996 # miss rate for ReadExReq accesses +system.cpu.l2cache.ReadExReq_miss_rate::total 0.282996 # miss rate for ReadExReq accesses +system.cpu.l2cache.ReadCleanReq_miss_rate::cpu.inst 0.140220 # miss rate for ReadCleanReq accesses +system.cpu.l2cache.ReadCleanReq_miss_rate::total 0.140220 # miss rate for ReadCleanReq accesses +system.cpu.l2cache.ReadSharedReq_miss_rate::cpu.data 0.050909 # miss rate for ReadSharedReq 
accesses +system.cpu.l2cache.ReadSharedReq_miss_rate::total 0.050909 # miss rate for ReadSharedReq accesses +system.cpu.l2cache.demand_miss_rate::cpu.inst 0.140220 # miss rate for demand accesses +system.cpu.l2cache.demand_miss_rate::cpu.data 0.123171 # miss rate for demand accesses +system.cpu.l2cache.demand_miss_rate::total 0.123464 # miss rate for demand accesses +system.cpu.l2cache.overall_miss_rate::cpu.inst 0.140220 # miss rate for overall accesses +system.cpu.l2cache.overall_miss_rate::cpu.data 0.123171 # miss rate for overall accesses +system.cpu.l2cache.overall_miss_rate::total 0.123464 # miss rate for overall accesses +system.cpu.l2cache.ReadExReq_avg_miss_latency::cpu.data 78559.032766 # average ReadExReq miss latency +system.cpu.l2cache.ReadExReq_avg_miss_latency::total 78559.032766 # average ReadExReq miss latency +system.cpu.l2cache.ReadCleanReq_avg_miss_latency::cpu.inst 79720.028460 # average ReadCleanReq miss latency +system.cpu.l2cache.ReadCleanReq_avg_miss_latency::total 79720.028460 # average ReadCleanReq miss latency +system.cpu.l2cache.ReadSharedReq_avg_miss_latency::cpu.data 82343.651169 # average ReadSharedReq miss latency +system.cpu.l2cache.ReadSharedReq_avg_miss_latency::total 82343.651169 # average ReadSharedReq miss latency +system.cpu.l2cache.demand_avg_miss_latency::cpu.inst 79720.028460 # average overall miss latency +system.cpu.l2cache.demand_avg_miss_latency::cpu.data 79636.255706 # average overall miss latency +system.cpu.l2cache.demand_avg_miss_latency::total 79637.892213 # average overall miss latency +system.cpu.l2cache.overall_avg_miss_latency::cpu.inst 79720.028460 # average overall miss latency +system.cpu.l2cache.overall_avg_miss_latency::cpu.data 79636.255706 # average overall miss latency +system.cpu.l2cache.overall_avg_miss_latency::total 79637.892213 # average overall miss latency system.cpu.l2cache.blocked_cycles::no_mshrs 0 # number of cycles access was blocked system.cpu.l2cache.blocked_cycles::no_targets 0 # number of cycles access was blocked system.cpu.l2cache.blocked::no_mshrs 0 # number of cycles access was blocked system.cpu.l2cache.blocked::no_targets 0 # number of cycles access was blocked system.cpu.l2cache.avg_blocked_cycles::no_mshrs nan # average number of cycles each access was blocked system.cpu.l2cache.avg_blocked_cycles::no_targets nan # average number of cycles each access was blocked -system.cpu.l2cache.writebacks::writebacks 97210 # number of writebacks -system.cpu.l2cache.writebacks::total 97210 # number of writebacks +system.cpu.l2cache.writebacks::writebacks 97182 # number of writebacks +system.cpu.l2cache.writebacks::total 97182 # number of writebacks system.cpu.l2cache.ReadCleanReq_mshr_hits::cpu.inst 1 # number of ReadCleanReq MSHR hits system.cpu.l2cache.ReadCleanReq_mshr_hits::total 1 # number of ReadCleanReq MSHR hits -system.cpu.l2cache.ReadSharedReq_mshr_hits::cpu.data 14 # number of ReadSharedReq MSHR hits -system.cpu.l2cache.ReadSharedReq_mshr_hits::total 14 # number of ReadSharedReq MSHR hits +system.cpu.l2cache.ReadSharedReq_mshr_hits::cpu.data 13 # number of ReadSharedReq MSHR hits +system.cpu.l2cache.ReadSharedReq_mshr_hits::total 13 # number of ReadSharedReq MSHR hits system.cpu.l2cache.demand_mshr_hits::cpu.inst 1 # number of demand (read+write) MSHR hits -system.cpu.l2cache.demand_mshr_hits::cpu.data 14 # number of demand (read+write) MSHR hits -system.cpu.l2cache.demand_mshr_hits::total 15 # number of demand (read+write) MSHR hits +system.cpu.l2cache.demand_mshr_hits::cpu.data 13 # number of demand 
(read+write) MSHR hits +system.cpu.l2cache.demand_mshr_hits::total 14 # number of demand (read+write) MSHR hits system.cpu.l2cache.overall_mshr_hits::cpu.inst 1 # number of overall MSHR hits -system.cpu.l2cache.overall_mshr_hits::cpu.data 14 # number of overall MSHR hits -system.cpu.l2cache.overall_mshr_hits::total 15 # number of overall MSHR hits -system.cpu.l2cache.ReadExReq_mshr_misses::cpu.data 100949 # number of ReadExReq MSHR misses -system.cpu.l2cache.ReadExReq_mshr_misses::total 100949 # number of ReadExReq MSHR misses -system.cpu.l2cache.ReadCleanReq_mshr_misses::cpu.inst 2804 # number of ReadCleanReq MSHR misses -system.cpu.l2cache.ReadCleanReq_mshr_misses::total 2804 # number of ReadCleanReq MSHR misses -system.cpu.l2cache.ReadSharedReq_mshr_misses::cpu.data 40177 # number of ReadSharedReq MSHR misses -system.cpu.l2cache.ReadSharedReq_mshr_misses::total 40177 # number of ReadSharedReq MSHR misses -system.cpu.l2cache.demand_mshr_misses::cpu.inst 2804 # number of demand (read+write) MSHR misses -system.cpu.l2cache.demand_mshr_misses::cpu.data 141126 # number of demand (read+write) MSHR misses -system.cpu.l2cache.demand_mshr_misses::total 143930 # number of demand (read+write) MSHR misses -system.cpu.l2cache.overall_mshr_misses::cpu.inst 2804 # number of overall MSHR misses -system.cpu.l2cache.overall_mshr_misses::cpu.data 141126 # number of overall MSHR misses -system.cpu.l2cache.overall_mshr_misses::total 143930 # number of overall MSHR misses -system.cpu.l2cache.ReadExReq_mshr_miss_latency::cpu.data 6908050500 # number of ReadExReq MSHR miss cycles -system.cpu.l2cache.ReadExReq_mshr_miss_latency::total 6908050500 # number of ReadExReq MSHR miss cycles -system.cpu.l2cache.ReadCleanReq_mshr_miss_latency::cpu.inst 195670500 # number of ReadCleanReq MSHR miss cycles -system.cpu.l2cache.ReadCleanReq_mshr_miss_latency::total 195670500 # number of ReadCleanReq MSHR miss cycles -system.cpu.l2cache.ReadSharedReq_mshr_miss_latency::cpu.data 2902356000 # number of ReadSharedReq MSHR miss cycles -system.cpu.l2cache.ReadSharedReq_mshr_miss_latency::total 2902356000 # number of ReadSharedReq MSHR miss cycles -system.cpu.l2cache.demand_mshr_miss_latency::cpu.inst 195670500 # number of demand (read+write) MSHR miss cycles -system.cpu.l2cache.demand_mshr_miss_latency::cpu.data 9810406500 # number of demand (read+write) MSHR miss cycles -system.cpu.l2cache.demand_mshr_miss_latency::total 10006077000 # number of demand (read+write) MSHR miss cycles -system.cpu.l2cache.overall_mshr_miss_latency::cpu.inst 195670500 # number of overall MSHR miss cycles -system.cpu.l2cache.overall_mshr_miss_latency::cpu.data 9810406500 # number of overall MSHR miss cycles -system.cpu.l2cache.overall_mshr_miss_latency::total 10006077000 # number of overall MSHR miss cycles -system.cpu.l2cache.ReadExReq_mshr_miss_rate::cpu.data 0.283015 # mshr miss rate for ReadExReq accesses -system.cpu.l2cache.ReadExReq_mshr_miss_rate::total 0.283015 # mshr miss rate for ReadExReq accesses -system.cpu.l2cache.ReadCleanReq_mshr_miss_rate::cpu.inst 0.140193 # mshr miss rate for ReadCleanReq accesses -system.cpu.l2cache.ReadCleanReq_mshr_miss_rate::total 0.140193 # mshr miss rate for ReadCleanReq accesses -system.cpu.l2cache.ReadSharedReq_mshr_miss_rate::cpu.data 0.050929 # mshr miss rate for ReadSharedReq accesses -system.cpu.l2cache.ReadSharedReq_mshr_miss_rate::total 0.050929 # mshr miss rate for ReadSharedReq accesses -system.cpu.l2cache.demand_mshr_miss_rate::cpu.inst 0.140193 # mshr miss rate for demand accesses 
-system.cpu.l2cache.demand_mshr_miss_rate::cpu.data 0.123192 # mshr miss rate for demand accesses -system.cpu.l2cache.demand_mshr_miss_rate::total 0.123484 # mshr miss rate for demand accesses -system.cpu.l2cache.overall_mshr_miss_rate::cpu.inst 0.140193 # mshr miss rate for overall accesses -system.cpu.l2cache.overall_mshr_miss_rate::cpu.data 0.123192 # mshr miss rate for overall accesses -system.cpu.l2cache.overall_mshr_miss_rate::total 0.123484 # mshr miss rate for overall accesses -system.cpu.l2cache.ReadExReq_avg_mshr_miss_latency::cpu.data 68431.093919 # average ReadExReq mshr miss latency -system.cpu.l2cache.ReadExReq_avg_mshr_miss_latency::total 68431.093919 # average ReadExReq mshr miss latency -system.cpu.l2cache.ReadCleanReq_avg_mshr_miss_latency::cpu.inst 69782.631954 # average ReadCleanReq mshr miss latency -system.cpu.l2cache.ReadCleanReq_avg_mshr_miss_latency::total 69782.631954 # average ReadCleanReq mshr miss latency -system.cpu.l2cache.ReadSharedReq_avg_mshr_miss_latency::cpu.data 72239.241357 # average ReadSharedReq mshr miss latency -system.cpu.l2cache.ReadSharedReq_avg_mshr_miss_latency::total 72239.241357 # average ReadSharedReq mshr miss latency -system.cpu.l2cache.demand_avg_mshr_miss_latency::cpu.inst 69782.631954 # average overall mshr miss latency -system.cpu.l2cache.demand_avg_mshr_miss_latency::cpu.data 69515.231070 # average overall mshr miss latency -system.cpu.l2cache.demand_avg_mshr_miss_latency::total 69520.440492 # average overall mshr miss latency -system.cpu.l2cache.overall_avg_mshr_miss_latency::cpu.inst 69782.631954 # average overall mshr miss latency -system.cpu.l2cache.overall_avg_mshr_miss_latency::cpu.data 69515.231070 # average overall mshr miss latency -system.cpu.l2cache.overall_avg_mshr_miss_latency::total 69520.440492 # average overall mshr miss latency -system.cpu.toL2Bus.snoop_filter.tot_requests 2325181 # Total number of requests made to the snoop filter. -system.cpu.toL2Bus.snoop_filter.hit_single_requests 1159677 # Number of requests hitting in the snoop filter with a single holder of the requested data. -system.cpu.toL2Bus.snoop_filter.hit_multi_requests 4997 # Number of requests hitting in the snoop filter with multiple (>1) holders of the requested data. -system.cpu.toL2Bus.snoop_filter.tot_snoops 2608 # Total number of snoops made to the snoop filter. -system.cpu.toL2Bus.snoop_filter.hit_single_snoops 2605 # Number of snoops hitting in the snoop filter with a single holder of the requested data. 
+system.cpu.l2cache.overall_mshr_hits::cpu.data 13 # number of overall MSHR hits +system.cpu.l2cache.overall_mshr_hits::total 14 # number of overall MSHR hits +system.cpu.l2cache.ReadExReq_mshr_misses::cpu.data 100927 # number of ReadExReq MSHR misses +system.cpu.l2cache.ReadExReq_mshr_misses::total 100927 # number of ReadExReq MSHR misses +system.cpu.l2cache.ReadCleanReq_mshr_misses::cpu.inst 2810 # number of ReadCleanReq MSHR misses +system.cpu.l2cache.ReadCleanReq_mshr_misses::total 2810 # number of ReadCleanReq MSHR misses +system.cpu.l2cache.ReadSharedReq_mshr_misses::cpu.data 40144 # number of ReadSharedReq MSHR misses +system.cpu.l2cache.ReadSharedReq_mshr_misses::total 40144 # number of ReadSharedReq MSHR misses +system.cpu.l2cache.demand_mshr_misses::cpu.inst 2810 # number of demand (read+write) MSHR misses +system.cpu.l2cache.demand_mshr_misses::cpu.data 141071 # number of demand (read+write) MSHR misses +system.cpu.l2cache.demand_mshr_misses::total 143881 # number of demand (read+write) MSHR misses +system.cpu.l2cache.overall_mshr_misses::cpu.inst 2810 # number of overall MSHR misses +system.cpu.l2cache.overall_mshr_misses::cpu.data 141071 # number of overall MSHR misses +system.cpu.l2cache.overall_mshr_misses::total 143881 # number of overall MSHR misses +system.cpu.l2cache.ReadExReq_mshr_miss_latency::cpu.data 6919457500 # number of ReadExReq MSHR miss cycles +system.cpu.l2cache.ReadExReq_mshr_miss_latency::total 6919457500 # number of ReadExReq MSHR miss cycles +system.cpu.l2cache.ReadCleanReq_mshr_miss_latency::cpu.inst 195753000 # number of ReadCleanReq MSHR miss cycles +system.cpu.l2cache.ReadCleanReq_mshr_miss_latency::total 195753000 # number of ReadCleanReq MSHR miss cycles +system.cpu.l2cache.ReadSharedReq_mshr_miss_latency::cpu.data 2904162000 # number of ReadSharedReq MSHR miss cycles +system.cpu.l2cache.ReadSharedReq_mshr_miss_latency::total 2904162000 # number of ReadSharedReq MSHR miss cycles +system.cpu.l2cache.demand_mshr_miss_latency::cpu.inst 195753000 # number of demand (read+write) MSHR miss cycles +system.cpu.l2cache.demand_mshr_miss_latency::cpu.data 9823619500 # number of demand (read+write) MSHR miss cycles +system.cpu.l2cache.demand_mshr_miss_latency::total 10019372500 # number of demand (read+write) MSHR miss cycles +system.cpu.l2cache.overall_mshr_miss_latency::cpu.inst 195753000 # number of overall MSHR miss cycles +system.cpu.l2cache.overall_mshr_miss_latency::cpu.data 9823619500 # number of overall MSHR miss cycles +system.cpu.l2cache.overall_mshr_miss_latency::total 10019372500 # number of overall MSHR miss cycles +system.cpu.l2cache.ReadExReq_mshr_miss_rate::cpu.data 0.282996 # mshr miss rate for ReadExReq accesses +system.cpu.l2cache.ReadExReq_mshr_miss_rate::total 0.282996 # mshr miss rate for ReadExReq accesses +system.cpu.l2cache.ReadCleanReq_mshr_miss_rate::cpu.inst 0.140171 # mshr miss rate for ReadCleanReq accesses +system.cpu.l2cache.ReadCleanReq_mshr_miss_rate::total 0.140171 # mshr miss rate for ReadCleanReq accesses +system.cpu.l2cache.ReadSharedReq_mshr_miss_rate::cpu.data 0.050893 # mshr miss rate for ReadSharedReq accesses +system.cpu.l2cache.ReadSharedReq_mshr_miss_rate::total 0.050893 # mshr miss rate for ReadSharedReq accesses +system.cpu.l2cache.demand_mshr_miss_rate::cpu.inst 0.140171 # mshr miss rate for demand accesses +system.cpu.l2cache.demand_mshr_miss_rate::cpu.data 0.123160 # mshr miss rate for demand accesses +system.cpu.l2cache.demand_mshr_miss_rate::total 0.123452 # mshr miss rate for demand accesses 
+system.cpu.l2cache.overall_mshr_miss_rate::cpu.inst 0.140171 # mshr miss rate for overall accesses +system.cpu.l2cache.overall_mshr_miss_rate::cpu.data 0.123160 # mshr miss rate for overall accesses +system.cpu.l2cache.overall_mshr_miss_rate::total 0.123452 # mshr miss rate for overall accesses +system.cpu.l2cache.ReadExReq_avg_mshr_miss_latency::cpu.data 68559.032766 # average ReadExReq mshr miss latency +system.cpu.l2cache.ReadExReq_avg_mshr_miss_latency::total 68559.032766 # average ReadExReq mshr miss latency +system.cpu.l2cache.ReadCleanReq_avg_mshr_miss_latency::cpu.inst 69662.989324 # average ReadCleanReq mshr miss latency +system.cpu.l2cache.ReadCleanReq_avg_mshr_miss_latency::total 69662.989324 # average ReadCleanReq mshr miss latency +system.cpu.l2cache.ReadSharedReq_avg_mshr_miss_latency::cpu.data 72343.612993 # average ReadSharedReq mshr miss latency +system.cpu.l2cache.ReadSharedReq_avg_mshr_miss_latency::total 72343.612993 # average ReadSharedReq mshr miss latency +system.cpu.l2cache.demand_avg_mshr_miss_latency::cpu.inst 69662.989324 # average overall mshr miss latency +system.cpu.l2cache.demand_avg_mshr_miss_latency::cpu.data 69635.995350 # average overall mshr miss latency +system.cpu.l2cache.demand_avg_mshr_miss_latency::total 69636.522543 # average overall mshr miss latency +system.cpu.l2cache.overall_avg_mshr_miss_latency::cpu.inst 69662.989324 # average overall mshr miss latency +system.cpu.l2cache.overall_avg_mshr_miss_latency::cpu.data 69635.995350 # average overall mshr miss latency +system.cpu.l2cache.overall_avg_mshr_miss_latency::total 69636.522543 # average overall mshr miss latency +system.cpu.toL2Bus.snoop_filter.tot_requests 2324992 # Total number of requests made to the snoop filter. +system.cpu.toL2Bus.snoop_filter.hit_single_requests 1159582 # Number of requests hitting in the snoop filter with a single holder of the requested data. +system.cpu.toL2Bus.snoop_filter.hit_multi_requests 4996 # Number of requests hitting in the snoop filter with multiple (>1) holders of the requested data. +system.cpu.toL2Bus.snoop_filter.tot_snoops 2610 # Total number of snoops made to the snoop filter. +system.cpu.toL2Bus.snoop_filter.hit_single_snoops 2607 # Number of snoops hitting in the snoop filter with a single holder of the requested data. system.cpu.toL2Bus.snoop_filter.hit_multi_snoops 3 # Number of snoops hitting in the snoop filter with multiple (>1) holders of the requested data. 
-system.cpu.toL2Bus.pwrStateResidencyTicks::UNDEFINED 362631828500 # Cumulative time (in ticks) in various power states -system.cpu.toL2Bus.trans_dist::ReadResp 808883 # Transaction distribution -system.cpu.toL2Bus.trans_dist::WritebackDirty 1166546 # Transaction distribution -system.cpu.toL2Bus.trans_dist::WritebackClean 18130 # Transaction distribution -system.cpu.toL2Bus.trans_dist::CleanEvict 87307 # Transaction distribution -system.cpu.toL2Bus.trans_dist::ReadExReq 356691 # Transaction distribution -system.cpu.toL2Bus.trans_dist::ReadExResp 356691 # Transaction distribution -system.cpu.toL2Bus.trans_dist::ReadCleanReq 20001 # Transaction distribution -system.cpu.toL2Bus.trans_dist::ReadSharedReq 788882 # Transaction distribution -system.cpu.toL2Bus.pkt_count_system.cpu.icache.mem_side::system.cpu.l2cache.cpu_side 58132 # Packet count per connected master and slave (bytes) -system.cpu.toL2Bus.pkt_count_system.cpu.dcache.mem_side::system.cpu.l2cache.cpu_side 3432623 # Packet count per connected master and slave (bytes) -system.cpu.toL2Bus.pkt_count::total 3490755 # Packet count per connected master and slave (bytes) -system.cpu.toL2Bus.pkt_size_system.cpu.icache.mem_side::system.cpu.l2cache.cpu_side 2440384 # Cumulative packet size per connected master and slave (bytes) -system.cpu.toL2Bus.pkt_size_system.cpu.dcache.mem_side::system.cpu.l2cache.cpu_side 141754176 # Cumulative packet size per connected master and slave (bytes) -system.cpu.toL2Bus.pkt_size::total 144194560 # Cumulative packet size per connected master and slave (bytes) -system.cpu.toL2Bus.snoops 112376 # Total snoops (count) -system.cpu.toL2Bus.snoop_fanout::samples 1277950 # Request fanout histogram -system.cpu.toL2Bus.snoop_fanout::mean 0.006008 # Request fanout histogram -system.cpu.toL2Bus.snoop_fanout::stdev 0.077309 # Request fanout histogram +system.cpu.toL2Bus.pwrStateResidencyTicks::UNDEFINED 366439129500 # Cumulative time (in ticks) in various power states +system.cpu.toL2Bus.trans_dist::ReadResp 808842 # Transaction distribution +system.cpu.toL2Bus.trans_dist::WritebackDirty 1166449 # Transaction distribution +system.cpu.toL2Bus.trans_dist::WritebackClean 18175 # Transaction distribution +system.cpu.toL2Bus.trans_dist::CleanEvict 87206 # Transaction distribution +system.cpu.toL2Bus.trans_dist::ReadExReq 356638 # Transaction distribution +system.cpu.toL2Bus.trans_dist::ReadExResp 356638 # Transaction distribution +system.cpu.toL2Bus.trans_dist::ReadCleanReq 20047 # Transaction distribution +system.cpu.toL2Bus.trans_dist::ReadSharedReq 788795 # Transaction distribution +system.cpu.toL2Bus.pkt_count_system.cpu.icache.mem_side::system.cpu.l2cache.cpu_side 58269 # Packet count per connected master and slave (bytes) +system.cpu.toL2Bus.pkt_count_system.cpu.dcache.mem_side::system.cpu.l2cache.cpu_side 3432203 # Packet count per connected master and slave (bytes) +system.cpu.toL2Bus.pkt_count::total 3490472 # Packet count per connected master and slave (bytes) +system.cpu.toL2Bus.pkt_size_system.cpu.icache.mem_side::system.cpu.l2cache.cpu_side 2446208 # Cumulative packet size per connected master and slave (bytes) +system.cpu.toL2Bus.pkt_size_system.cpu.dcache.mem_side::system.cpu.l2cache.cpu_side 141740800 # Cumulative packet size per connected master and slave (bytes) +system.cpu.toL2Bus.pkt_size::total 144187008 # Cumulative packet size per connected master and slave (bytes) +system.cpu.toL2Bus.snoops 112318 # Total snoops (count) +system.cpu.toL2Bus.snoopTraffic 6219648 # Total snoop traffic (bytes) 
+system.cpu.toL2Bus.snoop_fanout::samples 1277798 # Request fanout histogram +system.cpu.toL2Bus.snoop_fanout::mean 0.006010 # Request fanout histogram +system.cpu.toL2Bus.snoop_fanout::stdev 0.077318 # Request fanout histogram system.cpu.toL2Bus.snoop_fanout::underflows 0 0.00% 0.00% # Request fanout histogram -system.cpu.toL2Bus.snoop_fanout::0 1270275 99.40% 99.40% # Request fanout histogram -system.cpu.toL2Bus.snoop_fanout::1 7672 0.60% 100.00% # Request fanout histogram +system.cpu.toL2Bus.snoop_fanout::0 1270122 99.40% 99.40% # Request fanout histogram +system.cpu.toL2Bus.snoop_fanout::1 7673 0.60% 100.00% # Request fanout histogram system.cpu.toL2Bus.snoop_fanout::2 3 0.00% 100.00% # Request fanout histogram system.cpu.toL2Bus.snoop_fanout::overflows 0 0.00% 100.00% # Request fanout histogram system.cpu.toL2Bus.snoop_fanout::min_value 0 # Request fanout histogram system.cpu.toL2Bus.snoop_fanout::max_value 2 # Request fanout histogram -system.cpu.toL2Bus.snoop_fanout::total 1277950 # Request fanout histogram -system.cpu.toL2Bus.reqLayer0.occupancy 2250056500 # Layer occupancy (ticks) +system.cpu.toL2Bus.snoop_fanout::total 1277798 # Request fanout histogram +system.cpu.toL2Bus.reqLayer0.occupancy 2249938000 # Layer occupancy (ticks) system.cpu.toL2Bus.reqLayer0.utilization 0.6 # Layer utilization (%) -system.cpu.toL2Bus.respLayer0.occupancy 30027947 # Layer occupancy (ticks) +system.cpu.toL2Bus.respLayer0.occupancy 30093953 # Layer occupancy (ticks) system.cpu.toL2Bus.respLayer0.utilization 0.0 # Layer utilization (%) -system.cpu.toL2Bus.respLayer1.occupancy 1718367983 # Layer occupancy (ticks) +system.cpu.toL2Bus.respLayer1.occupancy 1718157484 # Layer occupancy (ticks) system.cpu.toL2Bus.respLayer1.utilization 0.5 # Layer utilization (%) -system.membus.pwrStateResidencyTicks::UNDEFINED 362631828500 # Cumulative time (in ticks) in various power states -system.membus.trans_dist::ReadResp 42981 # Transaction distribution -system.membus.trans_dist::WritebackDirty 97210 # Transaction distribution -system.membus.trans_dist::CleanEvict 12558 # Transaction distribution -system.membus.trans_dist::ReadExReq 100949 # Transaction distribution -system.membus.trans_dist::ReadExResp 100949 # Transaction distribution -system.membus.trans_dist::ReadSharedReq 42981 # Transaction distribution -system.membus.pkt_count_system.cpu.l2cache.mem_side::system.physmem.port 397628 # Packet count per connected master and slave (bytes) -system.membus.pkt_count::total 397628 # Packet count per connected master and slave (bytes) -system.membus.pkt_size_system.cpu.l2cache.mem_side::system.physmem.port 15432960 # Cumulative packet size per connected master and slave (bytes) -system.membus.pkt_size::total 15432960 # Cumulative packet size per connected master and slave (bytes) +system.membus.pwrStateResidencyTicks::UNDEFINED 366439129500 # Cumulative time (in ticks) in various power states +system.membus.trans_dist::ReadResp 42954 # Transaction distribution +system.membus.trans_dist::WritebackDirty 97182 # Transaction distribution +system.membus.trans_dist::CleanEvict 12526 # Transaction distribution +system.membus.trans_dist::ReadExReq 100927 # Transaction distribution +system.membus.trans_dist::ReadExResp 100927 # Transaction distribution +system.membus.trans_dist::ReadSharedReq 42954 # Transaction distribution +system.membus.pkt_count_system.cpu.l2cache.mem_side::system.physmem.port 397470 # Packet count per connected master and slave (bytes) +system.membus.pkt_count::total 397470 # Packet count per connected 
master and slave (bytes) +system.membus.pkt_size_system.cpu.l2cache.mem_side::system.physmem.port 15428032 # Cumulative packet size per connected master and slave (bytes) +system.membus.pkt_size::total 15428032 # Cumulative packet size per connected master and slave (bytes) system.membus.snoops 0 # Total snoops (count) -system.membus.snoop_fanout::samples 253698 # Request fanout histogram +system.membus.snoopTraffic 0 # Total snoop traffic (bytes) +system.membus.snoop_fanout::samples 253589 # Request fanout histogram system.membus.snoop_fanout::mean 0 # Request fanout histogram system.membus.snoop_fanout::stdev 0 # Request fanout histogram system.membus.snoop_fanout::underflows 0 0.00% 0.00% # Request fanout histogram -system.membus.snoop_fanout::0 253698 100.00% 100.00% # Request fanout histogram +system.membus.snoop_fanout::0 253589 100.00% 100.00% # Request fanout histogram system.membus.snoop_fanout::1 0 0.00% 100.00% # Request fanout histogram system.membus.snoop_fanout::overflows 0 0.00% 100.00% # Request fanout histogram system.membus.snoop_fanout::min_value 0 # Request fanout histogram system.membus.snoop_fanout::max_value 0 # Request fanout histogram -system.membus.snoop_fanout::total 253698 # Request fanout histogram -system.membus.reqLayer0.occupancy 685564500 # Layer occupancy (ticks) +system.membus.snoop_fanout::total 253589 # Request fanout histogram +system.membus.reqLayer0.occupancy 685523500 # Layer occupancy (ticks) system.membus.reqLayer0.utilization 0.2 # Layer utilization (%) -system.membus.respLayer1.occupancy 763995250 # Layer occupancy (ticks) +system.membus.respLayer1.occupancy 763755750 # Layer occupancy (ticks) system.membus.respLayer1.utilization 0.2 # Layer utilization (%) ---------- End Simulation Statistics ---------- diff --git a/tests/long/se/20.parser/ref/arm/linux/o3-timing/config.ini b/tests/long/se/20.parser/ref/arm/linux/o3-timing/config.ini index 5bb4589de..67485e1be 100644 --- a/tests/long/se/20.parser/ref/arm/linux/o3-timing/config.ini +++ b/tests/long/se/20.parser/ref/arm/linux/o3-timing/config.ini @@ -14,6 +14,7 @@ children=clk_domain cpu cpu_clk_domain dvfs_handler membus physmem voltage_domai boot_osflags=a cache_line_size=64 clk_domain=system.clk_domain +default_p_state=UNDEFINED eventq_index=0 exit_on_work_items=false init_param=0 @@ -27,6 +28,10 @@ memories=system.physmem mmap_using_noreserve=false multi_thread=false num_work_ids=16 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null readfile= symbolfile= thermal_components= @@ -72,6 +77,7 @@ cpu_id=0 decodeToFetchDelay=1 decodeToRenameDelay=2 decodeWidth=3 +default_p_state=UNDEFINED dispatchWidth=6 do_checkpoint_insts=true do_quiesce=true @@ -110,6 +116,10 @@ numPhysIntRegs=128 numROBEntries=40 numRobs=1 numThreads=1 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null profile=0 progress_interval=0 renameToDecodeDelay=1 @@ -166,12 +176,17 @@ addr_ranges=0:18446744073709551615 assoc=2 clk_domain=system.cpu_clk_domain clusivity=mostly_incl +default_p_state=UNDEFINED demand_mshr_reserve=1 eventq_index=0 hit_latency=2 is_read_only=false max_miss_count=0 mshrs=6 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null prefetch_on_access=false prefetcher=Null response_latency=2 @@ -190,8 +205,13 @@ type=LRU assoc=2 block_size=64 clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 hit_latency=2 +p_state_clk_gate_bins=20 
+p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sequential_access=false size=32768 @@ -214,9 +234,14 @@ walker=system.cpu.dstage2_mmu.stage2_tlb.walker [system.cpu.dstage2_mmu.stage2_tlb.walker] type=ArmTableWalker clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 is_stage2=true num_squash_per_cycle=2 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sys=system [system.cpu.dtb] @@ -230,9 +255,14 @@ walker=system.cpu.dtb.walker [system.cpu.dtb.walker] type=ArmTableWalker clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 is_stage2=false num_squash_per_cycle=2 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sys=system port=system.cpu.toL2Bus.slave[3] @@ -508,12 +538,17 @@ addr_ranges=0:18446744073709551615 assoc=2 clk_domain=system.cpu_clk_domain clusivity=mostly_incl +default_p_state=UNDEFINED demand_mshr_reserve=1 eventq_index=0 hit_latency=1 is_read_only=true max_miss_count=0 mshrs=2 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null prefetch_on_access=false prefetcher=Null response_latency=1 @@ -532,8 +567,13 @@ type=LRU assoc=2 block_size=64 clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 hit_latency=1 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sequential_access=false size=32768 @@ -591,9 +631,14 @@ walker=system.cpu.istage2_mmu.stage2_tlb.walker [system.cpu.istage2_mmu.stage2_tlb.walker] type=ArmTableWalker clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 is_stage2=true num_squash_per_cycle=2 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sys=system [system.cpu.itb] @@ -607,9 +652,14 @@ walker=system.cpu.itb.walker [system.cpu.itb.walker] type=ArmTableWalker clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 is_stage2=false num_squash_per_cycle=2 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sys=system port=system.cpu.toL2Bus.slave[2] @@ -620,12 +670,17 @@ addr_ranges=0:18446744073709551615 assoc=16 clk_domain=system.cpu_clk_domain clusivity=mostly_excl +default_p_state=UNDEFINED demand_mshr_reserve=1 eventq_index=0 hit_latency=12 is_read_only=false max_miss_count=0 mshrs=16 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null prefetch_on_access=true prefetcher=system.cpu.l2cache.prefetcher response_latency=12 @@ -643,6 +698,7 @@ mem_side=system.membus.slave[1] type=StridePrefetcher cache_snoop=false clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED degree=8 eventq_index=0 latency=1 @@ -653,6 +709,10 @@ on_inst=true on_miss=false on_read=true on_write=true +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null queue_filter=true queue_size=32 queue_squash=true @@ -669,8 +729,13 @@ type=RandomRepl assoc=16 block_size=64 clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 hit_latency=12 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sequential_access=false size=1048576 @@ -678,10 +743,15 @@ size=1048576 type=CoherentXBar children=snoop_filter clk_domain=system.cpu_clk_domain 
+default_p_state=UNDEFINED eventq_index=0 forward_latency=0 frontend_latency=1 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 point_of_coherency=false +power_model=Null response_latency=1 snoop_filter=system.cpu.toL2Bus.snoop_filter snoop_response_latency=1 @@ -712,9 +782,9 @@ env= errout=cerr euid=100 eventq_index=0 -executable=/dist/m5/cpu2000/binaries/arm/linux/parser +executable=/arm/projectscratch/randd/systems/dist/cpu2000/binaries/arm/linux/parser gid=100 -input=/dist/m5/cpu2000/data/parser/mdred/input/parser.in +input=/arm/projectscratch/randd/systems/dist/cpu2000/data/parser/mdred/input/parser.in kvmInSE=false max_stack_size=67108864 output=cout @@ -744,10 +814,15 @@ transition_latency=100000000 [system.membus] type=CoherentXBar clk_domain=system.clk_domain +default_p_state=UNDEFINED eventq_index=0 forward_latency=4 frontend_latency=3 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 point_of_coherency=true +power_model=Null response_latency=2 snoop_filter=Null snoop_response_latency=4 @@ -791,6 +866,7 @@ burst_length=8 channels=1 clk_domain=system.clk_domain conf_table_reported=true +default_p_state=UNDEFINED device_bus_width=8 device_rowbuffer_size=1024 device_size=536870912 @@ -802,7 +878,11 @@ max_accesses_per_row=16 mem_sched_policy=frfcfs min_writes_per_switch=16 null=false +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 page_policy=open_adaptive +power_model=Null range=0:134217727 ranks_per_channel=2 read_buffer_size=32 diff --git a/tests/long/se/20.parser/ref/arm/linux/o3-timing/simerr b/tests/long/se/20.parser/ref/arm/linux/o3-timing/simerr index be90b0340..caeab8324 100755 --- a/tests/long/se/20.parser/ref/arm/linux/o3-timing/simerr +++ b/tests/long/se/20.parser/ref/arm/linux/o3-timing/simerr @@ -1,3 +1,4 @@ warn: DRAM device capacity (8192 Mbytes) does not match the address range assigned (128 Mbytes) warn: Sockets disabled, not accepting gdb connections +warn: ClockedObject: More than one power state change request encountered within the same simulation tick warn: CP14 unimplemented crn[8], opc1[2], crm[9], opc2[4] diff --git a/tests/long/se/20.parser/ref/arm/linux/o3-timing/simout b/tests/long/se/20.parser/ref/arm/linux/o3-timing/simout index b1e4c3523..3589e4728 100755 --- a/tests/long/se/20.parser/ref/arm/linux/o3-timing/simout +++ b/tests/long/se/20.parser/ref/arm/linux/o3-timing/simout @@ -3,10 +3,10 @@ Redirecting stderr to build/ARM/tests/opt/long/se/20.parser/arm/linux/o3-timing/ gem5 Simulator System. http://gem5.org gem5 is copyrighted software; use the --copyright option for details. -gem5 compiled Mar 16 2016 23:07:21 -gem5 started Mar 16 2016 23:48:20 -gem5 executing on dinar2c11, pid 25963 -command line: build/ARM/gem5.opt -d build/ARM/tests/opt/long/se/20.parser/arm/linux/o3-timing -re /home/stever/gem5-public/tests/run.py build/ARM/tests/opt/long/se/20.parser/arm/linux/o3-timing +gem5 compiled Jul 21 2016 14:37:41 +gem5 started Jul 21 2016 14:46:05 +gem5 executing on e108600-lin, pid 23184 +command line: /work/curdun01/gem5-external.hg/build/ARM/gem5.opt -d build/ARM/tests/opt/long/se/20.parser/arm/linux/o3-timing -re /work/curdun01/gem5-external.hg/tests/testing/../run.py long/se/20.parser/arm/linux/o3-timing Global frequency set at 1000000000000 ticks per second info: Entering event queue @ 0. Starting simulation... 
diff --git a/tests/long/se/20.parser/ref/arm/linux/o3-timing/stats.txt b/tests/long/se/20.parser/ref/arm/linux/o3-timing/stats.txt index b6b8a4259..083d24314 100644 --- a/tests/long/se/20.parser/ref/arm/linux/o3-timing/stats.txt +++ b/tests/long/se/20.parser/ref/arm/linux/o3-timing/stats.txt @@ -4,11 +4,11 @@ sim_seconds 0.232865 # Nu sim_ticks 232864525000 # Number of ticks simulated final_tick 232864525000 # Number of ticks from beginning of simulation (restored from checkpoints and never reset) sim_freq 1000000000000 # Frequency of simulated ticks -host_inst_rate 221507 # Simulator instruction rate (inst/s) -host_op_rate 239970 # Simulator op (including micro ops) rate (op/s) -host_tick_rate 102093126 # Simulator tick rate (ticks/s) -host_mem_usage 343096 # Number of bytes of host memory used -host_seconds 2280.90 # Real time elapsed on the host +host_inst_rate 156445 # Simulator instruction rate (inst/s) +host_op_rate 169485 # Simulator op (including micro ops) rate (op/s) +host_tick_rate 72105974 # Simulator tick rate (ticks/s) +host_mem_usage 295816 # Number of bytes of host memory used +host_seconds 3229.48 # Real time elapsed on the host sim_insts 505234934 # Number of instructions simulated sim_ops 547348155 # Number of ops (including micro ops) simulated system.voltage_domain.voltage 1 # Voltage in Volts @@ -1204,6 +1204,7 @@ system.cpu.toL2Bus.pkt_size_system.cpu.icache.mem_side::system.cpu.l2cache.cpu_s system.cpu.toL2Bus.pkt_size_system.cpu.dcache.mem_side::system.cpu.l2cache.cpu_side 360627392 # Cumulative packet size per connected master and slave (bytes) system.cpu.toL2Bus.pkt_size::total 370455424 # Cumulative packet size per connected master and slave (bytes) system.cpu.toL2Bus.snoops 950855 # Total snoops (count) +system.cpu.toL2Bus.snoopTraffic 18712896 # Total snoop traffic (bytes) system.cpu.toL2Bus.snoop_fanout::samples 3845578 # Request fanout histogram system.cpu.toL2Bus.snoop_fanout::mean 0.078356 # Request fanout histogram system.cpu.toL2Bus.snoop_fanout::stdev 0.284056 # Request fanout histogram @@ -1236,6 +1237,7 @@ system.membus.pkt_count::total 1239087 # Pa system.membus.pkt_size_system.cpu.l2cache.mem_side::system.physmem.port 45841536 # Cumulative packet size per connected master and slave (bytes) system.membus.pkt_size::total 45841536 # Cumulative packet size per connected master and slave (bytes) system.membus.snoops 0 # Total snoops (count) +system.membus.snoopTraffic 0 # Total snoop traffic (bytes) system.membus.snoop_fanout::samples 815167 # Request fanout histogram system.membus.snoop_fanout::mean 0 # Request fanout histogram system.membus.snoop_fanout::stdev 0 # Request fanout histogram diff --git a/tests/long/se/20.parser/ref/arm/linux/simple-atomic/config.ini b/tests/long/se/20.parser/ref/arm/linux/simple-atomic/config.ini index 6807fa19b..719526a91 100644 --- a/tests/long/se/20.parser/ref/arm/linux/simple-atomic/config.ini +++ b/tests/long/se/20.parser/ref/arm/linux/simple-atomic/config.ini @@ -14,6 +14,7 @@ children=clk_domain cpu cpu_clk_domain dvfs_handler membus physmem voltage_domai boot_osflags=a cache_line_size=64 clk_domain=system.clk_domain +default_p_state=UNDEFINED eventq_index=0 exit_on_work_items=false init_param=0 @@ -27,6 +28,10 @@ memories=system.physmem mmap_using_noreserve=false multi_thread=false num_work_ids=16 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null readfile= symbolfile= thermal_components= @@ -55,6 +60,7 @@ branchPred=Null checker=Null 
clk_domain=system.cpu_clk_domain cpu_id=0 +default_p_state=UNDEFINED do_checkpoint_insts=true do_quiesce=true do_statistics_insts=true @@ -73,6 +79,10 @@ max_insts_any_thread=0 max_loads_all_threads=0 max_loads_any_thread=0 numThreads=1 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null profile=0 progress_interval=0 simpoint_start_insts= @@ -106,9 +116,14 @@ walker=system.cpu.dstage2_mmu.stage2_tlb.walker [system.cpu.dstage2_mmu.stage2_tlb.walker] type=ArmTableWalker clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 is_stage2=true num_squash_per_cycle=2 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sys=system [system.cpu.dtb] @@ -122,9 +137,14 @@ walker=system.cpu.dtb.walker [system.cpu.dtb.walker] type=ArmTableWalker clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 is_stage2=false num_squash_per_cycle=2 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sys=system port=system.membus.slave[4] @@ -182,9 +202,14 @@ walker=system.cpu.istage2_mmu.stage2_tlb.walker [system.cpu.istage2_mmu.stage2_tlb.walker] type=ArmTableWalker clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 is_stage2=true num_squash_per_cycle=2 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sys=system [system.cpu.itb] @@ -198,9 +223,14 @@ walker=system.cpu.itb.walker [system.cpu.itb.walker] type=ArmTableWalker clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 is_stage2=false num_squash_per_cycle=2 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sys=system port=system.membus.slave[3] @@ -218,9 +248,9 @@ env= errout=cerr euid=100 eventq_index=0 -executable=/dist/m5/cpu2000/binaries/arm/linux/parser +executable=/arm/projectscratch/randd/systems/dist/cpu2000/binaries/arm/linux/parser gid=100 -input=/dist/m5/cpu2000/data/parser/mdred/input/parser.in +input=/arm/projectscratch/randd/systems/dist/cpu2000/data/parser/mdred/input/parser.in kvmInSE=false max_stack_size=67108864 output=cout @@ -250,10 +280,15 @@ transition_latency=100000000 [system.membus] type=CoherentXBar clk_domain=system.clk_domain +default_p_state=UNDEFINED eventq_index=0 forward_latency=4 frontend_latency=3 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 point_of_coherency=true +power_model=Null response_latency=2 snoop_filter=Null snoop_response_latency=4 @@ -268,11 +303,16 @@ type=SimpleMemory bandwidth=73.000000 clk_domain=system.clk_domain conf_table_reported=true +default_p_state=UNDEFINED eventq_index=0 in_addr_map=true latency=30000 latency_var=0 null=false +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null range=0:134217727 port=system.membus.master[0] diff --git a/tests/long/se/20.parser/ref/arm/linux/simple-atomic/simerr b/tests/long/se/20.parser/ref/arm/linux/simple-atomic/simerr index 1a4f96712..aadc3d011 100755 --- a/tests/long/se/20.parser/ref/arm/linux/simple-atomic/simerr +++ b/tests/long/se/20.parser/ref/arm/linux/simple-atomic/simerr @@ -1 +1,2 @@ warn: Sockets disabled, not accepting gdb connections +warn: ClockedObject: More than one power state change request encountered within the same simulation tick diff --git 
a/tests/long/se/20.parser/ref/arm/linux/simple-atomic/simout b/tests/long/se/20.parser/ref/arm/linux/simple-atomic/simout index b0dd0015e..6f63d3022 100755 --- a/tests/long/se/20.parser/ref/arm/linux/simple-atomic/simout +++ b/tests/long/se/20.parser/ref/arm/linux/simple-atomic/simout @@ -3,10 +3,10 @@ Redirecting stderr to build/ARM/tests/opt/long/se/20.parser/arm/linux/simple-ato gem5 Simulator System. http://gem5.org gem5 is copyrighted software; use the --copyright option for details. -gem5 compiled Mar 16 2016 15:51:04 -gem5 started Mar 16 2016 16:37:21 -gem5 executing on dinar2c11, pid 16154 -command line: build/ARM/gem5.opt -d build/ARM/tests/opt/long/se/20.parser/arm/linux/simple-atomic -re /home/stever/gem5-public/tests/run.py build/ARM/tests/opt/long/se/20.parser/arm/linux/simple-atomic +gem5 compiled Jul 21 2016 14:37:41 +gem5 started Jul 21 2016 14:38:22 +gem5 executing on e108600-lin, pid 23082 +command line: /work/curdun01/gem5-external.hg/build/ARM/gem5.opt -d build/ARM/tests/opt/long/se/20.parser/arm/linux/simple-atomic -re /work/curdun01/gem5-external.hg/tests/testing/../run.py long/se/20.parser/arm/linux/simple-atomic Global frequency set at 1000000000000 ticks per second info: Entering event queue @ 0. Starting simulation... diff --git a/tests/long/se/20.parser/ref/arm/linux/simple-atomic/stats.txt b/tests/long/se/20.parser/ref/arm/linux/simple-atomic/stats.txt index 826ec1511..e8a891fe8 100644 --- a/tests/long/se/20.parser/ref/arm/linux/simple-atomic/stats.txt +++ b/tests/long/se/20.parser/ref/arm/linux/simple-atomic/stats.txt @@ -4,11 +4,11 @@ sim_seconds 0.279361 # Nu sim_ticks 279360903000 # Number of ticks simulated final_tick 279360903000 # Number of ticks from beginning of simulation (restored from checkpoints and never reset) sim_freq 1000000000000 # Frequency of simulated ticks -host_inst_rate 2143205 # Simulator instruction rate (inst/s) -host_op_rate 2321375 # Simulator op (including micro ops) rate (op/s) -host_tick_rate 1181904303 # Simulator tick rate (ticks/s) -host_mem_usage 305572 # Number of bytes of host memory used -host_seconds 236.37 # Real time elapsed on the host +host_inst_rate 1100009 # Simulator instruction rate (inst/s) +host_op_rate 1191455 # Simulator op (including micro ops) rate (op/s) +host_tick_rate 606617028 # Simulator tick rate (ticks/s) +host_mem_usage 259840 # Number of bytes of host memory used +host_seconds 460.52 # Real time elapsed on the host sim_insts 506578818 # Number of instructions simulated sim_ops 548692039 # Number of ops (including micro ops) simulated system.voltage_domain.voltage 1 # Voltage in Volts @@ -237,6 +237,7 @@ system.membus.pkt_size_system.cpu.icache_port::system.physmem.port 2066434344 system.membus.pkt_size_system.cpu.dcache_port::system.physmem.port 638914943 # Cumulative packet size per connected master and slave (bytes) system.membus.pkt_size::total 2705349287 # Cumulative packet size per connected master and slave (bytes) system.membus.snoops 0 # Total snoops (count) +system.membus.snoopTraffic 0 # Total snoop traffic (bytes) system.membus.snoop_fanout::samples 687926230 # Request fanout histogram system.membus.snoop_fanout::mean 0.750965 # Request fanout histogram system.membus.snoop_fanout::stdev 0.432454 # Request fanout histogram diff --git a/tests/long/se/20.parser/ref/arm/linux/simple-timing/config.ini b/tests/long/se/20.parser/ref/arm/linux/simple-timing/config.ini index f7f42e194..cc618b726 100644 --- a/tests/long/se/20.parser/ref/arm/linux/simple-timing/config.ini +++ 
b/tests/long/se/20.parser/ref/arm/linux/simple-timing/config.ini @@ -14,6 +14,7 @@ children=clk_domain cpu cpu_clk_domain dvfs_handler membus physmem voltage_domai boot_osflags=a cache_line_size=64 clk_domain=system.clk_domain +default_p_state=UNDEFINED eventq_index=0 exit_on_work_items=false init_param=0 @@ -27,6 +28,10 @@ memories=system.physmem mmap_using_noreserve=false multi_thread=false num_work_ids=16 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null readfile= symbolfile= thermal_components= @@ -55,6 +60,7 @@ branchPred=Null checker=Null clk_domain=system.cpu_clk_domain cpu_id=0 +default_p_state=UNDEFINED do_checkpoint_insts=true do_quiesce=true do_statistics_insts=true @@ -72,6 +78,10 @@ max_insts_any_thread=0 max_loads_all_threads=0 max_loads_any_thread=0 numThreads=1 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null profile=0 progress_interval=0 simpoint_start_insts= @@ -90,12 +100,17 @@ addr_ranges=0:18446744073709551615 assoc=2 clk_domain=system.cpu_clk_domain clusivity=mostly_incl +default_p_state=UNDEFINED demand_mshr_reserve=1 eventq_index=0 hit_latency=2 is_read_only=false max_miss_count=0 mshrs=4 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null prefetch_on_access=false prefetcher=Null response_latency=2 @@ -114,8 +129,13 @@ type=LRU assoc=2 block_size=64 clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 hit_latency=2 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sequential_access=false size=262144 @@ -138,9 +158,14 @@ walker=system.cpu.dstage2_mmu.stage2_tlb.walker [system.cpu.dstage2_mmu.stage2_tlb.walker] type=ArmTableWalker clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 is_stage2=true num_squash_per_cycle=2 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sys=system [system.cpu.dtb] @@ -154,9 +179,14 @@ walker=system.cpu.dtb.walker [system.cpu.dtb.walker] type=ArmTableWalker clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 is_stage2=false num_squash_per_cycle=2 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sys=system port=system.cpu.toL2Bus.slave[3] @@ -167,12 +197,17 @@ addr_ranges=0:18446744073709551615 assoc=2 clk_domain=system.cpu_clk_domain clusivity=mostly_incl +default_p_state=UNDEFINED demand_mshr_reserve=1 eventq_index=0 hit_latency=2 is_read_only=true max_miss_count=0 mshrs=4 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null prefetch_on_access=false prefetcher=Null response_latency=2 @@ -191,8 +226,13 @@ type=LRU assoc=2 block_size=64 clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 hit_latency=2 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sequential_access=false size=131072 @@ -250,9 +290,14 @@ walker=system.cpu.istage2_mmu.stage2_tlb.walker [system.cpu.istage2_mmu.stage2_tlb.walker] type=ArmTableWalker clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 is_stage2=true num_squash_per_cycle=2 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sys=system [system.cpu.itb] @@ -266,9 +311,14 @@ 
walker=system.cpu.itb.walker [system.cpu.itb.walker] type=ArmTableWalker clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 is_stage2=false num_squash_per_cycle=2 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sys=system port=system.cpu.toL2Bus.slave[2] @@ -279,12 +329,17 @@ addr_ranges=0:18446744073709551615 assoc=8 clk_domain=system.cpu_clk_domain clusivity=mostly_incl +default_p_state=UNDEFINED demand_mshr_reserve=1 eventq_index=0 hit_latency=20 is_read_only=false max_miss_count=0 mshrs=20 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null prefetch_on_access=false prefetcher=Null response_latency=20 @@ -303,8 +358,13 @@ type=LRU assoc=8 block_size=64 clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 hit_latency=20 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sequential_access=false size=2097152 @@ -312,10 +372,15 @@ size=2097152 type=CoherentXBar children=snoop_filter clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 forward_latency=0 frontend_latency=1 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 point_of_coherency=false +power_model=Null response_latency=1 snoop_filter=system.cpu.toL2Bus.snoop_filter snoop_response_latency=1 @@ -346,9 +411,9 @@ env= errout=cerr euid=100 eventq_index=0 -executable=/dist/m5/cpu2000/binaries/arm/linux/parser +executable=/arm/projectscratch/randd/systems/dist/cpu2000/binaries/arm/linux/parser gid=100 -input=/dist/m5/cpu2000/data/parser/mdred/input/parser.in +input=/arm/projectscratch/randd/systems/dist/cpu2000/data/parser/mdred/input/parser.in kvmInSE=false max_stack_size=67108864 output=cout @@ -378,10 +443,15 @@ transition_latency=100000000 [system.membus] type=CoherentXBar clk_domain=system.clk_domain +default_p_state=UNDEFINED eventq_index=0 forward_latency=4 frontend_latency=3 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 point_of_coherency=true +power_model=Null response_latency=2 snoop_filter=Null snoop_response_latency=4 @@ -396,11 +466,16 @@ type=SimpleMemory bandwidth=73.000000 clk_domain=system.clk_domain conf_table_reported=true +default_p_state=UNDEFINED eventq_index=0 in_addr_map=true latency=30000 latency_var=0 null=false +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null range=0:134217727 port=system.membus.master[0] diff --git a/tests/long/se/20.parser/ref/arm/linux/simple-timing/simerr b/tests/long/se/20.parser/ref/arm/linux/simple-timing/simerr index 1a4f96712..aadc3d011 100755 --- a/tests/long/se/20.parser/ref/arm/linux/simple-timing/simerr +++ b/tests/long/se/20.parser/ref/arm/linux/simple-timing/simerr @@ -1 +1,2 @@ warn: Sockets disabled, not accepting gdb connections +warn: ClockedObject: More than one power state change request encountered within the same simulation tick diff --git a/tests/long/se/20.parser/ref/arm/linux/simple-timing/simout b/tests/long/se/20.parser/ref/arm/linux/simple-timing/simout index 7596ee7d2..1889b3430 100755 --- a/tests/long/se/20.parser/ref/arm/linux/simple-timing/simout +++ b/tests/long/se/20.parser/ref/arm/linux/simple-timing/simout @@ -3,10 +3,10 @@ Redirecting stderr to build/ARM/tests/opt/long/se/20.parser/arm/linux/simple-tim gem5 Simulator System. 
http://gem5.org gem5 is copyrighted software; use the --copyright option for details. -gem5 compiled Mar 16 2016 15:51:04 -gem5 started Mar 16 2016 15:51:37 -gem5 executing on dinar2c11, pid 15211 -command line: build/ARM/gem5.opt -d build/ARM/tests/opt/long/se/20.parser/arm/linux/simple-timing -re /home/stever/gem5-public/tests/run.py build/ARM/tests/opt/long/se/20.parser/arm/linux/simple-timing +gem5 compiled Jul 21 2016 14:37:41 +gem5 started Jul 21 2016 14:38:21 +gem5 executing on e108600-lin, pid 23071 +command line: /work/curdun01/gem5-external.hg/build/ARM/gem5.opt -d build/ARM/tests/opt/long/se/20.parser/arm/linux/simple-timing -re /work/curdun01/gem5-external.hg/tests/testing/../run.py long/se/20.parser/arm/linux/simple-timing Global frequency set at 1000000000000 ticks per second info: Entering event queue @ 0. Starting simulation... diff --git a/tests/long/se/20.parser/ref/arm/linux/simple-timing/stats.txt b/tests/long/se/20.parser/ref/arm/linux/simple-timing/stats.txt index 59b7a6f8a..a77764c75 100644 --- a/tests/long/se/20.parser/ref/arm/linux/simple-timing/stats.txt +++ b/tests/long/se/20.parser/ref/arm/linux/simple-timing/stats.txt @@ -4,11 +4,11 @@ sim_seconds 0.708539 # Nu sim_ticks 708539449500 # Number of ticks simulated final_tick 708539449500 # Number of ticks from beginning of simulation (restored from checkpoints and never reset) sim_freq 1000000000000 # Frequency of simulated ticks -host_inst_rate 1462928 # Simulator instruction rate (inst/s) -host_op_rate 1584286 # Simulator op (including micro ops) rate (op/s) -host_tick_rate 2052623495 # Simulator tick rate (ticks/s) -host_mem_usage 315564 # Number of bytes of host memory used -host_seconds 345.19 # Real time elapsed on the host +host_inst_rate 665557 # Simulator instruction rate (inst/s) +host_op_rate 720769 # Simulator op (including micro ops) rate (op/s) +host_tick_rate 933837970 # Simulator tick rate (ticks/s) +host_mem_usage 269828 # Number of bytes of host memory used +host_seconds 758.74 # Real time elapsed on the host sim_insts 504984064 # Number of instructions simulated sim_ops 546875315 # Number of ops (including micro ops) simulated system.voltage_domain.voltage 1 # Voltage in Volts @@ -626,6 +626,7 @@ system.cpu.toL2Bus.pkt_size_system.cpu.icache.mem_side::system.cpu.l2cache.cpu_s system.cpu.toL2Bus.pkt_size_system.cpu.dcache.mem_side::system.cpu.l2cache.cpu_side 141189120 # Cumulative packet size per connected master and slave (bytes) system.cpu.toL2Bus.pkt_size::total 142552896 # Cumulative packet size per connected master and slave (bytes) system.cpu.toL2Bus.snoops 110394 # Total snoops (count) +system.cpu.toL2Bus.snoopTraffic 6165120 # Total snoop traffic (bytes) system.cpu.toL2Bus.snoop_fanout::samples 1262287 # Request fanout histogram system.cpu.toL2Bus.snoop_fanout::mean 0.004566 # Request fanout histogram system.cpu.toL2Bus.snoop_fanout::stdev 0.067432 # Request fanout histogram @@ -655,6 +656,7 @@ system.membus.pkt_count::total 392978 # Pa system.membus.pkt_size_system.cpu.l2cache.mem_side::system.physmem.port 15276416 # Cumulative packet size per connected master and slave (bytes) system.membus.pkt_size::total 15276416 # Cumulative packet size per connected master and slave (bytes) system.membus.snoops 0 # Total snoops (count) +system.membus.snoopTraffic 0 # Total snoop traffic (bytes) system.membus.snoop_fanout::samples 250615 # Request fanout histogram system.membus.snoop_fanout::mean 0 # Request fanout histogram system.membus.snoop_fanout::stdev 0 # Request fanout histogram diff --git 
a/tests/long/se/20.parser/ref/x86/linux/o3-timing/config.ini b/tests/long/se/20.parser/ref/x86/linux/o3-timing/config.ini index f75c6f447..fb202712b 100644 --- a/tests/long/se/20.parser/ref/x86/linux/o3-timing/config.ini +++ b/tests/long/se/20.parser/ref/x86/linux/o3-timing/config.ini @@ -14,6 +14,7 @@ children=clk_domain cpu cpu_clk_domain dvfs_handler membus physmem voltage_domai boot_osflags=a cache_line_size=64 clk_domain=system.clk_domain +default_p_state=UNDEFINED eventq_index=0 exit_on_work_items=false init_param=0 @@ -27,8 +28,14 @@ memories=system.physmem mmap_using_noreserve=false multi_thread=false num_work_ids=16 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null readfile= symbolfile= +thermal_components= +thermal_model=Null work_begin_ckpt_count=0 work_begin_cpu_id_exit=-1 work_begin_exit_count=0 @@ -70,6 +77,7 @@ cpu_id=0 decodeToFetchDelay=1 decodeToRenameDelay=1 decodeWidth=8 +default_p_state=UNDEFINED dispatchWidth=8 do_checkpoint_insts=true do_quiesce=true @@ -106,6 +114,10 @@ numPhysIntRegs=256 numROBEntries=192 numRobs=1 numThreads=1 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null profile=0 progress_interval=0 renameToDecodeDelay=1 @@ -151,11 +163,18 @@ choicePredictorSize=8192 eventq_index=0 globalCtrBits=2 globalPredictorSize=8192 +indirectHashGHR=true +indirectHashTargets=true +indirectPathLength=3 +indirectSets=256 +indirectTagSize=16 +indirectWays=2 instShiftAmt=2 localCtrBits=2 localHistoryTableSize=2048 localPredictorSize=2048 numThreads=1 +useIndirect=true [system.cpu.dcache] type=Cache @@ -164,12 +183,17 @@ addr_ranges=0:18446744073709551615 assoc=2 clk_domain=system.cpu_clk_domain clusivity=mostly_incl +default_p_state=UNDEFINED demand_mshr_reserve=1 eventq_index=0 hit_latency=2 is_read_only=false max_miss_count=0 mshrs=4 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null prefetch_on_access=false prefetcher=Null response_latency=2 @@ -188,8 +212,13 @@ type=LRU assoc=2 block_size=64 clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 hit_latency=2 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sequential_access=false size=262144 @@ -203,8 +232,13 @@ walker=system.cpu.dtb.walker [system.cpu.dtb.walker] type=X86PagetableWalker clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 num_squash_per_cycle=4 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null system=system port=system.cpu.toL2Bus.slave[3] @@ -522,12 +556,17 @@ addr_ranges=0:18446744073709551615 assoc=2 clk_domain=system.cpu_clk_domain clusivity=mostly_incl +default_p_state=UNDEFINED demand_mshr_reserve=1 eventq_index=0 hit_latency=2 is_read_only=true max_miss_count=0 mshrs=4 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null prefetch_on_access=false prefetcher=Null response_latency=2 @@ -546,18 +585,28 @@ type=LRU assoc=2 block_size=64 clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 hit_latency=2 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sequential_access=false size=131072 [system.cpu.interrupts] type=X86LocalApic clk_domain=system.cpu.apic_clk_domain +default_p_state=UNDEFINED eventq_index=0 int_latency=1000 
+p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 pio_addr=2305843009213693952 pio_latency=100000 +power_model=Null system=system int_master=system.membus.slave[2] int_slave=system.membus.master[2] @@ -577,8 +626,13 @@ walker=system.cpu.itb.walker [system.cpu.itb.walker] type=X86PagetableWalker clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 num_squash_per_cycle=4 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null system=system port=system.cpu.toL2Bus.slave[2] @@ -589,12 +643,17 @@ addr_ranges=0:18446744073709551615 assoc=8 clk_domain=system.cpu_clk_domain clusivity=mostly_incl +default_p_state=UNDEFINED demand_mshr_reserve=1 eventq_index=0 hit_latency=20 is_read_only=false max_miss_count=0 mshrs=20 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null prefetch_on_access=false prefetcher=Null response_latency=20 @@ -613,8 +672,13 @@ type=LRU assoc=8 block_size=64 clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 hit_latency=20 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sequential_access=false size=2097152 @@ -622,10 +686,15 @@ size=2097152 type=CoherentXBar children=snoop_filter clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 forward_latency=0 frontend_latency=1 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 point_of_coherency=false +power_model=Null response_latency=1 snoop_filter=system.cpu.toL2Bus.snoop_filter snoop_response_latency=1 @@ -656,9 +725,9 @@ env= errout=cerr euid=100 eventq_index=0 -executable=/dist/m5/cpu2000/binaries/x86/linux/parser +executable=/arm/projectscratch/randd/systems/dist/cpu2000/binaries/x86/linux/parser gid=100 -input=/dist/m5/cpu2000/data/parser/mdred/input/parser.in +input=/arm/projectscratch/randd/systems/dist/cpu2000/data/parser/mdred/input/parser.in kvmInSE=false max_stack_size=67108864 output=cout @@ -688,10 +757,15 @@ transition_latency=100000000 [system.membus] type=CoherentXBar clk_domain=system.clk_domain +default_p_state=UNDEFINED eventq_index=0 forward_latency=4 frontend_latency=3 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 point_of_coherency=true +power_model=Null response_latency=2 snoop_filter=Null snoop_response_latency=4 @@ -735,6 +809,7 @@ burst_length=8 channels=1 clk_domain=system.clk_domain conf_table_reported=true +default_p_state=UNDEFINED device_bus_width=8 device_rowbuffer_size=1024 device_size=536870912 @@ -746,7 +821,11 @@ max_accesses_per_row=16 mem_sched_policy=frfcfs min_writes_per_switch=16 null=false +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 page_policy=open_adaptive +power_model=Null range=0:134217727 ranks_per_channel=2 read_buffer_size=32 diff --git a/tests/long/se/20.parser/ref/x86/linux/o3-timing/simerr b/tests/long/se/20.parser/ref/x86/linux/o3-timing/simerr index f9e2ef3b2..bbcd9d751 100755 --- a/tests/long/se/20.parser/ref/x86/linux/o3-timing/simerr +++ b/tests/long/se/20.parser/ref/x86/linux/o3-timing/simerr @@ -1 +1,3 @@ warn: DRAM device capacity (8192 Mbytes) does not match the address range assigned (128 Mbytes) +warn: Sockets disabled, not accepting gdb connections +warn: ClockedObject: More than one power state change request encountered within the same simulation tick diff --git 
a/tests/long/se/20.parser/ref/x86/linux/o3-timing/simout b/tests/long/se/20.parser/ref/x86/linux/o3-timing/simout index 48af414dd..72c2f65ba 100755 --- a/tests/long/se/20.parser/ref/x86/linux/o3-timing/simout +++ b/tests/long/se/20.parser/ref/x86/linux/o3-timing/simout @@ -3,29 +3,18 @@ Redirecting stderr to build/X86/tests/opt/long/se/20.parser/x86/linux/o3-timing/ gem5 Simulator System. http://gem5.org gem5 is copyrighted software; use the --copyright option for details. -gem5 compiled Mar 16 2016 22:57:26 -gem5 started Mar 16 2016 22:58:58 -gem5 executing on dinar2c11, pid 24771 -command line: build/X86/gem5.opt -d build/X86/tests/opt/long/se/20.parser/x86/linux/o3-timing -re /home/stever/gem5-public/tests/run.py build/X86/tests/opt/long/se/20.parser/x86/linux/o3-timing +gem5 compiled Jul 21 2016 14:35:23 +gem5 started Jul 21 2016 14:36:20 +gem5 executing on e108600-lin, pid 18568 +command line: /work/curdun01/gem5-external.hg/build/X86/gem5.opt -d build/X86/tests/opt/long/se/20.parser/x86/linux/o3-timing -re /work/curdun01/gem5-external.hg/tests/testing/../run.py long/se/20.parser/x86/linux/o3-timing Global frequency set at 1000000000000 ticks per second info: Entering event queue @ 0. Starting simulation... - Reading the dictionary files: **info: Increasing stack size by one page. -*******info: Increasing stack size by one page. -******************************info: Increasing stack size by one page. -info: Increasing stack size by one page. -info: Increasing stack size by one page. -info: Increasing stack size by one page. -info: Increasing stack size by one page. -info: Increasing stack size by one page. -info: Increasing stack size by one page. -info: Increasing stack size by one page. -info: Increasing stack size by one page. -info: Increasing stack size by one page. info: Increasing stack size by one page. + Reading the dictionary files: **info: Increasing stack size by one page. info: Increasing stack size by one page. -********** +*********************************************** 58924 words stored in 3784810 bytes @@ -57,6 +46,13 @@ Echoing of input sentence turned on. - he ran home so quickly that his mother could hardly believe he had called from school - so many people attended that they spilled over into several neighboring fields - voting in favor of the bill were 36 Republicans and 4 moderate Democrats +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. : Grace may not be possible to fix the problem any program as good as ours should be useful biochemically , I think the experiment has a lot of problems @@ -78,11 +74,9 @@ Echoing of input sentence turned on. the man with whom I play tennis is here there is a dog in the park this is not the man we know and love -info: Increasing stack size by one page. -info: Increasing stack size by one page. we like to eat at restaurants , usually on weekends what did John say he thought you should do about 2 million people attended the five best costumes got prizes No errors! 
-Exiting @ tick 404911731500 because target called exit() +Exiting @ tick 481957625500 because target called exit() diff --git a/tests/long/se/20.parser/ref/x86/linux/o3-timing/stats.txt b/tests/long/se/20.parser/ref/x86/linux/o3-timing/stats.txt index 2ac1aa390..4e13e1bff 100644 --- a/tests/long/se/20.parser/ref/x86/linux/o3-timing/stats.txt +++ b/tests/long/se/20.parser/ref/x86/linux/o3-timing/stats.txt @@ -4,11 +4,11 @@ sim_seconds 0.481958 # Nu sim_ticks 481957625500 # Number of ticks simulated final_tick 481957625500 # Number of ticks from beginning of simulation (restored from checkpoints and never reset) sim_freq 1000000000000 # Frequency of simulated ticks -host_inst_rate 134289 # Simulator instruction rate (inst/s) -host_op_rate 248503 # Simulator op (including micro ops) rate (op/s) -host_tick_rate 78275315 # Simulator tick rate (ticks/s) -host_mem_usage 362988 # Number of bytes of host memory used -host_seconds 6157.21 # Real time elapsed on the host +host_inst_rate 109870 # Simulator instruction rate (inst/s) +host_op_rate 203315 # Simulator op (including micro ops) rate (op/s) +host_tick_rate 64041688 # Simulator tick rate (ticks/s) +host_mem_usage 315224 # Number of bytes of host memory used +host_seconds 7525.69 # Real time elapsed on the host sim_insts 826847303 # Number of instructions simulated sim_ops 1530082520 # Number of ops (including micro ops) simulated system.voltage_domain.voltage 1 # Voltage in Volts @@ -1017,6 +1017,7 @@ system.cpu.toL2Bus.pkt_size_system.cpu.icache.mem_side::system.cpu.l2cache.cpu_s system.cpu.toL2Bus.pkt_size_system.cpu.dcache.mem_side::system.cpu.l2cache.cpu_side 312832576 # Cumulative packet size per connected master and slave (bytes) system.cpu.toL2Bus.pkt_size::total 313452224 # Cumulative packet size per connected master and slave (bytes) system.cpu.toL2Bus.snoops 356883 # Total snoops (count) +system.cpu.toL2Bus.snoopTraffic 18985088 # Total snoop traffic (bytes) system.cpu.toL2Bus.snoop_fanout::samples 2914251 # Request fanout histogram system.cpu.toL2Bus.snoop_fanout::mean 0.004390 # Request fanout histogram system.cpu.toL2Bus.snoop_fanout::stdev 0.066139 # Request fanout histogram @@ -1049,6 +1050,7 @@ system.membus.pkt_size_system.cpu.l2cache.mem_side::system.physmem.port 4363 system.membus.pkt_size_system.cpu.l2cache.mem_side::total 43633600 # Cumulative packet size per connected master and slave (bytes) system.membus.pkt_size::total 43633600 # Cumulative packet size per connected master and slave (bytes) system.membus.snoops 0 # Total snoops (count) +system.membus.snoopTraffic 0 # Total snoop traffic (bytes) system.membus.snoop_fanout::samples 740563 # Request fanout histogram system.membus.snoop_fanout::mean 0 # Request fanout histogram system.membus.snoop_fanout::stdev 0 # Request fanout histogram diff --git a/tests/long/se/20.parser/ref/x86/linux/simple-atomic/config.ini b/tests/long/se/20.parser/ref/x86/linux/simple-atomic/config.ini index 9f3703298..4c9b068a2 100644 --- a/tests/long/se/20.parser/ref/x86/linux/simple-atomic/config.ini +++ b/tests/long/se/20.parser/ref/x86/linux/simple-atomic/config.ini @@ -14,6 +14,7 @@ children=clk_domain cpu cpu_clk_domain dvfs_handler membus physmem voltage_domai boot_osflags=a cache_line_size=64 clk_domain=system.clk_domain +default_p_state=UNDEFINED eventq_index=0 exit_on_work_items=false init_param=0 @@ -27,8 +28,14 @@ memories=system.physmem mmap_using_noreserve=false multi_thread=false num_work_ids=16 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 
+p_state_clk_gate_min=1000 +power_model=Null readfile= symbolfile= +thermal_components= +thermal_model=Null work_begin_ckpt_count=0 work_begin_cpu_id_exit=-1 work_begin_exit_count=0 @@ -53,6 +60,7 @@ branchPred=Null checker=Null clk_domain=system.cpu_clk_domain cpu_id=0 +default_p_state=UNDEFINED do_checkpoint_insts=true do_quiesce=true do_statistics_insts=true @@ -69,6 +77,10 @@ max_insts_any_thread=0 max_loads_all_threads=0 max_loads_any_thread=0 numThreads=1 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null profile=0 progress_interval=0 simpoint_start_insts= @@ -99,18 +111,28 @@ walker=system.cpu.dtb.walker [system.cpu.dtb.walker] type=X86PagetableWalker clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 num_squash_per_cycle=4 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null system=system port=system.membus.slave[4] [system.cpu.interrupts] type=X86LocalApic clk_domain=system.cpu.apic_clk_domain +default_p_state=UNDEFINED eventq_index=0 int_latency=1000 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 pio_addr=2305843009213693952 pio_latency=100000 +power_model=Null system=system int_master=system.membus.slave[5] int_slave=system.membus.master[2] @@ -130,8 +152,13 @@ walker=system.cpu.itb.walker [system.cpu.itb.walker] type=X86PagetableWalker clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 num_squash_per_cycle=4 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null system=system port=system.membus.slave[3] @@ -149,9 +176,9 @@ env= errout=cerr euid=100 eventq_index=0 -executable=/dist/m5/cpu2000/binaries/x86/linux/parser +executable=/arm/projectscratch/randd/systems/dist/cpu2000/binaries/x86/linux/parser gid=100 -input=/dist/m5/cpu2000/data/parser/mdred/input/parser.in +input=/arm/projectscratch/randd/systems/dist/cpu2000/data/parser/mdred/input/parser.in kvmInSE=false max_stack_size=67108864 output=cout @@ -181,10 +208,15 @@ transition_latency=100000000 [system.membus] type=CoherentXBar clk_domain=system.clk_domain +default_p_state=UNDEFINED eventq_index=0 forward_latency=4 frontend_latency=3 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 point_of_coherency=true +power_model=Null response_latency=2 snoop_filter=Null snoop_response_latency=4 @@ -199,11 +231,16 @@ type=SimpleMemory bandwidth=73.000000 clk_domain=system.clk_domain conf_table_reported=true +default_p_state=UNDEFINED eventq_index=0 in_addr_map=true latency=30000 latency_var=0 null=false +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null range=0:134217727 port=system.membus.master[0] diff --git a/tests/long/se/20.parser/ref/x86/linux/simple-atomic/simerr b/tests/long/se/20.parser/ref/x86/linux/simple-atomic/simerr index e69de29bb..aadc3d011 100755 --- a/tests/long/se/20.parser/ref/x86/linux/simple-atomic/simerr +++ b/tests/long/se/20.parser/ref/x86/linux/simple-atomic/simerr @@ -0,0 +1,2 @@ +warn: Sockets disabled, not accepting gdb connections +warn: ClockedObject: More than one power state change request encountered within the same simulation tick diff --git a/tests/long/se/20.parser/ref/x86/linux/simple-atomic/simout b/tests/long/se/20.parser/ref/x86/linux/simple-atomic/simout index ff993af56..3a0d1b2f1 100755 --- a/tests/long/se/20.parser/ref/x86/linux/simple-atomic/simout 
+++ b/tests/long/se/20.parser/ref/x86/linux/simple-atomic/simout @@ -3,10 +3,10 @@ Redirecting stderr to build/X86/tests/opt/long/se/20.parser/x86/linux/simple-ato gem5 Simulator System. http://gem5.org gem5 is copyrighted software; use the --copyright option for details. -gem5 compiled Mar 16 2016 22:57:26 -gem5 started Mar 16 2016 22:58:08 -gem5 executing on dinar2c11, pid 24736 -command line: build/X86/gem5.opt -d build/X86/tests/opt/long/se/20.parser/x86/linux/simple-atomic -re /home/stever/gem5-public/tests/run.py build/X86/tests/opt/long/se/20.parser/x86/linux/simple-atomic +gem5 compiled Jul 21 2016 14:35:23 +gem5 started Jul 21 2016 14:36:19 +gem5 executing on e108600-lin, pid 18563 +command line: /work/curdun01/gem5-external.hg/build/X86/gem5.opt -d build/X86/tests/opt/long/se/20.parser/x86/linux/simple-atomic -re /work/curdun01/gem5-external.hg/tests/testing/../run.py long/se/20.parser/x86/linux/simple-atomic Global frequency set at 1000000000000 ticks per second info: Entering event queue @ 0. Starting simulation... diff --git a/tests/long/se/20.parser/ref/x86/linux/simple-atomic/stats.txt b/tests/long/se/20.parser/ref/x86/linux/simple-atomic/stats.txt index 8deb96433..ff2284b45 100644 --- a/tests/long/se/20.parser/ref/x86/linux/simple-atomic/stats.txt +++ b/tests/long/se/20.parser/ref/x86/linux/simple-atomic/stats.txt @@ -4,11 +4,11 @@ sim_seconds 0.885773 # Nu sim_ticks 885772926000 # Number of ticks simulated final_tick 885772926000 # Number of ticks from beginning of simulation (restored from checkpoints and never reset) sim_freq 1000000000000 # Frequency of simulated ticks -host_inst_rate 1531547 # Simulator instruction rate (inst/s) -host_op_rate 2834130 # Simulator op (including micro ops) rate (op/s) -host_tick_rate 1640692833 # Simulator tick rate (ticks/s) -host_mem_usage 315956 # Number of bytes of host memory used -host_seconds 539.88 # Real time elapsed on the host +host_inst_rate 771975 # Simulator instruction rate (inst/s) +host_op_rate 1428542 # Simulator op (including micro ops) rate (op/s) +host_tick_rate 826990545 # Simulator tick rate (ticks/s) +host_mem_usage 269652 # Number of bytes of host memory used +host_seconds 1071.08 # Real time elapsed on the host sim_insts 826847304 # Number of instructions simulated sim_ops 1530082521 # Number of ops (including micro ops) simulated system.voltage_domain.voltage 1 # Voltage in Volts @@ -120,6 +120,7 @@ system.membus.pkt_size_system.cpu.dcache_port::system.physmem.port 3277364750 system.membus.pkt_size_system.cpu.dcache_port::total 3277364750 # Cumulative packet size per connected master and slave (bytes) system.membus.pkt_size::total 11823849838 # Cumulative packet size per connected master and slave (bytes) system.membus.snoops 0 # Total snoops (count) +system.membus.snoopTraffic 0 # Total snoop traffic (bytes) system.membus.snoop_fanout::samples 1601552189 # Request fanout histogram system.membus.snoop_fanout::mean 0.667047 # Request fanout histogram system.membus.snoop_fanout::stdev 0.471270 # Request fanout histogram diff --git a/tests/long/se/20.parser/ref/x86/linux/simple-timing/config.ini b/tests/long/se/20.parser/ref/x86/linux/simple-timing/config.ini index 4292720d5..d62d690f2 100644 --- a/tests/long/se/20.parser/ref/x86/linux/simple-timing/config.ini +++ b/tests/long/se/20.parser/ref/x86/linux/simple-timing/config.ini @@ -14,6 +14,7 @@ children=clk_domain cpu cpu_clk_domain dvfs_handler membus physmem voltage_domai boot_osflags=a cache_line_size=64 clk_domain=system.clk_domain +default_p_state=UNDEFINED 
eventq_index=0 exit_on_work_items=false init_param=0 @@ -27,8 +28,14 @@ memories=system.physmem mmap_using_noreserve=false multi_thread=false num_work_ids=16 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null readfile= symbolfile= +thermal_components= +thermal_model=Null work_begin_ckpt_count=0 work_begin_cpu_id_exit=-1 work_begin_exit_count=0 @@ -53,6 +60,7 @@ branchPred=Null checker=Null clk_domain=system.cpu_clk_domain cpu_id=0 +default_p_state=UNDEFINED do_checkpoint_insts=true do_quiesce=true do_statistics_insts=true @@ -68,6 +76,10 @@ max_insts_any_thread=0 max_loads_all_threads=0 max_loads_any_thread=0 numThreads=1 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null profile=0 progress_interval=0 simpoint_start_insts= @@ -92,12 +104,17 @@ addr_ranges=0:18446744073709551615 assoc=2 clk_domain=system.cpu_clk_domain clusivity=mostly_incl +default_p_state=UNDEFINED demand_mshr_reserve=1 eventq_index=0 hit_latency=2 is_read_only=false max_miss_count=0 mshrs=4 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null prefetch_on_access=false prefetcher=Null response_latency=2 @@ -116,8 +133,13 @@ type=LRU assoc=2 block_size=64 clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 hit_latency=2 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sequential_access=false size=262144 @@ -131,8 +153,13 @@ walker=system.cpu.dtb.walker [system.cpu.dtb.walker] type=X86PagetableWalker clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 num_squash_per_cycle=4 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null system=system port=system.cpu.toL2Bus.slave[3] @@ -143,12 +170,17 @@ addr_ranges=0:18446744073709551615 assoc=2 clk_domain=system.cpu_clk_domain clusivity=mostly_incl +default_p_state=UNDEFINED demand_mshr_reserve=1 eventq_index=0 hit_latency=2 is_read_only=true max_miss_count=0 mshrs=4 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null prefetch_on_access=false prefetcher=Null response_latency=2 @@ -167,18 +199,28 @@ type=LRU assoc=2 block_size=64 clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 hit_latency=2 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sequential_access=false size=131072 [system.cpu.interrupts] type=X86LocalApic clk_domain=system.cpu.apic_clk_domain +default_p_state=UNDEFINED eventq_index=0 int_latency=1000 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 pio_addr=2305843009213693952 pio_latency=100000 +power_model=Null system=system int_master=system.membus.slave[2] int_slave=system.membus.master[2] @@ -198,8 +240,13 @@ walker=system.cpu.itb.walker [system.cpu.itb.walker] type=X86PagetableWalker clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 num_squash_per_cycle=4 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null system=system port=system.cpu.toL2Bus.slave[2] @@ -210,12 +257,17 @@ addr_ranges=0:18446744073709551615 assoc=8 clk_domain=system.cpu_clk_domain clusivity=mostly_incl +default_p_state=UNDEFINED demand_mshr_reserve=1 eventq_index=0 hit_latency=20 is_read_only=false max_miss_count=0 mshrs=20 
+p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null prefetch_on_access=false prefetcher=Null response_latency=20 @@ -234,8 +286,13 @@ type=LRU assoc=8 block_size=64 clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 hit_latency=20 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sequential_access=false size=2097152 @@ -243,10 +300,15 @@ size=2097152 type=CoherentXBar children=snoop_filter clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 forward_latency=0 frontend_latency=1 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 point_of_coherency=false +power_model=Null response_latency=1 snoop_filter=system.cpu.toL2Bus.snoop_filter snoop_response_latency=1 @@ -277,9 +339,9 @@ env= errout=cerr euid=100 eventq_index=0 -executable=/dist/m5/cpu2000/binaries/x86/linux/parser +executable=/arm/projectscratch/randd/systems/dist/cpu2000/binaries/x86/linux/parser gid=100 -input=/dist/m5/cpu2000/data/parser/mdred/input/parser.in +input=/arm/projectscratch/randd/systems/dist/cpu2000/data/parser/mdred/input/parser.in kvmInSE=false max_stack_size=67108864 output=cout @@ -309,10 +371,15 @@ transition_latency=100000000 [system.membus] type=CoherentXBar clk_domain=system.clk_domain +default_p_state=UNDEFINED eventq_index=0 forward_latency=4 frontend_latency=3 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 point_of_coherency=true +power_model=Null response_latency=2 snoop_filter=Null snoop_response_latency=4 @@ -327,11 +394,16 @@ type=SimpleMemory bandwidth=73.000000 clk_domain=system.clk_domain conf_table_reported=true +default_p_state=UNDEFINED eventq_index=0 in_addr_map=true latency=30000 latency_var=0 null=false +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null range=0:134217727 port=system.membus.master[0] diff --git a/tests/long/se/20.parser/ref/x86/linux/simple-timing/simerr b/tests/long/se/20.parser/ref/x86/linux/simple-timing/simerr index e69de29bb..aadc3d011 100755 --- a/tests/long/se/20.parser/ref/x86/linux/simple-timing/simerr +++ b/tests/long/se/20.parser/ref/x86/linux/simple-timing/simerr @@ -0,0 +1,2 @@ +warn: Sockets disabled, not accepting gdb connections +warn: ClockedObject: More than one power state change request encountered within the same simulation tick diff --git a/tests/long/se/20.parser/ref/x86/linux/simple-timing/simout b/tests/long/se/20.parser/ref/x86/linux/simple-timing/simout index ded960c36..e0c4a0b01 100755 --- a/tests/long/se/20.parser/ref/x86/linux/simple-timing/simout +++ b/tests/long/se/20.parser/ref/x86/linux/simple-timing/simout @@ -3,10 +3,10 @@ Redirecting stderr to build/X86/tests/opt/long/se/20.parser/x86/linux/simple-tim gem5 Simulator System. http://gem5.org gem5 is copyrighted software; use the --copyright option for details. 
-gem5 compiled Mar 16 2016 22:57:26 -gem5 started Mar 16 2016 22:57:56 -gem5 executing on dinar2c11, pid 24718 -command line: build/X86/gem5.opt -d build/X86/tests/opt/long/se/20.parser/x86/linux/simple-timing -re /home/stever/gem5-public/tests/run.py build/X86/tests/opt/long/se/20.parser/x86/linux/simple-timing +gem5 compiled Jul 21 2016 14:35:23 +gem5 started Jul 21 2016 14:36:17 +gem5 executing on e108600-lin, pid 18541 +command line: /work/curdun01/gem5-external.hg/build/X86/gem5.opt -d build/X86/tests/opt/long/se/20.parser/x86/linux/simple-timing -re /work/curdun01/gem5-external.hg/tests/testing/../run.py long/se/20.parser/x86/linux/simple-timing Global frequency set at 1000000000000 ticks per second info: Entering event queue @ 0. Starting simulation... diff --git a/tests/long/se/20.parser/ref/x86/linux/simple-timing/stats.txt b/tests/long/se/20.parser/ref/x86/linux/simple-timing/stats.txt index 38495841e..b7bd8e61b 100644 --- a/tests/long/se/20.parser/ref/x86/linux/simple-timing/stats.txt +++ b/tests/long/se/20.parser/ref/x86/linux/simple-timing/stats.txt @@ -4,11 +4,11 @@ sim_seconds 1.650501 # Nu sim_ticks 1650501252500 # Number of ticks simulated final_tick 1650501252500 # Number of ticks from beginning of simulation (restored from checkpoints and never reset) sim_freq 1000000000000 # Frequency of simulated ticks -host_inst_rate 943240 # Simulator instruction rate (inst/s) -host_op_rate 1745467 # Simulator op (including micro ops) rate (op/s) -host_tick_rate 1882837072 # Simulator tick rate (ticks/s) -host_mem_usage 326104 # Number of bytes of host memory used -host_seconds 876.60 # Real time elapsed on the host +host_inst_rate 516047 # Simulator instruction rate (inst/s) +host_op_rate 954946 # Simulator op (including micro ops) rate (op/s) +host_tick_rate 1030101248 # Simulator tick rate (ticks/s) +host_mem_usage 278616 # Number of bytes of host memory used +host_seconds 1602.27 # Real time elapsed on the host sim_insts 826847304 # Number of instructions simulated sim_ops 1530082521 # Number of ops (including micro ops) simulated system.voltage_domain.voltage 1 # Voltage in Volts @@ -486,6 +486,7 @@ system.cpu.toL2Bus.pkt_size_system.cpu.icache.mem_side::system.cpu.l2cache.cpu_s system.cpu.toL2Bus.pkt_size_system.cpu.dcache.mem_side::system.cpu.l2cache.cpu_side 310165312 # Cumulative packet size per connected master and slave (bytes) system.cpu.toL2Bus.pkt_size::total 310425600 # Cumulative packet size per connected master and slave (bytes) system.cpu.toL2Bus.snoops 348438 # Total snoops (count) +system.cpu.toL2Bus.snoopTraffic 18765312 # Total snoop traffic (bytes) system.cpu.toL2Bus.snoop_fanout::samples 2872364 # Request fanout histogram system.cpu.toL2Bus.snoop_fanout::mean 0.000602 # Request fanout histogram system.cpu.toL2Bus.snoop_fanout::stdev 0.024527 # Request fanout histogram @@ -517,6 +518,7 @@ system.membus.pkt_size_system.cpu.l2cache.mem_side::system.physmem.port 4313 system.membus.pkt_size_system.cpu.l2cache.mem_side::total 43139968 # Cumulative packet size per connected master and slave (bytes) system.membus.pkt_size::total 43139968 # Cumulative packet size per connected master and slave (bytes) system.membus.snoops 0 # Total snoops (count) +system.membus.snoopTraffic 0 # Total snoop traffic (bytes) system.membus.snoop_fanout::samples 727569 # Request fanout histogram system.membus.snoop_fanout::mean 0 # Request fanout histogram system.membus.snoop_fanout::stdev 0 # Request fanout histogram diff --git a/tests/long/se/30.eon/ref/alpha/tru64/minor-timing/config.ini 
b/tests/long/se/30.eon/ref/alpha/tru64/minor-timing/config.ini index 00495eb93..00cf13ff8 100644 --- a/tests/long/se/30.eon/ref/alpha/tru64/minor-timing/config.ini +++ b/tests/long/se/30.eon/ref/alpha/tru64/minor-timing/config.ini @@ -14,7 +14,9 @@ children=clk_domain cpu cpu_clk_domain dvfs_handler membus physmem voltage_domai boot_osflags=a cache_line_size=64 clk_domain=system.clk_domain +default_p_state=UNDEFINED eventq_index=0 +exit_on_work_items=false init_param=0 kernel= kernel_addr_check=true @@ -24,9 +26,16 @@ mem_mode=timing mem_ranges= memories=system.physmem mmap_using_noreserve=false +multi_thread=false num_work_ids=16 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null readfile= symbolfile= +thermal_components= +thermal_model=Null work_begin_ckpt_count=0 work_begin_cpu_id_exit=-1 work_begin_exit_count=0 @@ -55,6 +64,7 @@ decodeCycleInput=true decodeInputBufferSize=3 decodeInputWidth=2 decodeToExecuteForwardDelay=1 +default_p_state=UNDEFINED do_checkpoint_insts=true do_quiesce=true do_statistics_insts=true @@ -97,12 +107,17 @@ max_insts_any_thread=0 max_loads_all_threads=0 max_loads_any_thread=0 numThreads=1 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null profile=0 progress_interval=0 simpoint_start_insts= socket_id=0 switched_out=false system=system +threadPolicy=RoundRobin tracer=system.cpu.tracer workload=system.cpu.workload dcache_port=system.cpu.dcache.cpu_side @@ -118,11 +133,18 @@ choicePredictorSize=8192 eventq_index=0 globalCtrBits=2 globalPredictorSize=8192 +indirectHashGHR=true +indirectHashTargets=true +indirectPathLength=3 +indirectSets=256 +indirectTagSize=16 +indirectWays=2 instShiftAmt=2 localCtrBits=2 localHistoryTableSize=2048 localPredictorSize=2048 numThreads=1 +useIndirect=true [system.cpu.dcache] type=Cache @@ -130,13 +152,18 @@ children=tags addr_ranges=0:18446744073709551615 assoc=2 clk_domain=system.cpu_clk_domain +clusivity=mostly_incl +default_p_state=UNDEFINED demand_mshr_reserve=1 eventq_index=0 -forward_snoops=true hit_latency=2 is_read_only=false max_miss_count=0 mshrs=4 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null prefetch_on_access=false prefetcher=Null response_latency=2 @@ -146,6 +173,7 @@ system=system tags=system.cpu.dcache.tags tgts_per_mshr=20 write_buffers=8 +writeback_clean=false cpu_side=system.cpu.dcache_port mem_side=system.cpu.toL2Bus.slave[1] @@ -154,8 +182,13 @@ type=LRU assoc=2 block_size=64 clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 hit_latency=2 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sequential_access=false size=262144 @@ -553,13 +586,18 @@ children=tags addr_ranges=0:18446744073709551615 assoc=2 clk_domain=system.cpu_clk_domain +clusivity=mostly_incl +default_p_state=UNDEFINED demand_mshr_reserve=1 eventq_index=0 -forward_snoops=true hit_latency=2 is_read_only=true max_miss_count=0 mshrs=4 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null prefetch_on_access=false prefetcher=Null response_latency=2 @@ -569,6 +607,7 @@ system=system tags=system.cpu.icache.tags tgts_per_mshr=20 write_buffers=8 +writeback_clean=true cpu_side=system.cpu.icache_port mem_side=system.cpu.toL2Bus.slave[0] @@ -577,8 +616,13 @@ type=LRU assoc=2 block_size=64 clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED 
eventq_index=0 hit_latency=2 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sequential_access=false size=131072 @@ -602,13 +646,18 @@ children=tags addr_ranges=0:18446744073709551615 assoc=8 clk_domain=system.cpu_clk_domain +clusivity=mostly_incl +default_p_state=UNDEFINED demand_mshr_reserve=1 eventq_index=0 -forward_snoops=true hit_latency=20 is_read_only=false max_miss_count=0 mshrs=20 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null prefetch_on_access=false prefetcher=Null response_latency=20 @@ -618,6 +667,7 @@ system=system tags=system.cpu.l2cache.tags tgts_per_mshr=12 write_buffers=8 +writeback_clean=false cpu_side=system.cpu.toL2Bus.master[0] mem_side=system.membus.slave[1] @@ -626,19 +676,31 @@ type=LRU assoc=8 block_size=64 clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 hit_latency=20 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sequential_access=false size=2097152 [system.cpu.toL2Bus] type=CoherentXBar +children=snoop_filter clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 forward_latency=0 frontend_latency=1 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +point_of_coherency=false +power_model=Null response_latency=1 -snoop_filter=Null +snoop_filter=system.cpu.toL2Bus.snoop_filter snoop_response_latency=1 system=system use_default_range=false @@ -646,6 +708,13 @@ width=32 master=system.cpu.l2cache.cpu_side slave=system.cpu.icache.mem_side system.cpu.dcache.mem_side +[system.cpu.toL2Bus.snoop_filter] +type=SnoopFilter +eventq_index=0 +lookup_latency=0 +max_capacity=8388608 +system=system + [system.cpu.tracer] type=ExeTracer eventq_index=0 @@ -660,7 +729,7 @@ env= errout=cerr euid=100 eventq_index=0 -executable=/scratch/nilay/GEM5/dist/m5/cpu2000/binaries/alpha/tru64/eon +executable=/arm/projectscratch/randd/systems/dist/cpu2000/binaries/alpha/tru64/eon gid=100 input=cin kvmInSE=false @@ -692,9 +761,15 @@ transition_latency=100000000 [system.membus] type=CoherentXBar clk_domain=system.clk_domain +default_p_state=UNDEFINED eventq_index=0 forward_latency=4 frontend_latency=3 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +point_of_coherency=true +power_model=Null response_latency=2 snoop_filter=Null snoop_response_latency=4 @@ -738,6 +813,7 @@ burst_length=8 channels=1 clk_domain=system.clk_domain conf_table_reported=true +default_p_state=UNDEFINED device_bus_width=8 device_rowbuffer_size=1024 device_size=536870912 @@ -749,7 +825,11 @@ max_accesses_per_row=16 mem_sched_policy=frfcfs min_writes_per_switch=16 null=false +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 page_policy=open_adaptive +power_model=Null range=0:134217727 ranks_per_channel=2 read_buffer_size=32 diff --git a/tests/long/se/30.eon/ref/alpha/tru64/minor-timing/simerr b/tests/long/se/30.eon/ref/alpha/tru64/minor-timing/simerr index 3b53ebc6c..9c10deefc 100755 --- a/tests/long/se/30.eon/ref/alpha/tru64/minor-timing/simerr +++ b/tests/long/se/30.eon/ref/alpha/tru64/minor-timing/simerr @@ -1,5 +1,6 @@ warn: DRAM device capacity (8192 Mbytes) does not match the address range assigned (128 Mbytes) warn: Sockets disabled, not accepting gdb connections +warn: ClockedObject: More than one power state change request encountered within the same simulation tick warn: Prefetch 
instructions in Alpha do not do anything warn: Prefetch instructions in Alpha do not do anything warn: Prefetch instructions in Alpha do not do anything diff --git a/tests/long/se/30.eon/ref/alpha/tru64/minor-timing/simout b/tests/long/se/30.eon/ref/alpha/tru64/minor-timing/simout index d34e3637b..33c16c36c 100755 --- a/tests/long/se/30.eon/ref/alpha/tru64/minor-timing/simout +++ b/tests/long/se/30.eon/ref/alpha/tru64/minor-timing/simout @@ -3,15 +3,15 @@ Redirecting stderr to build/ALPHA/tests/opt/long/se/30.eon/alpha/tru64/minor-tim gem5 Simulator System. http://gem5.org gem5 is copyrighted software; use the --copyright option for details. -gem5 compiled Sep 14 2015 20:54:01 -gem5 started Sep 14 2015 21:15:11 -gem5 executing on ribera.cs.wisc.edu -command line: build/ALPHA/gem5.opt -d build/ALPHA/tests/opt/long/se/30.eon/alpha/tru64/minor-timing -re /scratch/nilay/GEM5/gem5/tests/run.py build/ALPHA/tests/opt/long/se/30.eon/alpha/tru64/minor-timing +gem5 compiled Jul 19 2016 12:23:51 +gem5 started Jul 21 2016 14:09:28 +gem5 executing on e108600-lin, pid 4300 +command line: /work/curdun01/gem5-external.hg/build/ALPHA/gem5.opt -d build/ALPHA/tests/opt/long/se/30.eon/alpha/tru64/minor-timing -re /work/curdun01/gem5-external.hg/tests/testing/../run.py long/se/30.eon/alpha/tru64/minor-timing Global frequency set at 1000000000000 ticks per second info: Entering event queue @ 0. Starting simulation... info: Increasing stack size by one page. Eon, Version 1.1 info: Increasing stack size by one page. -OO-style eon Time= 0.216667 -Exiting @ tick 225710988500 because target called exit() +OO-style eon Time= 0.233333 +Exiting @ tick 233525789500 because target called exit() diff --git a/tests/long/se/30.eon/ref/alpha/tru64/minor-timing/stats.txt b/tests/long/se/30.eon/ref/alpha/tru64/minor-timing/stats.txt index 1c291ca67..b65c3962a 100644 --- a/tests/long/se/30.eon/ref/alpha/tru64/minor-timing/stats.txt +++ b/tests/long/se/30.eon/ref/alpha/tru64/minor-timing/stats.txt @@ -1,43 +1,43 @@ ---------- Begin Simulation Statistics ---------- -sim_seconds 0.223533 # Number of seconds simulated -sim_ticks 223532962500 # Number of ticks simulated -final_tick 223532962500 # Number of ticks from beginning of simulation (restored from checkpoints and never reset) +sim_seconds 0.233526 # Number of seconds simulated +sim_ticks 233525789500 # Number of ticks simulated +final_tick 233525789500 # Number of ticks from beginning of simulation (restored from checkpoints and never reset) sim_freq 1000000000000 # Frequency of simulated ticks -host_inst_rate 488740 # Simulator instruction rate (inst/s) -host_op_rate 488740 # Simulator op (including micro ops) rate (op/s) -host_tick_rate 274038351 # Simulator tick rate (ticks/s) -host_mem_usage 302272 # Number of bytes of host memory used -host_seconds 815.70 # Real time elapsed on the host -sim_insts 398664665 # Number of instructions simulated -sim_ops 398664665 # Number of ops (including micro ops) simulated +host_inst_rate 279317 # Simulator instruction rate (inst/s) +host_op_rate 279317 # Simulator op (including micro ops) rate (op/s) +host_tick_rate 163615265 # Simulator tick rate (ticks/s) +host_mem_usage 255720 # Number of bytes of host memory used +host_seconds 1427.29 # Real time elapsed on the host +sim_insts 398664651 # Number of instructions simulated +sim_ops 398664651 # Number of ops (including micro ops) simulated system.voltage_domain.voltage 1 # Voltage in Volts system.clk_domain.clock 1000 # Clock period in ticks 
-system.physmem.pwrStateResidencyTicks::UNDEFINED 223532962500 # Cumulative time (in ticks) in various power states -system.physmem.bytes_read::cpu.inst 249088 # Number of bytes read from this memory +system.physmem.pwrStateResidencyTicks::UNDEFINED 233525789500 # Cumulative time (in ticks) in various power states +system.physmem.bytes_read::cpu.inst 249280 # Number of bytes read from this memory system.physmem.bytes_read::cpu.data 254592 # Number of bytes read from this memory -system.physmem.bytes_read::total 503680 # Number of bytes read from this memory -system.physmem.bytes_inst_read::cpu.inst 249088 # Number of instructions bytes read from this memory -system.physmem.bytes_inst_read::total 249088 # Number of instructions bytes read from this memory -system.physmem.num_reads::cpu.inst 3892 # Number of read requests responded to by this memory +system.physmem.bytes_read::total 503872 # Number of bytes read from this memory +system.physmem.bytes_inst_read::cpu.inst 249280 # Number of instructions bytes read from this memory +system.physmem.bytes_inst_read::total 249280 # Number of instructions bytes read from this memory +system.physmem.num_reads::cpu.inst 3895 # Number of read requests responded to by this memory system.physmem.num_reads::cpu.data 3978 # Number of read requests responded to by this memory -system.physmem.num_reads::total 7870 # Number of read requests responded to by this memory -system.physmem.bw_read::cpu.inst 1114323 # Total read bandwidth from this memory (bytes/s) -system.physmem.bw_read::cpu.data 1138946 # Total read bandwidth from this memory (bytes/s) -system.physmem.bw_read::total 2253269 # Total read bandwidth from this memory (bytes/s) -system.physmem.bw_inst_read::cpu.inst 1114323 # Instruction read bandwidth from this memory (bytes/s) -system.physmem.bw_inst_read::total 1114323 # Instruction read bandwidth from this memory (bytes/s) -system.physmem.bw_total::cpu.inst 1114323 # Total bandwidth to/from this memory (bytes/s) -system.physmem.bw_total::cpu.data 1138946 # Total bandwidth to/from this memory (bytes/s) -system.physmem.bw_total::total 2253269 # Total bandwidth to/from this memory (bytes/s) -system.physmem.readReqs 7870 # Number of read requests accepted +system.physmem.num_reads::total 7873 # Number of read requests responded to by this memory +system.physmem.bw_read::cpu.inst 1067462 # Total read bandwidth from this memory (bytes/s) +system.physmem.bw_read::cpu.data 1090209 # Total read bandwidth from this memory (bytes/s) +system.physmem.bw_read::total 2157672 # Total read bandwidth from this memory (bytes/s) +system.physmem.bw_inst_read::cpu.inst 1067462 # Instruction read bandwidth from this memory (bytes/s) +system.physmem.bw_inst_read::total 1067462 # Instruction read bandwidth from this memory (bytes/s) +system.physmem.bw_total::cpu.inst 1067462 # Total bandwidth to/from this memory (bytes/s) +system.physmem.bw_total::cpu.data 1090209 # Total bandwidth to/from this memory (bytes/s) +system.physmem.bw_total::total 2157672 # Total bandwidth to/from this memory (bytes/s) +system.physmem.readReqs 7873 # Number of read requests accepted system.physmem.writeReqs 0 # Number of write requests accepted -system.physmem.readBursts 7870 # Number of DRAM read bursts, including those serviced by the write queue +system.physmem.readBursts 7873 # Number of DRAM read bursts, including those serviced by the write queue system.physmem.writeBursts 0 # Number of DRAM write bursts, including those merged in the write queue -system.physmem.bytesReadDRAM 503680 # 
Total number of bytes read from DRAM +system.physmem.bytesReadDRAM 503872 # Total number of bytes read from DRAM system.physmem.bytesReadWrQ 0 # Total number of bytes read from write queue system.physmem.bytesWritten 0 # Total number of bytes written to DRAM -system.physmem.bytesReadSys 503680 # Total read bytes from the system interface side +system.physmem.bytesReadSys 503872 # Total read bytes from the system interface side system.physmem.bytesWrittenSys 0 # Total written bytes from the system interface side system.physmem.servicedByWrQ 0 # Number of DRAM read bursts serviced by the write queue system.physmem.mergedWrBursts 0 # Number of DRAM write bursts merged with an existing one @@ -46,9 +46,9 @@ system.physmem.perBankRdBursts::0 548 # Pe system.physmem.perBankRdBursts::1 675 # Per bank write bursts system.physmem.perBankRdBursts::2 473 # Per bank write bursts system.physmem.perBankRdBursts::3 633 # Per bank write bursts -system.physmem.perBankRdBursts::4 474 # Per bank write bursts +system.physmem.perBankRdBursts::4 475 # Per bank write bursts system.physmem.perBankRdBursts::5 477 # Per bank write bursts -system.physmem.perBankRdBursts::6 562 # Per bank write bursts +system.physmem.perBankRdBursts::6 563 # Per bank write bursts system.physmem.perBankRdBursts::7 560 # Per bank write bursts system.physmem.perBankRdBursts::8 471 # Per bank write bursts system.physmem.perBankRdBursts::9 437 # Per bank write bursts @@ -57,7 +57,7 @@ system.physmem.perBankRdBursts::11 323 # Pe system.physmem.perBankRdBursts::12 430 # Per bank write bursts system.physmem.perBankRdBursts::13 556 # Per bank write bursts system.physmem.perBankRdBursts::14 473 # Per bank write bursts -system.physmem.perBankRdBursts::15 424 # Per bank write bursts +system.physmem.perBankRdBursts::15 425 # Per bank write bursts system.physmem.perBankWrBursts::0 0 # Per bank write bursts system.physmem.perBankWrBursts::1 0 # Per bank write bursts system.physmem.perBankWrBursts::2 0 # Per bank write bursts @@ -76,14 +76,14 @@ system.physmem.perBankWrBursts::14 0 # Pe system.physmem.perBankWrBursts::15 0 # Per bank write bursts system.physmem.numRdRetry 0 # Number of times read queue was full causing retry system.physmem.numWrRetry 0 # Number of times write queue was full causing retry -system.physmem.totGap 223532875000 # Total gap between requests +system.physmem.totGap 233525688500 # Total gap between requests system.physmem.readPktSize::0 0 # Read request sizes (log2) system.physmem.readPktSize::1 0 # Read request sizes (log2) system.physmem.readPktSize::2 0 # Read request sizes (log2) system.physmem.readPktSize::3 0 # Read request sizes (log2) system.physmem.readPktSize::4 0 # Read request sizes (log2) system.physmem.readPktSize::5 0 # Read request sizes (log2) -system.physmem.readPktSize::6 7870 # Read request sizes (log2) +system.physmem.readPktSize::6 7873 # Read request sizes (log2) system.physmem.writePktSize::0 0 # Write request sizes (log2) system.physmem.writePktSize::1 0 # Write request sizes (log2) system.physmem.writePktSize::2 0 # Write request sizes (log2) @@ -91,9 +91,9 @@ system.physmem.writePktSize::3 0 # Wr system.physmem.writePktSize::4 0 # Write request sizes (log2) system.physmem.writePktSize::5 0 # Write request sizes (log2) system.physmem.writePktSize::6 0 # Write request sizes (log2) -system.physmem.rdQLenPdf::0 6816 # What read queue length does an incoming req see -system.physmem.rdQLenPdf::1 971 # What read queue length does an incoming req see -system.physmem.rdQLenPdf::2 83 # What read queue length 
does an incoming req see +system.physmem.rdQLenPdf::0 6857 # What read queue length does an incoming req see +system.physmem.rdQLenPdf::1 948 # What read queue length does an incoming req see +system.physmem.rdQLenPdf::2 68 # What read queue length does an incoming req see system.physmem.rdQLenPdf::3 0 # What read queue length does an incoming req see system.physmem.rdQLenPdf::4 0 # What read queue length does an incoming req see system.physmem.rdQLenPdf::5 0 # What read queue length does an incoming req see @@ -188,28 +188,28 @@ system.physmem.wrQLenPdf::61 0 # Wh system.physmem.wrQLenPdf::62 0 # What write queue length does an incoming req see system.physmem.wrQLenPdf::63 0 # What write queue length does an incoming req see system.physmem.bytesPerActivate::samples 1541 # Bytes accessed per row activation -system.physmem.bytesPerActivate::mean 325.149903 # Bytes accessed per row activation -system.physmem.bytesPerActivate::gmean 194.496255 # Bytes accessed per row activation -system.physmem.bytesPerActivate::stdev 330.966466 # Bytes accessed per row activation -system.physmem.bytesPerActivate::0-127 538 34.91% 34.91% # Bytes accessed per row activation -system.physmem.bytesPerActivate::128-255 340 22.06% 56.98% # Bytes accessed per row activation -system.physmem.bytesPerActivate::256-383 192 12.46% 69.44% # Bytes accessed per row activation -system.physmem.bytesPerActivate::384-511 106 6.88% 76.31% # Bytes accessed per row activation -system.physmem.bytesPerActivate::512-639 56 3.63% 79.95% # Bytes accessed per row activation -system.physmem.bytesPerActivate::640-767 49 3.18% 83.13% # Bytes accessed per row activation -system.physmem.bytesPerActivate::768-895 40 2.60% 85.72% # Bytes accessed per row activation -system.physmem.bytesPerActivate::896-1023 36 2.34% 88.06% # Bytes accessed per row activation -system.physmem.bytesPerActivate::1024-1151 184 11.94% 100.00% # Bytes accessed per row activation +system.physmem.bytesPerActivate::mean 326.852693 # Bytes accessed per row activation +system.physmem.bytesPerActivate::gmean 195.480715 # Bytes accessed per row activation +system.physmem.bytesPerActivate::stdev 331.694198 # Bytes accessed per row activation +system.physmem.bytesPerActivate::0-127 535 34.72% 34.72% # Bytes accessed per row activation +system.physmem.bytesPerActivate::128-255 344 22.32% 57.04% # Bytes accessed per row activation +system.physmem.bytesPerActivate::256-383 186 12.07% 69.11% # Bytes accessed per row activation +system.physmem.bytesPerActivate::384-511 104 6.75% 75.86% # Bytes accessed per row activation +system.physmem.bytesPerActivate::512-639 66 4.28% 80.14% # Bytes accessed per row activation +system.physmem.bytesPerActivate::640-767 53 3.44% 83.58% # Bytes accessed per row activation +system.physmem.bytesPerActivate::768-895 28 1.82% 85.40% # Bytes accessed per row activation +system.physmem.bytesPerActivate::896-1023 39 2.53% 87.93% # Bytes accessed per row activation +system.physmem.bytesPerActivate::1024-1151 186 12.07% 100.00% # Bytes accessed per row activation system.physmem.bytesPerActivate::total 1541 # Bytes accessed per row activation -system.physmem.totQLat 51693000 # Total ticks spent queuing -system.physmem.totMemAccLat 199255500 # Total ticks spent from burst creation until serviced by the DRAM -system.physmem.totBusLat 39350000 # Total ticks spent in databus transfers -system.physmem.avgQLat 6568.36 # Average queueing delay per DRAM burst +system.physmem.totQLat 52273750 # Total ticks spent queuing +system.physmem.totMemAccLat 199892500 # Total 
ticks spent from burst creation until serviced by the DRAM +system.physmem.totBusLat 39365000 # Total ticks spent in databus transfers +system.physmem.avgQLat 6639.62 # Average queueing delay per DRAM burst system.physmem.avgBusLat 5000.00 # Average bus latency per DRAM burst -system.physmem.avgMemAccLat 25318.36 # Average memory access latency per DRAM burst -system.physmem.avgRdBW 2.25 # Average DRAM read bandwidth in MiByte/s +system.physmem.avgMemAccLat 25389.62 # Average memory access latency per DRAM burst +system.physmem.avgRdBW 2.16 # Average DRAM read bandwidth in MiByte/s system.physmem.avgWrBW 0.00 # Average achieved write bandwidth in MiByte/s -system.physmem.avgRdBWSys 2.25 # Average system read bandwidth in MiByte/s +system.physmem.avgRdBWSys 2.16 # Average system read bandwidth in MiByte/s system.physmem.avgWrBWSys 0.00 # Average system write bandwidth in MiByte/s system.physmem.peakBW 12800.00 # Theoretical peak bandwidth in MiByte/s system.physmem.busUtil 0.02 # Data bus utilization in percentage @@ -217,75 +217,75 @@ system.physmem.busUtilRead 0.02 # Da system.physmem.busUtilWrite 0.00 # Data bus utilization in percentage for writes system.physmem.avgRdQLen 1.00 # Average read queue length when enqueuing system.physmem.avgWrQLen 0.00 # Average write queue length when enqueuing -system.physmem.readRowHits 6320 # Number of row buffer hits during reads +system.physmem.readRowHits 6330 # Number of row buffer hits during reads system.physmem.writeRowHits 0 # Number of row buffer hits during writes -system.physmem.readRowHitRate 80.30 # Row buffer hit rate for reads +system.physmem.readRowHitRate 80.40 # Row buffer hit rate for reads system.physmem.writeRowHitRate nan # Row buffer hit rate for writes -system.physmem.avgGap 28403160.74 # Average gap between requests -system.physmem.pageHitRate 80.30 # Row buffer hit rate, read and write combined -system.physmem_0.actEnergy 6751080 # Energy for activate commands per rank (pJ) -system.physmem_0.preEnergy 3683625 # Energy for precharge commands per rank (pJ) -system.physmem_0.readEnergy 34125000 # Energy for read commands per rank (pJ) +system.physmem.avgGap 29661588.78 # Average gap between requests +system.physmem.pageHitRate 80.40 # Row buffer hit rate, read and write combined +system.physmem_0.actEnergy 6804000 # Energy for activate commands per rank (pJ) +system.physmem_0.preEnergy 3712500 # Energy for precharge commands per rank (pJ) +system.physmem_0.readEnergy 34327800 # Energy for read commands per rank (pJ) system.physmem_0.writeEnergy 0 # Energy for write commands per rank (pJ) -system.physmem_0.refreshEnergy 14599740480 # Energy for refresh commands per rank (pJ) -system.physmem_0.actBackEnergy 5792542920 # Energy for active background per rank (pJ) -system.physmem_0.preBackEnergy 129035577000 # Energy for precharge background per rank (pJ) -system.physmem_0.totalEnergy 149472420105 # Total energy per rank (pJ) -system.physmem_0.averagePower 668.696853 # Core power per rank (mW) -system.physmem_0.memoryStateTime::IDLE 214662823500 # Time in different power states -system.physmem_0.memoryStateTime::REF 7464080000 # Time in different power states +system.physmem_0.refreshEnergy 15252731520 # Energy for refresh commands per rank (pJ) +system.physmem_0.actBackEnergy 5982776145 # Energy for active background per rank (pJ) +system.physmem_0.preBackEnergy 134867232750 # Energy for precharge background per rank (pJ) +system.physmem_0.totalEnergy 156147584715 # Total energy per rank (pJ) +system.physmem_0.averagePower 668.653337 
# Core power per rank (mW) +system.physmem_0.memoryStateTime::IDLE 224361889750 # Time in different power states +system.physmem_0.memoryStateTime::REF 7797920000 # Time in different power states system.physmem_0.memoryStateTime::PRE_PDN 0 # Time in different power states -system.physmem_0.memoryStateTime::ACT 1403552000 # Time in different power states +system.physmem_0.memoryStateTime::ACT 1365674000 # Time in different power states system.physmem_0.memoryStateTime::ACT_PDN 0 # Time in different power states -system.physmem_1.actEnergy 4891320 # Energy for activate commands per rank (pJ) -system.physmem_1.preEnergy 2668875 # Energy for precharge commands per rank (pJ) -system.physmem_1.readEnergy 26933400 # Energy for read commands per rank (pJ) +system.physmem_1.actEnergy 4845960 # Energy for activate commands per rank (pJ) +system.physmem_1.preEnergy 2644125 # Energy for precharge commands per rank (pJ) +system.physmem_1.readEnergy 27058200 # Energy for read commands per rank (pJ) system.physmem_1.writeEnergy 0 # Energy for write commands per rank (pJ) -system.physmem_1.refreshEnergy 14599740480 # Energy for refresh commands per rank (pJ) -system.physmem_1.actBackEnergy 5529545775 # Energy for active background per rank (pJ) -system.physmem_1.preBackEnergy 129266276250 # Energy for precharge background per rank (pJ) -system.physmem_1.totalEnergy 149430056100 # Total energy per rank (pJ) -system.physmem_1.averagePower 668.507329 # Core power per rank (mW) -system.physmem_1.memoryStateTime::IDLE 215046035000 # Time in different power states -system.physmem_1.memoryStateTime::REF 7464080000 # Time in different power states +system.physmem_1.refreshEnergy 15252731520 # Energy for refresh commands per rank (pJ) +system.physmem_1.actBackEnergy 5743132470 # Energy for active background per rank (pJ) +system.physmem_1.preBackEnergy 135077446500 # Energy for precharge background per rank (pJ) +system.physmem_1.totalEnergy 156107858775 # Total energy per rank (pJ) +system.physmem_1.averagePower 668.483223 # Core power per rank (mW) +system.physmem_1.memoryStateTime::IDLE 224713608000 # Time in different power states +system.physmem_1.memoryStateTime::REF 7797920000 # Time in different power states system.physmem_1.memoryStateTime::PRE_PDN 0 # Time in different power states -system.physmem_1.memoryStateTime::ACT 1017823750 # Time in different power states +system.physmem_1.memoryStateTime::ACT 1013955750 # Time in different power states system.physmem_1.memoryStateTime::ACT_PDN 0 # Time in different power states -system.pwrStateResidencyTicks::UNDEFINED 223532962500 # Cumulative time (in ticks) in various power states -system.cpu.branchPred.lookups 45898041 # Number of BP lookups -system.cpu.branchPred.condPredicted 26691639 # Number of conditional branches predicted -system.cpu.branchPred.condIncorrect 566044 # Number of conditional branches incorrect -system.cpu.branchPred.BTBLookups 25194489 # Number of BTB lookups -system.cpu.branchPred.BTBHits 18810772 # Number of BTB hits +system.pwrStateResidencyTicks::UNDEFINED 233525789500 # Cumulative time (in ticks) in various power states +system.cpu.branchPred.lookups 45912937 # Number of BP lookups +system.cpu.branchPred.condPredicted 26702744 # Number of conditional branches predicted +system.cpu.branchPred.condIncorrect 565787 # Number of conditional branches incorrect +system.cpu.branchPred.BTBLookups 25186730 # Number of BTB lookups +system.cpu.branchPred.BTBHits 18811780 # Number of BTB hits system.cpu.branchPred.BTBCorrect 0 # Number of 
correct BTB predictions (this stat may not work properly. -system.cpu.branchPred.BTBHitPct 74.662249 # BTB Hit Percentage -system.cpu.branchPred.usedRAS 8282157 # Number of times the RAS was used to get a target. -system.cpu.branchPred.RASInCorrect 322 # Number of incorrect RAS predictions. -system.cpu.branchPred.indirectLookups 2248490 # Number of indirect predictor lookups. -system.cpu.branchPred.indirectHits 2235007 # Number of indirect target hits. -system.cpu.branchPred.indirectMisses 13483 # Number of indirect misses. +system.cpu.branchPred.BTBHitPct 74.689251 # BTB Hit Percentage +system.cpu.branchPred.usedRAS 8285572 # Number of times the RAS was used to get a target. +system.cpu.branchPred.RASInCorrect 323 # Number of incorrect RAS predictions. +system.cpu.branchPred.indirectLookups 2249877 # Number of indirect predictor lookups. +system.cpu.branchPred.indirectHits 2235903 # Number of indirect target hits. +system.cpu.branchPred.indirectMisses 13974 # Number of indirect misses. system.cpu.branchPredindirectMispredicted 111495 # Number of mispredicted indirect branches. system.cpu_clk_domain.clock 500 # Clock period in ticks system.cpu.dtb.fetch_hits 0 # ITB hits system.cpu.dtb.fetch_misses 0 # ITB misses system.cpu.dtb.fetch_acv 0 # ITB acv system.cpu.dtb.fetch_accesses 0 # ITB accesses -system.cpu.dtb.read_hits 95357145 # DTB read hits -system.cpu.dtb.read_misses 114 # DTB read misses +system.cpu.dtb.read_hits 95338457 # DTB read hits +system.cpu.dtb.read_misses 116 # DTB read misses system.cpu.dtb.read_acv 0 # DTB read access violations -system.cpu.dtb.read_accesses 95357259 # DTB read accesses -system.cpu.dtb.write_hits 73594596 # DTB write hits -system.cpu.dtb.write_misses 852 # DTB write misses +system.cpu.dtb.read_accesses 95338573 # DTB read accesses +system.cpu.dtb.write_hits 73578378 # DTB write hits +system.cpu.dtb.write_misses 849 # DTB write misses system.cpu.dtb.write_acv 0 # DTB write access violations -system.cpu.dtb.write_accesses 73595448 # DTB write accesses -system.cpu.dtb.data_hits 168951741 # DTB hits -system.cpu.dtb.data_misses 966 # DTB misses +system.cpu.dtb.write_accesses 73579227 # DTB write accesses +system.cpu.dtb.data_hits 168916835 # DTB hits +system.cpu.dtb.data_misses 965 # DTB misses system.cpu.dtb.data_acv 0 # DTB access violations -system.cpu.dtb.data_accesses 168952707 # DTB accesses -system.cpu.itb.fetch_hits 96790867 # ITB hits -system.cpu.itb.fetch_misses 1237 # ITB misses +system.cpu.dtb.data_accesses 168917800 # DTB accesses +system.cpu.itb.fetch_hits 96959231 # ITB hits +system.cpu.itb.fetch_misses 1239 # ITB misses system.cpu.itb.fetch_acv 0 # ITB acv -system.cpu.itb.fetch_accesses 96792104 # ITB accesses +system.cpu.itb.fetch_accesses 96960470 # ITB accesses system.cpu.itb.read_hits 0 # DTB read hits system.cpu.itb.read_misses 0 # DTB read misses system.cpu.itb.read_acv 0 # DTB read access violations @@ -299,18 +299,18 @@ system.cpu.itb.data_misses 0 # DT system.cpu.itb.data_acv 0 # DTB access violations system.cpu.itb.data_accesses 0 # DTB accesses system.cpu.workload.num_syscalls 215 # Number of system calls -system.cpu.pwrStateResidencyTicks::ON 223532962500 # Cumulative time (in ticks) in various power states -system.cpu.numCycles 447065925 # number of cpu cycles simulated +system.cpu.pwrStateResidencyTicks::ON 233525789500 # Cumulative time (in ticks) in various power states +system.cpu.numCycles 467051579 # number of cpu cycles simulated system.cpu.numWorkItemsStarted 0 # number of work items this cpu started 
system.cpu.numWorkItemsCompleted 0 # number of work items this cpu completed -system.cpu.committedInsts 398664665 # Number of instructions committed -system.cpu.committedOps 398664665 # Number of ops (including micro ops) committed -system.cpu.discardedOps 2363843 # Number of ops (including micro ops) which were discarded before commit +system.cpu.committedInsts 398664651 # Number of instructions committed +system.cpu.committedOps 398664651 # Number of ops (including micro ops) committed +system.cpu.discardedOps 2289293 # Number of ops (including micro ops) which were discarded before commit system.cpu.numFetchSuspends 0 # Number of times Execute suspended instruction fetching -system.cpu.cpi 1.121408 # CPI: cycles per instruction -system.cpu.ipc 0.891736 # IPC: instructions per cycle +system.cpu.cpi 1.171540 # CPI: cycles per instruction +system.cpu.ipc 0.853577 # IPC: instructions per cycle system.cpu.op_class_0::No_OpClass 23123356 5.80% 5.80% # Class of committed instruction -system.cpu.op_class_0::IntAlu 141652567 35.53% 41.33% # Class of committed instruction +system.cpu.op_class_0::IntAlu 141652555 35.53% 41.33% # Class of committed instruction system.cpu.op_class_0::IntMult 2124322 0.53% 41.86% # Class of committed instruction system.cpu.op_class_0::IntDiv 0 0.00% 41.86% # Class of committed instruction system.cpu.op_class_0::FloatAdd 35620060 8.93% 50.80% # Class of committed instruction @@ -339,81 +339,81 @@ system.cpu.op_class_0::SimdFloatMisc 0 0.00% 57.79% # Cl system.cpu.op_class_0::SimdFloatMult 0 0.00% 57.79% # Class of committed instruction system.cpu.op_class_0::SimdFloatMultAcc 0 0.00% 57.79% # Class of committed instruction system.cpu.op_class_0::SimdFloatSqrt 0 0.00% 57.79% # Class of committed instruction -system.cpu.op_class_0::MemRead 94754511 23.77% 81.56% # Class of committed instruction -system.cpu.op_class_0::MemWrite 73520765 18.44% 100.00% # Class of committed instruction +system.cpu.op_class_0::MemRead 94754510 23.77% 81.56% # Class of committed instruction +system.cpu.op_class_0::MemWrite 73520764 18.44% 100.00% # Class of committed instruction system.cpu.op_class_0::IprAccess 0 0.00% 100.00% # Class of committed instruction system.cpu.op_class_0::InstPrefetch 0 0.00% 100.00% # Class of committed instruction -system.cpu.op_class_0::total 398664665 # Class of committed instruction -system.cpu.tickCycles 443407678 # Number of cycles that the object actually ticked -system.cpu.idleCycles 3658247 # Total number of cycles that the object has spent stopped -system.cpu.dcache.tags.pwrStateResidencyTicks::UNDEFINED 223532962500 # Cumulative time (in ticks) in various power states +system.cpu.op_class_0::total 398664651 # Class of committed instruction +system.cpu.tickCycles 455740556 # Number of cycles that the object actually ticked +system.cpu.idleCycles 11311023 # Total number of cycles that the object has spent stopped +system.cpu.dcache.tags.pwrStateResidencyTicks::UNDEFINED 233525789500 # Cumulative time (in ticks) in various power states system.cpu.dcache.tags.replacements 771 # number of replacements -system.cpu.dcache.tags.tagsinuse 3291.617120 # Cycle average of tags in use -system.cpu.dcache.tags.total_refs 167826980 # Total number of references to valid blocks. +system.cpu.dcache.tags.tagsinuse 3291.966637 # Cycle average of tags in use +system.cpu.dcache.tags.total_refs 167817023 # Total number of references to valid blocks. system.cpu.dcache.tags.sampled_refs 4165 # Sample count of references to valid blocks. 
-system.cpu.dcache.tags.avg_refs 40294.593037 # Average number of references to valid blocks. +system.cpu.dcache.tags.avg_refs 40292.202401 # Average number of references to valid blocks. system.cpu.dcache.tags.warmup_cycle 0 # Cycle when the warmup percentage was hit. -system.cpu.dcache.tags.occ_blocks::cpu.data 3291.617120 # Average occupied blocks per requestor -system.cpu.dcache.tags.occ_percent::cpu.data 0.803617 # Average percentage of cache occupancy -system.cpu.dcache.tags.occ_percent::total 0.803617 # Average percentage of cache occupancy +system.cpu.dcache.tags.occ_blocks::cpu.data 3291.966637 # Average occupied blocks per requestor +system.cpu.dcache.tags.occ_percent::cpu.data 0.803703 # Average percentage of cache occupancy +system.cpu.dcache.tags.occ_percent::total 0.803703 # Average percentage of cache occupancy system.cpu.dcache.tags.occ_task_id_blocks::1024 3394 # Occupied blocks per task id -system.cpu.dcache.tags.age_task_id_blocks_1024::0 38 # Occupied blocks per task id -system.cpu.dcache.tags.age_task_id_blocks_1024::1 25 # Occupied blocks per task id +system.cpu.dcache.tags.age_task_id_blocks_1024::0 37 # Occupied blocks per task id +system.cpu.dcache.tags.age_task_id_blocks_1024::1 26 # Occupied blocks per task id system.cpu.dcache.tags.age_task_id_blocks_1024::2 216 # Occupied blocks per task id system.cpu.dcache.tags.age_task_id_blocks_1024::3 2 # Occupied blocks per task id system.cpu.dcache.tags.age_task_id_blocks_1024::4 3113 # Occupied blocks per task id system.cpu.dcache.tags.occ_task_id_percent::1024 0.828613 # Percentage of cache occupancy per task id -system.cpu.dcache.tags.tag_accesses 335672353 # Number of tag accesses -system.cpu.dcache.tags.data_accesses 335672353 # Number of data accesses -system.cpu.dcache.pwrStateResidencyTicks::UNDEFINED 223532962500 # Cumulative time (in ticks) in various power states -system.cpu.dcache.ReadReq_hits::cpu.data 94312181 # number of ReadReq hits -system.cpu.dcache.ReadReq_hits::total 94312181 # number of ReadReq hits -system.cpu.dcache.WriteReq_hits::cpu.data 73514799 # number of WriteReq hits -system.cpu.dcache.WriteReq_hits::total 73514799 # number of WriteReq hits -system.cpu.dcache.demand_hits::cpu.data 167826980 # number of demand (read+write) hits -system.cpu.dcache.demand_hits::total 167826980 # number of demand (read+write) hits -system.cpu.dcache.overall_hits::cpu.data 167826980 # number of overall hits -system.cpu.dcache.overall_hits::total 167826980 # number of overall hits -system.cpu.dcache.ReadReq_misses::cpu.data 1183 # number of ReadReq misses -system.cpu.dcache.ReadReq_misses::total 1183 # number of ReadReq misses -system.cpu.dcache.WriteReq_misses::cpu.data 5931 # number of WriteReq misses -system.cpu.dcache.WriteReq_misses::total 5931 # number of WriteReq misses -system.cpu.dcache.demand_misses::cpu.data 7114 # number of demand (read+write) misses -system.cpu.dcache.demand_misses::total 7114 # number of demand (read+write) misses -system.cpu.dcache.overall_misses::cpu.data 7114 # number of overall misses -system.cpu.dcache.overall_misses::total 7114 # number of overall misses -system.cpu.dcache.ReadReq_miss_latency::cpu.data 88520000 # number of ReadReq miss cycles -system.cpu.dcache.ReadReq_miss_latency::total 88520000 # number of ReadReq miss cycles -system.cpu.dcache.WriteReq_miss_latency::cpu.data 429316500 # number of WriteReq miss cycles -system.cpu.dcache.WriteReq_miss_latency::total 429316500 # number of WriteReq miss cycles -system.cpu.dcache.demand_miss_latency::cpu.data 517836500 # number 
of demand (read+write) miss cycles -system.cpu.dcache.demand_miss_latency::total 517836500 # number of demand (read+write) miss cycles -system.cpu.dcache.overall_miss_latency::cpu.data 517836500 # number of overall miss cycles -system.cpu.dcache.overall_miss_latency::total 517836500 # number of overall miss cycles -system.cpu.dcache.ReadReq_accesses::cpu.data 94313364 # number of ReadReq accesses(hits+misses) -system.cpu.dcache.ReadReq_accesses::total 94313364 # number of ReadReq accesses(hits+misses) -system.cpu.dcache.WriteReq_accesses::cpu.data 73520730 # number of WriteReq accesses(hits+misses) -system.cpu.dcache.WriteReq_accesses::total 73520730 # number of WriteReq accesses(hits+misses) -system.cpu.dcache.demand_accesses::cpu.data 167834094 # number of demand (read+write) accesses -system.cpu.dcache.demand_accesses::total 167834094 # number of demand (read+write) accesses -system.cpu.dcache.overall_accesses::cpu.data 167834094 # number of overall (read+write) accesses -system.cpu.dcache.overall_accesses::total 167834094 # number of overall (read+write) accesses -system.cpu.dcache.ReadReq_miss_rate::cpu.data 0.000013 # miss rate for ReadReq accesses -system.cpu.dcache.ReadReq_miss_rate::total 0.000013 # miss rate for ReadReq accesses +system.cpu.dcache.tags.tag_accesses 335652191 # Number of tag accesses +system.cpu.dcache.tags.data_accesses 335652191 # Number of data accesses +system.cpu.dcache.pwrStateResidencyTicks::UNDEFINED 233525789500 # Cumulative time (in ticks) in various power states +system.cpu.dcache.ReadReq_hits::cpu.data 94302223 # number of ReadReq hits +system.cpu.dcache.ReadReq_hits::total 94302223 # number of ReadReq hits +system.cpu.dcache.WriteReq_hits::cpu.data 73514800 # number of WriteReq hits +system.cpu.dcache.WriteReq_hits::total 73514800 # number of WriteReq hits +system.cpu.dcache.demand_hits::cpu.data 167817023 # number of demand (read+write) hits +system.cpu.dcache.demand_hits::total 167817023 # number of demand (read+write) hits +system.cpu.dcache.overall_hits::cpu.data 167817023 # number of overall hits +system.cpu.dcache.overall_hits::total 167817023 # number of overall hits +system.cpu.dcache.ReadReq_misses::cpu.data 1061 # number of ReadReq misses +system.cpu.dcache.ReadReq_misses::total 1061 # number of ReadReq misses +system.cpu.dcache.WriteReq_misses::cpu.data 5929 # number of WriteReq misses +system.cpu.dcache.WriteReq_misses::total 5929 # number of WriteReq misses +system.cpu.dcache.demand_misses::cpu.data 6990 # number of demand (read+write) misses +system.cpu.dcache.demand_misses::total 6990 # number of demand (read+write) misses +system.cpu.dcache.overall_misses::cpu.data 6990 # number of overall misses +system.cpu.dcache.overall_misses::total 6990 # number of overall misses +system.cpu.dcache.ReadReq_miss_latency::cpu.data 77930500 # number of ReadReq miss cycles +system.cpu.dcache.ReadReq_miss_latency::total 77930500 # number of ReadReq miss cycles +system.cpu.dcache.WriteReq_miss_latency::cpu.data 429190000 # number of WriteReq miss cycles +system.cpu.dcache.WriteReq_miss_latency::total 429190000 # number of WriteReq miss cycles +system.cpu.dcache.demand_miss_latency::cpu.data 507120500 # number of demand (read+write) miss cycles +system.cpu.dcache.demand_miss_latency::total 507120500 # number of demand (read+write) miss cycles +system.cpu.dcache.overall_miss_latency::cpu.data 507120500 # number of overall miss cycles +system.cpu.dcache.overall_miss_latency::total 507120500 # number of overall miss cycles 
+system.cpu.dcache.ReadReq_accesses::cpu.data 94303284 # number of ReadReq accesses(hits+misses) +system.cpu.dcache.ReadReq_accesses::total 94303284 # number of ReadReq accesses(hits+misses) +system.cpu.dcache.WriteReq_accesses::cpu.data 73520729 # number of WriteReq accesses(hits+misses) +system.cpu.dcache.WriteReq_accesses::total 73520729 # number of WriteReq accesses(hits+misses) +system.cpu.dcache.demand_accesses::cpu.data 167824013 # number of demand (read+write) accesses +system.cpu.dcache.demand_accesses::total 167824013 # number of demand (read+write) accesses +system.cpu.dcache.overall_accesses::cpu.data 167824013 # number of overall (read+write) accesses +system.cpu.dcache.overall_accesses::total 167824013 # number of overall (read+write) accesses +system.cpu.dcache.ReadReq_miss_rate::cpu.data 0.000011 # miss rate for ReadReq accesses +system.cpu.dcache.ReadReq_miss_rate::total 0.000011 # miss rate for ReadReq accesses system.cpu.dcache.WriteReq_miss_rate::cpu.data 0.000081 # miss rate for WriteReq accesses system.cpu.dcache.WriteReq_miss_rate::total 0.000081 # miss rate for WriteReq accesses system.cpu.dcache.demand_miss_rate::cpu.data 0.000042 # miss rate for demand accesses system.cpu.dcache.demand_miss_rate::total 0.000042 # miss rate for demand accesses system.cpu.dcache.overall_miss_rate::cpu.data 0.000042 # miss rate for overall accesses system.cpu.dcache.overall_miss_rate::total 0.000042 # miss rate for overall accesses -system.cpu.dcache.ReadReq_avg_miss_latency::cpu.data 74826.711750 # average ReadReq miss latency -system.cpu.dcache.ReadReq_avg_miss_latency::total 74826.711750 # average ReadReq miss latency -system.cpu.dcache.WriteReq_avg_miss_latency::cpu.data 72385.179565 # average WriteReq miss latency -system.cpu.dcache.WriteReq_avg_miss_latency::total 72385.179565 # average WriteReq miss latency -system.cpu.dcache.demand_avg_miss_latency::cpu.data 72791.186393 # average overall miss latency -system.cpu.dcache.demand_avg_miss_latency::total 72791.186393 # average overall miss latency -system.cpu.dcache.overall_avg_miss_latency::cpu.data 72791.186393 # average overall miss latency -system.cpu.dcache.overall_avg_miss_latency::total 72791.186393 # average overall miss latency +system.cpu.dcache.ReadReq_avg_miss_latency::cpu.data 73450.047125 # average ReadReq miss latency +system.cpu.dcache.ReadReq_avg_miss_latency::total 73450.047125 # average ReadReq miss latency +system.cpu.dcache.WriteReq_avg_miss_latency::cpu.data 72388.261090 # average WriteReq miss latency +system.cpu.dcache.WriteReq_avg_miss_latency::total 72388.261090 # average WriteReq miss latency +system.cpu.dcache.demand_avg_miss_latency::cpu.data 72549.427754 # average overall miss latency +system.cpu.dcache.demand_avg_miss_latency::total 72549.427754 # average overall miss latency +system.cpu.dcache.overall_avg_miss_latency::cpu.data 72549.427754 # average overall miss latency +system.cpu.dcache.overall_avg_miss_latency::total 72549.427754 # average overall miss latency system.cpu.dcache.blocked_cycles::no_mshrs 0 # number of cycles access was blocked system.cpu.dcache.blocked_cycles::no_targets 0 # number of cycles access was blocked system.cpu.dcache.blocked::no_mshrs 0 # number of cycles access was blocked @@ -422,14 +422,14 @@ system.cpu.dcache.avg_blocked_cycles::no_mshrs nan system.cpu.dcache.avg_blocked_cycles::no_targets nan # average number of cycles each access was blocked system.cpu.dcache.writebacks::writebacks 654 # number of writebacks system.cpu.dcache.writebacks::total 654 # number of 
writebacks -system.cpu.dcache.ReadReq_mshr_hits::cpu.data 214 # number of ReadReq MSHR hits -system.cpu.dcache.ReadReq_mshr_hits::total 214 # number of ReadReq MSHR hits -system.cpu.dcache.WriteReq_mshr_hits::cpu.data 2735 # number of WriteReq MSHR hits -system.cpu.dcache.WriteReq_mshr_hits::total 2735 # number of WriteReq MSHR hits -system.cpu.dcache.demand_mshr_hits::cpu.data 2949 # number of demand (read+write) MSHR hits -system.cpu.dcache.demand_mshr_hits::total 2949 # number of demand (read+write) MSHR hits -system.cpu.dcache.overall_mshr_hits::cpu.data 2949 # number of overall MSHR hits -system.cpu.dcache.overall_mshr_hits::total 2949 # number of overall MSHR hits +system.cpu.dcache.ReadReq_mshr_hits::cpu.data 92 # number of ReadReq MSHR hits +system.cpu.dcache.ReadReq_mshr_hits::total 92 # number of ReadReq MSHR hits +system.cpu.dcache.WriteReq_mshr_hits::cpu.data 2733 # number of WriteReq MSHR hits +system.cpu.dcache.WriteReq_mshr_hits::total 2733 # number of WriteReq MSHR hits +system.cpu.dcache.demand_mshr_hits::cpu.data 2825 # number of demand (read+write) MSHR hits +system.cpu.dcache.demand_mshr_hits::total 2825 # number of demand (read+write) MSHR hits +system.cpu.dcache.overall_mshr_hits::cpu.data 2825 # number of overall MSHR hits +system.cpu.dcache.overall_mshr_hits::total 2825 # number of overall MSHR hits system.cpu.dcache.ReadReq_mshr_misses::cpu.data 969 # number of ReadReq MSHR misses system.cpu.dcache.ReadReq_mshr_misses::total 969 # number of ReadReq MSHR misses system.cpu.dcache.WriteReq_mshr_misses::cpu.data 3196 # number of WriteReq MSHR misses @@ -438,14 +438,14 @@ system.cpu.dcache.demand_mshr_misses::cpu.data 4165 system.cpu.dcache.demand_mshr_misses::total 4165 # number of demand (read+write) MSHR misses system.cpu.dcache.overall_mshr_misses::cpu.data 4165 # number of overall MSHR misses system.cpu.dcache.overall_mshr_misses::total 4165 # number of overall MSHR misses -system.cpu.dcache.ReadReq_mshr_miss_latency::cpu.data 71272000 # number of ReadReq MSHR miss cycles -system.cpu.dcache.ReadReq_mshr_miss_latency::total 71272000 # number of ReadReq MSHR miss cycles -system.cpu.dcache.WriteReq_mshr_miss_latency::cpu.data 239421000 # number of WriteReq MSHR miss cycles -system.cpu.dcache.WriteReq_mshr_miss_latency::total 239421000 # number of WriteReq MSHR miss cycles -system.cpu.dcache.demand_mshr_miss_latency::cpu.data 310693000 # number of demand (read+write) MSHR miss cycles -system.cpu.dcache.demand_mshr_miss_latency::total 310693000 # number of demand (read+write) MSHR miss cycles -system.cpu.dcache.overall_mshr_miss_latency::cpu.data 310693000 # number of overall MSHR miss cycles -system.cpu.dcache.overall_mshr_miss_latency::total 310693000 # number of overall MSHR miss cycles +system.cpu.dcache.ReadReq_mshr_miss_latency::cpu.data 70280500 # number of ReadReq MSHR miss cycles +system.cpu.dcache.ReadReq_mshr_miss_latency::total 70280500 # number of ReadReq MSHR miss cycles +system.cpu.dcache.WriteReq_mshr_miss_latency::cpu.data 239912500 # number of WriteReq MSHR miss cycles +system.cpu.dcache.WriteReq_mshr_miss_latency::total 239912500 # number of WriteReq MSHR miss cycles +system.cpu.dcache.demand_mshr_miss_latency::cpu.data 310193000 # number of demand (read+write) MSHR miss cycles +system.cpu.dcache.demand_mshr_miss_latency::total 310193000 # number of demand (read+write) MSHR miss cycles +system.cpu.dcache.overall_mshr_miss_latency::cpu.data 310193000 # number of overall MSHR miss cycles +system.cpu.dcache.overall_mshr_miss_latency::total 310193000 # 
number of overall MSHR miss cycles system.cpu.dcache.ReadReq_mshr_miss_rate::cpu.data 0.000010 # mshr miss rate for ReadReq accesses system.cpu.dcache.ReadReq_mshr_miss_rate::total 0.000010 # mshr miss rate for ReadReq accesses system.cpu.dcache.WriteReq_mshr_miss_rate::cpu.data 0.000043 # mshr miss rate for WriteReq accesses @@ -454,128 +454,128 @@ system.cpu.dcache.demand_mshr_miss_rate::cpu.data 0.000025 system.cpu.dcache.demand_mshr_miss_rate::total 0.000025 # mshr miss rate for demand accesses system.cpu.dcache.overall_mshr_miss_rate::cpu.data 0.000025 # mshr miss rate for overall accesses system.cpu.dcache.overall_mshr_miss_rate::total 0.000025 # mshr miss rate for overall accesses -system.cpu.dcache.ReadReq_avg_mshr_miss_latency::cpu.data 73552.115583 # average ReadReq mshr miss latency -system.cpu.dcache.ReadReq_avg_mshr_miss_latency::total 73552.115583 # average ReadReq mshr miss latency -system.cpu.dcache.WriteReq_avg_mshr_miss_latency::cpu.data 74912.703379 # average WriteReq mshr miss latency -system.cpu.dcache.WriteReq_avg_mshr_miss_latency::total 74912.703379 # average WriteReq mshr miss latency -system.cpu.dcache.demand_avg_mshr_miss_latency::cpu.data 74596.158463 # average overall mshr miss latency -system.cpu.dcache.demand_avg_mshr_miss_latency::total 74596.158463 # average overall mshr miss latency -system.cpu.dcache.overall_avg_mshr_miss_latency::cpu.data 74596.158463 # average overall mshr miss latency -system.cpu.dcache.overall_avg_mshr_miss_latency::total 74596.158463 # average overall mshr miss latency -system.cpu.icache.tags.pwrStateResidencyTicks::UNDEFINED 223532962500 # Cumulative time (in ticks) in various power states -system.cpu.icache.tags.replacements 3190 # number of replacements -system.cpu.icache.tags.tagsinuse 1919.630000 # Cycle average of tags in use -system.cpu.icache.tags.total_refs 96785699 # Total number of references to valid blocks. -system.cpu.icache.tags.sampled_refs 5168 # Sample count of references to valid blocks. -system.cpu.icache.tags.avg_refs 18727.882933 # Average number of references to valid blocks. +system.cpu.dcache.ReadReq_avg_mshr_miss_latency::cpu.data 72528.895769 # average ReadReq mshr miss latency +system.cpu.dcache.ReadReq_avg_mshr_miss_latency::total 72528.895769 # average ReadReq mshr miss latency +system.cpu.dcache.WriteReq_avg_mshr_miss_latency::cpu.data 75066.489362 # average WriteReq mshr miss latency +system.cpu.dcache.WriteReq_avg_mshr_miss_latency::total 75066.489362 # average WriteReq mshr miss latency +system.cpu.dcache.demand_avg_mshr_miss_latency::cpu.data 74476.110444 # average overall mshr miss latency +system.cpu.dcache.demand_avg_mshr_miss_latency::total 74476.110444 # average overall mshr miss latency +system.cpu.dcache.overall_avg_mshr_miss_latency::cpu.data 74476.110444 # average overall mshr miss latency +system.cpu.dcache.overall_avg_mshr_miss_latency::total 74476.110444 # average overall mshr miss latency +system.cpu.icache.tags.pwrStateResidencyTicks::UNDEFINED 233525789500 # Cumulative time (in ticks) in various power states +system.cpu.icache.tags.replacements 3193 # number of replacements +system.cpu.icache.tags.tagsinuse 1919.750364 # Cycle average of tags in use +system.cpu.icache.tags.total_refs 96954060 # Total number of references to valid blocks. +system.cpu.icache.tags.sampled_refs 5171 # Sample count of references to valid blocks. +system.cpu.icache.tags.avg_refs 18749.576484 # Average number of references to valid blocks. 
system.cpu.icache.tags.warmup_cycle 0 # Cycle when the warmup percentage was hit. -system.cpu.icache.tags.occ_blocks::cpu.inst 1919.630000 # Average occupied blocks per requestor -system.cpu.icache.tags.occ_percent::cpu.inst 0.937319 # Average percentage of cache occupancy -system.cpu.icache.tags.occ_percent::total 0.937319 # Average percentage of cache occupancy +system.cpu.icache.tags.occ_blocks::cpu.inst 1919.750364 # Average occupied blocks per requestor +system.cpu.icache.tags.occ_percent::cpu.inst 0.937378 # Average percentage of cache occupancy +system.cpu.icache.tags.occ_percent::total 0.937378 # Average percentage of cache occupancy system.cpu.icache.tags.occ_task_id_blocks::1024 1978 # Occupied blocks per task id -system.cpu.icache.tags.age_task_id_blocks_1024::0 92 # Occupied blocks per task id -system.cpu.icache.tags.age_task_id_blocks_1024::1 203 # Occupied blocks per task id +system.cpu.icache.tags.age_task_id_blocks_1024::0 87 # Occupied blocks per task id +system.cpu.icache.tags.age_task_id_blocks_1024::1 208 # Occupied blocks per task id system.cpu.icache.tags.age_task_id_blocks_1024::2 396 # Occupied blocks per task id system.cpu.icache.tags.age_task_id_blocks_1024::4 1287 # Occupied blocks per task id system.cpu.icache.tags.occ_task_id_percent::1024 0.965820 # Percentage of cache occupancy per task id -system.cpu.icache.tags.tag_accesses 193586902 # Number of tag accesses -system.cpu.icache.tags.data_accesses 193586902 # Number of data accesses -system.cpu.icache.pwrStateResidencyTicks::UNDEFINED 223532962500 # Cumulative time (in ticks) in various power states -system.cpu.icache.ReadReq_hits::cpu.inst 96785699 # number of ReadReq hits -system.cpu.icache.ReadReq_hits::total 96785699 # number of ReadReq hits -system.cpu.icache.demand_hits::cpu.inst 96785699 # number of demand (read+write) hits -system.cpu.icache.demand_hits::total 96785699 # number of demand (read+write) hits -system.cpu.icache.overall_hits::cpu.inst 96785699 # number of overall hits -system.cpu.icache.overall_hits::total 96785699 # number of overall hits -system.cpu.icache.ReadReq_misses::cpu.inst 5168 # number of ReadReq misses -system.cpu.icache.ReadReq_misses::total 5168 # number of ReadReq misses -system.cpu.icache.demand_misses::cpu.inst 5168 # number of demand (read+write) misses -system.cpu.icache.demand_misses::total 5168 # number of demand (read+write) misses -system.cpu.icache.overall_misses::cpu.inst 5168 # number of overall misses -system.cpu.icache.overall_misses::total 5168 # number of overall misses -system.cpu.icache.ReadReq_miss_latency::cpu.inst 316704500 # number of ReadReq miss cycles -system.cpu.icache.ReadReq_miss_latency::total 316704500 # number of ReadReq miss cycles -system.cpu.icache.demand_miss_latency::cpu.inst 316704500 # number of demand (read+write) miss cycles -system.cpu.icache.demand_miss_latency::total 316704500 # number of demand (read+write) miss cycles -system.cpu.icache.overall_miss_latency::cpu.inst 316704500 # number of overall miss cycles -system.cpu.icache.overall_miss_latency::total 316704500 # number of overall miss cycles -system.cpu.icache.ReadReq_accesses::cpu.inst 96790867 # number of ReadReq accesses(hits+misses) -system.cpu.icache.ReadReq_accesses::total 96790867 # number of ReadReq accesses(hits+misses) -system.cpu.icache.demand_accesses::cpu.inst 96790867 # number of demand (read+write) accesses -system.cpu.icache.demand_accesses::total 96790867 # number of demand (read+write) accesses -system.cpu.icache.overall_accesses::cpu.inst 96790867 # number of 
overall (read+write) accesses -system.cpu.icache.overall_accesses::total 96790867 # number of overall (read+write) accesses +system.cpu.icache.tags.tag_accesses 193923633 # Number of tag accesses +system.cpu.icache.tags.data_accesses 193923633 # Number of data accesses +system.cpu.icache.pwrStateResidencyTicks::UNDEFINED 233525789500 # Cumulative time (in ticks) in various power states +system.cpu.icache.ReadReq_hits::cpu.inst 96954060 # number of ReadReq hits +system.cpu.icache.ReadReq_hits::total 96954060 # number of ReadReq hits +system.cpu.icache.demand_hits::cpu.inst 96954060 # number of demand (read+write) hits +system.cpu.icache.demand_hits::total 96954060 # number of demand (read+write) hits +system.cpu.icache.overall_hits::cpu.inst 96954060 # number of overall hits +system.cpu.icache.overall_hits::total 96954060 # number of overall hits +system.cpu.icache.ReadReq_misses::cpu.inst 5171 # number of ReadReq misses +system.cpu.icache.ReadReq_misses::total 5171 # number of ReadReq misses +system.cpu.icache.demand_misses::cpu.inst 5171 # number of demand (read+write) misses +system.cpu.icache.demand_misses::total 5171 # number of demand (read+write) misses +system.cpu.icache.overall_misses::cpu.inst 5171 # number of overall misses +system.cpu.icache.overall_misses::total 5171 # number of overall misses +system.cpu.icache.ReadReq_miss_latency::cpu.inst 318040500 # number of ReadReq miss cycles +system.cpu.icache.ReadReq_miss_latency::total 318040500 # number of ReadReq miss cycles +system.cpu.icache.demand_miss_latency::cpu.inst 318040500 # number of demand (read+write) miss cycles +system.cpu.icache.demand_miss_latency::total 318040500 # number of demand (read+write) miss cycles +system.cpu.icache.overall_miss_latency::cpu.inst 318040500 # number of overall miss cycles +system.cpu.icache.overall_miss_latency::total 318040500 # number of overall miss cycles +system.cpu.icache.ReadReq_accesses::cpu.inst 96959231 # number of ReadReq accesses(hits+misses) +system.cpu.icache.ReadReq_accesses::total 96959231 # number of ReadReq accesses(hits+misses) +system.cpu.icache.demand_accesses::cpu.inst 96959231 # number of demand (read+write) accesses +system.cpu.icache.demand_accesses::total 96959231 # number of demand (read+write) accesses +system.cpu.icache.overall_accesses::cpu.inst 96959231 # number of overall (read+write) accesses +system.cpu.icache.overall_accesses::total 96959231 # number of overall (read+write) accesses system.cpu.icache.ReadReq_miss_rate::cpu.inst 0.000053 # miss rate for ReadReq accesses system.cpu.icache.ReadReq_miss_rate::total 0.000053 # miss rate for ReadReq accesses system.cpu.icache.demand_miss_rate::cpu.inst 0.000053 # miss rate for demand accesses system.cpu.icache.demand_miss_rate::total 0.000053 # miss rate for demand accesses system.cpu.icache.overall_miss_rate::cpu.inst 0.000053 # miss rate for overall accesses system.cpu.icache.overall_miss_rate::total 0.000053 # miss rate for overall accesses -system.cpu.icache.ReadReq_avg_miss_latency::cpu.inst 61281.830495 # average ReadReq miss latency -system.cpu.icache.ReadReq_avg_miss_latency::total 61281.830495 # average ReadReq miss latency -system.cpu.icache.demand_avg_miss_latency::cpu.inst 61281.830495 # average overall miss latency -system.cpu.icache.demand_avg_miss_latency::total 61281.830495 # average overall miss latency -system.cpu.icache.overall_avg_miss_latency::cpu.inst 61281.830495 # average overall miss latency -system.cpu.icache.overall_avg_miss_latency::total 61281.830495 # average overall miss latency 
+system.cpu.icache.ReadReq_avg_miss_latency::cpu.inst 61504.641269 # average ReadReq miss latency +system.cpu.icache.ReadReq_avg_miss_latency::total 61504.641269 # average ReadReq miss latency +system.cpu.icache.demand_avg_miss_latency::cpu.inst 61504.641269 # average overall miss latency +system.cpu.icache.demand_avg_miss_latency::total 61504.641269 # average overall miss latency +system.cpu.icache.overall_avg_miss_latency::cpu.inst 61504.641269 # average overall miss latency +system.cpu.icache.overall_avg_miss_latency::total 61504.641269 # average overall miss latency system.cpu.icache.blocked_cycles::no_mshrs 0 # number of cycles access was blocked system.cpu.icache.blocked_cycles::no_targets 0 # number of cycles access was blocked system.cpu.icache.blocked::no_mshrs 0 # number of cycles access was blocked system.cpu.icache.blocked::no_targets 0 # number of cycles access was blocked system.cpu.icache.avg_blocked_cycles::no_mshrs nan # average number of cycles each access was blocked system.cpu.icache.avg_blocked_cycles::no_targets nan # average number of cycles each access was blocked -system.cpu.icache.writebacks::writebacks 3190 # number of writebacks -system.cpu.icache.writebacks::total 3190 # number of writebacks -system.cpu.icache.ReadReq_mshr_misses::cpu.inst 5168 # number of ReadReq MSHR misses -system.cpu.icache.ReadReq_mshr_misses::total 5168 # number of ReadReq MSHR misses -system.cpu.icache.demand_mshr_misses::cpu.inst 5168 # number of demand (read+write) MSHR misses -system.cpu.icache.demand_mshr_misses::total 5168 # number of demand (read+write) MSHR misses -system.cpu.icache.overall_mshr_misses::cpu.inst 5168 # number of overall MSHR misses -system.cpu.icache.overall_mshr_misses::total 5168 # number of overall MSHR misses -system.cpu.icache.ReadReq_mshr_miss_latency::cpu.inst 311536500 # number of ReadReq MSHR miss cycles -system.cpu.icache.ReadReq_mshr_miss_latency::total 311536500 # number of ReadReq MSHR miss cycles -system.cpu.icache.demand_mshr_miss_latency::cpu.inst 311536500 # number of demand (read+write) MSHR miss cycles -system.cpu.icache.demand_mshr_miss_latency::total 311536500 # number of demand (read+write) MSHR miss cycles -system.cpu.icache.overall_mshr_miss_latency::cpu.inst 311536500 # number of overall MSHR miss cycles -system.cpu.icache.overall_mshr_miss_latency::total 311536500 # number of overall MSHR miss cycles +system.cpu.icache.writebacks::writebacks 3193 # number of writebacks +system.cpu.icache.writebacks::total 3193 # number of writebacks +system.cpu.icache.ReadReq_mshr_misses::cpu.inst 5171 # number of ReadReq MSHR misses +system.cpu.icache.ReadReq_mshr_misses::total 5171 # number of ReadReq MSHR misses +system.cpu.icache.demand_mshr_misses::cpu.inst 5171 # number of demand (read+write) MSHR misses +system.cpu.icache.demand_mshr_misses::total 5171 # number of demand (read+write) MSHR misses +system.cpu.icache.overall_mshr_misses::cpu.inst 5171 # number of overall MSHR misses +system.cpu.icache.overall_mshr_misses::total 5171 # number of overall MSHR misses +system.cpu.icache.ReadReq_mshr_miss_latency::cpu.inst 312869500 # number of ReadReq MSHR miss cycles +system.cpu.icache.ReadReq_mshr_miss_latency::total 312869500 # number of ReadReq MSHR miss cycles +system.cpu.icache.demand_mshr_miss_latency::cpu.inst 312869500 # number of demand (read+write) MSHR miss cycles +system.cpu.icache.demand_mshr_miss_latency::total 312869500 # number of demand (read+write) MSHR miss cycles +system.cpu.icache.overall_mshr_miss_latency::cpu.inst 312869500 # 
number of overall MSHR miss cycles +system.cpu.icache.overall_mshr_miss_latency::total 312869500 # number of overall MSHR miss cycles system.cpu.icache.ReadReq_mshr_miss_rate::cpu.inst 0.000053 # mshr miss rate for ReadReq accesses system.cpu.icache.ReadReq_mshr_miss_rate::total 0.000053 # mshr miss rate for ReadReq accesses system.cpu.icache.demand_mshr_miss_rate::cpu.inst 0.000053 # mshr miss rate for demand accesses system.cpu.icache.demand_mshr_miss_rate::total 0.000053 # mshr miss rate for demand accesses system.cpu.icache.overall_mshr_miss_rate::cpu.inst 0.000053 # mshr miss rate for overall accesses system.cpu.icache.overall_mshr_miss_rate::total 0.000053 # mshr miss rate for overall accesses -system.cpu.icache.ReadReq_avg_mshr_miss_latency::cpu.inst 60281.830495 # average ReadReq mshr miss latency -system.cpu.icache.ReadReq_avg_mshr_miss_latency::total 60281.830495 # average ReadReq mshr miss latency -system.cpu.icache.demand_avg_mshr_miss_latency::cpu.inst 60281.830495 # average overall mshr miss latency -system.cpu.icache.demand_avg_mshr_miss_latency::total 60281.830495 # average overall mshr miss latency -system.cpu.icache.overall_avg_mshr_miss_latency::cpu.inst 60281.830495 # average overall mshr miss latency -system.cpu.icache.overall_avg_mshr_miss_latency::total 60281.830495 # average overall mshr miss latency -system.cpu.l2cache.tags.pwrStateResidencyTicks::UNDEFINED 223532962500 # Cumulative time (in ticks) in various power states +system.cpu.icache.ReadReq_avg_mshr_miss_latency::cpu.inst 60504.641269 # average ReadReq mshr miss latency +system.cpu.icache.ReadReq_avg_mshr_miss_latency::total 60504.641269 # average ReadReq mshr miss latency +system.cpu.icache.demand_avg_mshr_miss_latency::cpu.inst 60504.641269 # average overall mshr miss latency +system.cpu.icache.demand_avg_mshr_miss_latency::total 60504.641269 # average overall mshr miss latency +system.cpu.icache.overall_avg_mshr_miss_latency::cpu.inst 60504.641269 # average overall mshr miss latency +system.cpu.icache.overall_avg_mshr_miss_latency::total 60504.641269 # average overall mshr miss latency +system.cpu.l2cache.tags.pwrStateResidencyTicks::UNDEFINED 233525789500 # Cumulative time (in ticks) in various power states system.cpu.l2cache.tags.replacements 0 # number of replacements -system.cpu.l2cache.tags.tagsinuse 4421.902302 # Cycle average of tags in use -system.cpu.l2cache.tags.total_refs 4798 # Total number of references to valid blocks. -system.cpu.l2cache.tags.sampled_refs 5270 # Sample count of references to valid blocks. -system.cpu.l2cache.tags.avg_refs 0.910436 # Average number of references to valid blocks. +system.cpu.l2cache.tags.tagsinuse 4425.384656 # Cycle average of tags in use +system.cpu.l2cache.tags.total_refs 4801 # Total number of references to valid blocks. +system.cpu.l2cache.tags.sampled_refs 5273 # Sample count of references to valid blocks. +system.cpu.l2cache.tags.avg_refs 0.910487 # Average number of references to valid blocks. system.cpu.l2cache.tags.warmup_cycle 0 # Cycle when the warmup percentage was hit. 
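The derived icache numbers in the hunk above follow directly from the raw counters; a small sanity-check sketch (not part of the test suite), using the "+" values copied from the diff:

    # Values copied from the updated ("+") icache lines above.
    accesses     = 96_959_231   # ReadReq_accesses::total
    misses       = 5_171        # ReadReq_misses::total
    miss_latency = 318_040_500  # ReadReq_miss_latency::total (ticks)
    mshr_latency = 312_869_500  # ReadReq_mshr_miss_latency::total (ticks)

    print(f"miss rate        = {misses / accesses:.6f}")      # 0.000053
    print(f"avg miss latency = {miss_latency / misses:.6f}")  # 61504.641269
    print(f"avg MSHR latency = {mshr_latency / misses:.6f}")  # 60504.641269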
-system.cpu.l2cache.tags.occ_blocks::writebacks 372.081904 # Average occupied blocks per requestor -system.cpu.l2cache.tags.occ_blocks::cpu.inst 3407.854115 # Average occupied blocks per requestor -system.cpu.l2cache.tags.occ_blocks::cpu.data 641.966284 # Average occupied blocks per requestor -system.cpu.l2cache.tags.occ_percent::writebacks 0.011355 # Average percentage of cache occupancy -system.cpu.l2cache.tags.occ_percent::cpu.inst 0.103999 # Average percentage of cache occupancy -system.cpu.l2cache.tags.occ_percent::cpu.data 0.019591 # Average percentage of cache occupancy -system.cpu.l2cache.tags.occ_percent::total 0.134946 # Average percentage of cache occupancy -system.cpu.l2cache.tags.occ_task_id_blocks::1024 5270 # Occupied blocks per task id -system.cpu.l2cache.tags.age_task_id_blocks_1024::0 93 # Occupied blocks per task id -system.cpu.l2cache.tags.age_task_id_blocks_1024::1 125 # Occupied blocks per task id +system.cpu.l2cache.tags.occ_blocks::writebacks 372.164909 # Average occupied blocks per requestor +system.cpu.l2cache.tags.occ_blocks::cpu.inst 3411.179805 # Average occupied blocks per requestor +system.cpu.l2cache.tags.occ_blocks::cpu.data 642.039942 # Average occupied blocks per requestor +system.cpu.l2cache.tags.occ_percent::writebacks 0.011358 # Average percentage of cache occupancy +system.cpu.l2cache.tags.occ_percent::cpu.inst 0.104101 # Average percentage of cache occupancy +system.cpu.l2cache.tags.occ_percent::cpu.data 0.019594 # Average percentage of cache occupancy +system.cpu.l2cache.tags.occ_percent::total 0.135052 # Average percentage of cache occupancy +system.cpu.l2cache.tags.occ_task_id_blocks::1024 5273 # Occupied blocks per task id +system.cpu.l2cache.tags.age_task_id_blocks_1024::0 92 # Occupied blocks per task id +system.cpu.l2cache.tags.age_task_id_blocks_1024::1 126 # Occupied blocks per task id system.cpu.l2cache.tags.age_task_id_blocks_1024::2 613 # Occupied blocks per task id -system.cpu.l2cache.tags.age_task_id_blocks_1024::4 4439 # Occupied blocks per task id -system.cpu.l2cache.tags.occ_task_id_percent::1024 0.160828 # Percentage of cache occupancy per task id -system.cpu.l2cache.tags.tag_accesses 114820 # Number of tag accesses -system.cpu.l2cache.tags.data_accesses 114820 # Number of data accesses -system.cpu.l2cache.pwrStateResidencyTicks::UNDEFINED 223532962500 # Cumulative time (in ticks) in various power states +system.cpu.l2cache.tags.age_task_id_blocks_1024::4 4442 # Occupied blocks per task id +system.cpu.l2cache.tags.occ_task_id_percent::1024 0.160919 # Percentage of cache occupancy per task id +system.cpu.l2cache.tags.tag_accesses 114871 # Number of tag accesses +system.cpu.l2cache.tags.data_accesses 114871 # Number of data accesses +system.cpu.l2cache.pwrStateResidencyTicks::UNDEFINED 233525789500 # Cumulative time (in ticks) in various power states system.cpu.l2cache.WritebackDirty_hits::writebacks 654 # number of WritebackDirty hits system.cpu.l2cache.WritebackDirty_hits::total 654 # number of WritebackDirty hits -system.cpu.l2cache.WritebackClean_hits::writebacks 3190 # number of WritebackClean hits -system.cpu.l2cache.WritebackClean_hits::total 3190 # number of WritebackClean hits +system.cpu.l2cache.WritebackClean_hits::writebacks 3193 # number of WritebackClean hits +system.cpu.l2cache.WritebackClean_hits::total 3193 # number of WritebackClean hits system.cpu.l2cache.ReadExReq_hits::cpu.data 61 # number of ReadExReq hits system.cpu.l2cache.ReadExReq_hits::total 61 # number of ReadExReq hits 
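The L2 tag-occupancy percentages above are occupied blocks divided by the total L2 block count, assuming the 2 MiB, 64-byte-line L2 configured in the config.ini hunks elsewhere in this patch; a brief check:

    # occ_blocks values copied from the updated ("+") lines above.
    l2_blocks = 2_097_152 // 64                   # 32768 blocks (size / block_size)
    occ = {"writebacks": 372.164909,
           "cpu.inst":   3411.179805,
           "cpu.data":   642.039942}

    for src, blocks in occ.items():
        print(f"occ_percent::{src:10s} = {blocks / l2_blocks:.6f}")   # cpu.inst -> 0.104101
    print(f"occ_percent::total      = {sum(occ.values()) / l2_blocks:.6f}")  # 0.135052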
system.cpu.l2cache.ReadCleanReq_hits::cpu.inst 1276 # number of ReadCleanReq hits @@ -590,68 +590,68 @@ system.cpu.l2cache.overall_hits::cpu.data 187 # n system.cpu.l2cache.overall_hits::total 1463 # number of overall hits system.cpu.l2cache.ReadExReq_misses::cpu.data 3137 # number of ReadExReq misses system.cpu.l2cache.ReadExReq_misses::total 3137 # number of ReadExReq misses -system.cpu.l2cache.ReadCleanReq_misses::cpu.inst 3892 # number of ReadCleanReq misses -system.cpu.l2cache.ReadCleanReq_misses::total 3892 # number of ReadCleanReq misses +system.cpu.l2cache.ReadCleanReq_misses::cpu.inst 3895 # number of ReadCleanReq misses +system.cpu.l2cache.ReadCleanReq_misses::total 3895 # number of ReadCleanReq misses system.cpu.l2cache.ReadSharedReq_misses::cpu.data 841 # number of ReadSharedReq misses system.cpu.l2cache.ReadSharedReq_misses::total 841 # number of ReadSharedReq misses -system.cpu.l2cache.demand_misses::cpu.inst 3892 # number of demand (read+write) misses +system.cpu.l2cache.demand_misses::cpu.inst 3895 # number of demand (read+write) misses system.cpu.l2cache.demand_misses::cpu.data 3978 # number of demand (read+write) misses -system.cpu.l2cache.demand_misses::total 7870 # number of demand (read+write) misses -system.cpu.l2cache.overall_misses::cpu.inst 3892 # number of overall misses +system.cpu.l2cache.demand_misses::total 7873 # number of demand (read+write) misses +system.cpu.l2cache.overall_misses::cpu.inst 3895 # number of overall misses system.cpu.l2cache.overall_misses::cpu.data 3978 # number of overall misses -system.cpu.l2cache.overall_misses::total 7870 # number of overall misses -system.cpu.l2cache.ReadExReq_miss_latency::cpu.data 234104000 # number of ReadExReq miss cycles -system.cpu.l2cache.ReadExReq_miss_latency::total 234104000 # number of ReadExReq miss cycles -system.cpu.l2cache.ReadCleanReq_miss_latency::cpu.inst 290385500 # number of ReadCleanReq miss cycles -system.cpu.l2cache.ReadCleanReq_miss_latency::total 290385500 # number of ReadCleanReq miss cycles -system.cpu.l2cache.ReadSharedReq_miss_latency::cpu.data 68345000 # number of ReadSharedReq miss cycles -system.cpu.l2cache.ReadSharedReq_miss_latency::total 68345000 # number of ReadSharedReq miss cycles -system.cpu.l2cache.demand_miss_latency::cpu.inst 290385500 # number of demand (read+write) miss cycles -system.cpu.l2cache.demand_miss_latency::cpu.data 302449000 # number of demand (read+write) miss cycles -system.cpu.l2cache.demand_miss_latency::total 592834500 # number of demand (read+write) miss cycles -system.cpu.l2cache.overall_miss_latency::cpu.inst 290385500 # number of overall miss cycles -system.cpu.l2cache.overall_miss_latency::cpu.data 302449000 # number of overall miss cycles -system.cpu.l2cache.overall_miss_latency::total 592834500 # number of overall miss cycles +system.cpu.l2cache.overall_misses::total 7873 # number of overall misses +system.cpu.l2cache.ReadExReq_miss_latency::cpu.data 234589500 # number of ReadExReq miss cycles +system.cpu.l2cache.ReadExReq_miss_latency::total 234589500 # number of ReadExReq miss cycles +system.cpu.l2cache.ReadCleanReq_miss_latency::cpu.inst 291713500 # number of ReadCleanReq miss cycles +system.cpu.l2cache.ReadCleanReq_miss_latency::total 291713500 # number of ReadCleanReq miss cycles +system.cpu.l2cache.ReadSharedReq_miss_latency::cpu.data 67354500 # number of ReadSharedReq miss cycles +system.cpu.l2cache.ReadSharedReq_miss_latency::total 67354500 # number of ReadSharedReq miss cycles +system.cpu.l2cache.demand_miss_latency::cpu.inst 291713500 # 
number of demand (read+write) miss cycles +system.cpu.l2cache.demand_miss_latency::cpu.data 301944000 # number of demand (read+write) miss cycles +system.cpu.l2cache.demand_miss_latency::total 593657500 # number of demand (read+write) miss cycles +system.cpu.l2cache.overall_miss_latency::cpu.inst 291713500 # number of overall miss cycles +system.cpu.l2cache.overall_miss_latency::cpu.data 301944000 # number of overall miss cycles +system.cpu.l2cache.overall_miss_latency::total 593657500 # number of overall miss cycles system.cpu.l2cache.WritebackDirty_accesses::writebacks 654 # number of WritebackDirty accesses(hits+misses) system.cpu.l2cache.WritebackDirty_accesses::total 654 # number of WritebackDirty accesses(hits+misses) -system.cpu.l2cache.WritebackClean_accesses::writebacks 3190 # number of WritebackClean accesses(hits+misses) -system.cpu.l2cache.WritebackClean_accesses::total 3190 # number of WritebackClean accesses(hits+misses) +system.cpu.l2cache.WritebackClean_accesses::writebacks 3193 # number of WritebackClean accesses(hits+misses) +system.cpu.l2cache.WritebackClean_accesses::total 3193 # number of WritebackClean accesses(hits+misses) system.cpu.l2cache.ReadExReq_accesses::cpu.data 3198 # number of ReadExReq accesses(hits+misses) system.cpu.l2cache.ReadExReq_accesses::total 3198 # number of ReadExReq accesses(hits+misses) -system.cpu.l2cache.ReadCleanReq_accesses::cpu.inst 5168 # number of ReadCleanReq accesses(hits+misses) -system.cpu.l2cache.ReadCleanReq_accesses::total 5168 # number of ReadCleanReq accesses(hits+misses) +system.cpu.l2cache.ReadCleanReq_accesses::cpu.inst 5171 # number of ReadCleanReq accesses(hits+misses) +system.cpu.l2cache.ReadCleanReq_accesses::total 5171 # number of ReadCleanReq accesses(hits+misses) system.cpu.l2cache.ReadSharedReq_accesses::cpu.data 967 # number of ReadSharedReq accesses(hits+misses) system.cpu.l2cache.ReadSharedReq_accesses::total 967 # number of ReadSharedReq accesses(hits+misses) -system.cpu.l2cache.demand_accesses::cpu.inst 5168 # number of demand (read+write) accesses +system.cpu.l2cache.demand_accesses::cpu.inst 5171 # number of demand (read+write) accesses system.cpu.l2cache.demand_accesses::cpu.data 4165 # number of demand (read+write) accesses -system.cpu.l2cache.demand_accesses::total 9333 # number of demand (read+write) accesses -system.cpu.l2cache.overall_accesses::cpu.inst 5168 # number of overall (read+write) accesses +system.cpu.l2cache.demand_accesses::total 9336 # number of demand (read+write) accesses +system.cpu.l2cache.overall_accesses::cpu.inst 5171 # number of overall (read+write) accesses system.cpu.l2cache.overall_accesses::cpu.data 4165 # number of overall (read+write) accesses -system.cpu.l2cache.overall_accesses::total 9333 # number of overall (read+write) accesses +system.cpu.l2cache.overall_accesses::total 9336 # number of overall (read+write) accesses system.cpu.l2cache.ReadExReq_miss_rate::cpu.data 0.980926 # miss rate for ReadExReq accesses system.cpu.l2cache.ReadExReq_miss_rate::total 0.980926 # miss rate for ReadExReq accesses -system.cpu.l2cache.ReadCleanReq_miss_rate::cpu.inst 0.753096 # miss rate for ReadCleanReq accesses -system.cpu.l2cache.ReadCleanReq_miss_rate::total 0.753096 # miss rate for ReadCleanReq accesses +system.cpu.l2cache.ReadCleanReq_miss_rate::cpu.inst 0.753239 # miss rate for ReadCleanReq accesses +system.cpu.l2cache.ReadCleanReq_miss_rate::total 0.753239 # miss rate for ReadCleanReq accesses system.cpu.l2cache.ReadSharedReq_miss_rate::cpu.data 0.869700 # miss rate for ReadSharedReq 
accesses system.cpu.l2cache.ReadSharedReq_miss_rate::total 0.869700 # miss rate for ReadSharedReq accesses -system.cpu.l2cache.demand_miss_rate::cpu.inst 0.753096 # miss rate for demand accesses +system.cpu.l2cache.demand_miss_rate::cpu.inst 0.753239 # miss rate for demand accesses system.cpu.l2cache.demand_miss_rate::cpu.data 0.955102 # miss rate for demand accesses -system.cpu.l2cache.demand_miss_rate::total 0.843244 # miss rate for demand accesses -system.cpu.l2cache.overall_miss_rate::cpu.inst 0.753096 # miss rate for overall accesses +system.cpu.l2cache.demand_miss_rate::total 0.843295 # miss rate for demand accesses +system.cpu.l2cache.overall_miss_rate::cpu.inst 0.753239 # miss rate for overall accesses system.cpu.l2cache.overall_miss_rate::cpu.data 0.955102 # miss rate for overall accesses -system.cpu.l2cache.overall_miss_rate::total 0.843244 # miss rate for overall accesses -system.cpu.l2cache.ReadExReq_avg_miss_latency::cpu.data 74626.713420 # average ReadExReq miss latency -system.cpu.l2cache.ReadExReq_avg_miss_latency::total 74626.713420 # average ReadExReq miss latency -system.cpu.l2cache.ReadCleanReq_avg_miss_latency::cpu.inst 74610.868448 # average ReadCleanReq miss latency -system.cpu.l2cache.ReadCleanReq_avg_miss_latency::total 74610.868448 # average ReadCleanReq miss latency -system.cpu.l2cache.ReadSharedReq_avg_miss_latency::cpu.data 81266.349584 # average ReadSharedReq miss latency -system.cpu.l2cache.ReadSharedReq_avg_miss_latency::total 81266.349584 # average ReadSharedReq miss latency -system.cpu.l2cache.demand_avg_miss_latency::cpu.inst 74610.868448 # average overall miss latency -system.cpu.l2cache.demand_avg_miss_latency::cpu.data 76030.417295 # average overall miss latency -system.cpu.l2cache.demand_avg_miss_latency::total 75328.398983 # average overall miss latency -system.cpu.l2cache.overall_avg_miss_latency::cpu.inst 74610.868448 # average overall miss latency -system.cpu.l2cache.overall_avg_miss_latency::cpu.data 76030.417295 # average overall miss latency -system.cpu.l2cache.overall_avg_miss_latency::total 75328.398983 # average overall miss latency +system.cpu.l2cache.overall_miss_rate::total 0.843295 # miss rate for overall accesses +system.cpu.l2cache.ReadExReq_avg_miss_latency::cpu.data 74781.479120 # average ReadExReq miss latency +system.cpu.l2cache.ReadExReq_avg_miss_latency::total 74781.479120 # average ReadExReq miss latency +system.cpu.l2cache.ReadCleanReq_avg_miss_latency::cpu.inst 74894.351733 # average ReadCleanReq miss latency +system.cpu.l2cache.ReadCleanReq_avg_miss_latency::total 74894.351733 # average ReadCleanReq miss latency +system.cpu.l2cache.ReadSharedReq_avg_miss_latency::cpu.data 80088.585018 # average ReadSharedReq miss latency +system.cpu.l2cache.ReadSharedReq_avg_miss_latency::total 80088.585018 # average ReadSharedReq miss latency +system.cpu.l2cache.demand_avg_miss_latency::cpu.inst 74894.351733 # average overall miss latency +system.cpu.l2cache.demand_avg_miss_latency::cpu.data 75903.469080 # average overall miss latency +system.cpu.l2cache.demand_avg_miss_latency::total 75404.229646 # average overall miss latency +system.cpu.l2cache.overall_avg_miss_latency::cpu.inst 74894.351733 # average overall miss latency +system.cpu.l2cache.overall_avg_miss_latency::cpu.data 75903.469080 # average overall miss latency +system.cpu.l2cache.overall_avg_miss_latency::total 75404.229646 # average overall miss latency system.cpu.l2cache.blocked_cycles::no_mshrs 0 # number of cycles access was blocked 
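The L2 demand totals and rates in this hunk are simply the per-source counters combined; a short check using the "+" values:

    # Per-source demand counters copied from the updated ("+") lines above.
    misses   = {"cpu.inst": 3_895, "cpu.data": 3_978}
    accesses = {"cpu.inst": 5_171, "cpu.data": 4_165}
    demand_miss_latency_total = 593_657_500      # ticks

    total_misses   = sum(misses.values())        # 7873
    total_accesses = sum(accesses.values())      # 9336
    print(f"demand_miss_rate::total        = {total_misses / total_accesses:.6f}")           # 0.843295
    print(f"demand_avg_miss_latency::total = {demand_miss_latency_total / total_misses:.6f}")  # 75404.229646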
system.cpu.l2cache.blocked_cycles::no_targets 0 # number of cycles access was blocked system.cpu.l2cache.blocked::no_mshrs 0 # number of cycles access was blocked @@ -660,114 +660,116 @@ system.cpu.l2cache.avg_blocked_cycles::no_mshrs nan system.cpu.l2cache.avg_blocked_cycles::no_targets nan # average number of cycles each access was blocked system.cpu.l2cache.ReadExReq_mshr_misses::cpu.data 3137 # number of ReadExReq MSHR misses system.cpu.l2cache.ReadExReq_mshr_misses::total 3137 # number of ReadExReq MSHR misses -system.cpu.l2cache.ReadCleanReq_mshr_misses::cpu.inst 3892 # number of ReadCleanReq MSHR misses -system.cpu.l2cache.ReadCleanReq_mshr_misses::total 3892 # number of ReadCleanReq MSHR misses +system.cpu.l2cache.ReadCleanReq_mshr_misses::cpu.inst 3895 # number of ReadCleanReq MSHR misses +system.cpu.l2cache.ReadCleanReq_mshr_misses::total 3895 # number of ReadCleanReq MSHR misses system.cpu.l2cache.ReadSharedReq_mshr_misses::cpu.data 841 # number of ReadSharedReq MSHR misses system.cpu.l2cache.ReadSharedReq_mshr_misses::total 841 # number of ReadSharedReq MSHR misses -system.cpu.l2cache.demand_mshr_misses::cpu.inst 3892 # number of demand (read+write) MSHR misses +system.cpu.l2cache.demand_mshr_misses::cpu.inst 3895 # number of demand (read+write) MSHR misses system.cpu.l2cache.demand_mshr_misses::cpu.data 3978 # number of demand (read+write) MSHR misses -system.cpu.l2cache.demand_mshr_misses::total 7870 # number of demand (read+write) MSHR misses -system.cpu.l2cache.overall_mshr_misses::cpu.inst 3892 # number of overall MSHR misses +system.cpu.l2cache.demand_mshr_misses::total 7873 # number of demand (read+write) MSHR misses +system.cpu.l2cache.overall_mshr_misses::cpu.inst 3895 # number of overall MSHR misses system.cpu.l2cache.overall_mshr_misses::cpu.data 3978 # number of overall MSHR misses -system.cpu.l2cache.overall_mshr_misses::total 7870 # number of overall MSHR misses -system.cpu.l2cache.ReadExReq_mshr_miss_latency::cpu.data 202734000 # number of ReadExReq MSHR miss cycles -system.cpu.l2cache.ReadExReq_mshr_miss_latency::total 202734000 # number of ReadExReq MSHR miss cycles -system.cpu.l2cache.ReadCleanReq_mshr_miss_latency::cpu.inst 251465500 # number of ReadCleanReq MSHR miss cycles -system.cpu.l2cache.ReadCleanReq_mshr_miss_latency::total 251465500 # number of ReadCleanReq MSHR miss cycles -system.cpu.l2cache.ReadSharedReq_mshr_miss_latency::cpu.data 59935000 # number of ReadSharedReq MSHR miss cycles -system.cpu.l2cache.ReadSharedReq_mshr_miss_latency::total 59935000 # number of ReadSharedReq MSHR miss cycles -system.cpu.l2cache.demand_mshr_miss_latency::cpu.inst 251465500 # number of demand (read+write) MSHR miss cycles -system.cpu.l2cache.demand_mshr_miss_latency::cpu.data 262669000 # number of demand (read+write) MSHR miss cycles -system.cpu.l2cache.demand_mshr_miss_latency::total 514134500 # number of demand (read+write) MSHR miss cycles -system.cpu.l2cache.overall_mshr_miss_latency::cpu.inst 251465500 # number of overall MSHR miss cycles -system.cpu.l2cache.overall_mshr_miss_latency::cpu.data 262669000 # number of overall MSHR miss cycles -system.cpu.l2cache.overall_mshr_miss_latency::total 514134500 # number of overall MSHR miss cycles +system.cpu.l2cache.overall_mshr_misses::total 7873 # number of overall MSHR misses +system.cpu.l2cache.ReadExReq_mshr_miss_latency::cpu.data 203219500 # number of ReadExReq MSHR miss cycles +system.cpu.l2cache.ReadExReq_mshr_miss_latency::total 203219500 # number of ReadExReq MSHR miss cycles 
+system.cpu.l2cache.ReadCleanReq_mshr_miss_latency::cpu.inst 252763500 # number of ReadCleanReq MSHR miss cycles +system.cpu.l2cache.ReadCleanReq_mshr_miss_latency::total 252763500 # number of ReadCleanReq MSHR miss cycles +system.cpu.l2cache.ReadSharedReq_mshr_miss_latency::cpu.data 58944500 # number of ReadSharedReq MSHR miss cycles +system.cpu.l2cache.ReadSharedReq_mshr_miss_latency::total 58944500 # number of ReadSharedReq MSHR miss cycles +system.cpu.l2cache.demand_mshr_miss_latency::cpu.inst 252763500 # number of demand (read+write) MSHR miss cycles +system.cpu.l2cache.demand_mshr_miss_latency::cpu.data 262164000 # number of demand (read+write) MSHR miss cycles +system.cpu.l2cache.demand_mshr_miss_latency::total 514927500 # number of demand (read+write) MSHR miss cycles +system.cpu.l2cache.overall_mshr_miss_latency::cpu.inst 252763500 # number of overall MSHR miss cycles +system.cpu.l2cache.overall_mshr_miss_latency::cpu.data 262164000 # number of overall MSHR miss cycles +system.cpu.l2cache.overall_mshr_miss_latency::total 514927500 # number of overall MSHR miss cycles system.cpu.l2cache.ReadExReq_mshr_miss_rate::cpu.data 0.980926 # mshr miss rate for ReadExReq accesses system.cpu.l2cache.ReadExReq_mshr_miss_rate::total 0.980926 # mshr miss rate for ReadExReq accesses -system.cpu.l2cache.ReadCleanReq_mshr_miss_rate::cpu.inst 0.753096 # mshr miss rate for ReadCleanReq accesses -system.cpu.l2cache.ReadCleanReq_mshr_miss_rate::total 0.753096 # mshr miss rate for ReadCleanReq accesses +system.cpu.l2cache.ReadCleanReq_mshr_miss_rate::cpu.inst 0.753239 # mshr miss rate for ReadCleanReq accesses +system.cpu.l2cache.ReadCleanReq_mshr_miss_rate::total 0.753239 # mshr miss rate for ReadCleanReq accesses system.cpu.l2cache.ReadSharedReq_mshr_miss_rate::cpu.data 0.869700 # mshr miss rate for ReadSharedReq accesses system.cpu.l2cache.ReadSharedReq_mshr_miss_rate::total 0.869700 # mshr miss rate for ReadSharedReq accesses -system.cpu.l2cache.demand_mshr_miss_rate::cpu.inst 0.753096 # mshr miss rate for demand accesses +system.cpu.l2cache.demand_mshr_miss_rate::cpu.inst 0.753239 # mshr miss rate for demand accesses system.cpu.l2cache.demand_mshr_miss_rate::cpu.data 0.955102 # mshr miss rate for demand accesses -system.cpu.l2cache.demand_mshr_miss_rate::total 0.843244 # mshr miss rate for demand accesses -system.cpu.l2cache.overall_mshr_miss_rate::cpu.inst 0.753096 # mshr miss rate for overall accesses +system.cpu.l2cache.demand_mshr_miss_rate::total 0.843295 # mshr miss rate for demand accesses +system.cpu.l2cache.overall_mshr_miss_rate::cpu.inst 0.753239 # mshr miss rate for overall accesses system.cpu.l2cache.overall_mshr_miss_rate::cpu.data 0.955102 # mshr miss rate for overall accesses -system.cpu.l2cache.overall_mshr_miss_rate::total 0.843244 # mshr miss rate for overall accesses -system.cpu.l2cache.ReadExReq_avg_mshr_miss_latency::cpu.data 64626.713420 # average ReadExReq mshr miss latency -system.cpu.l2cache.ReadExReq_avg_mshr_miss_latency::total 64626.713420 # average ReadExReq mshr miss latency -system.cpu.l2cache.ReadCleanReq_avg_mshr_miss_latency::cpu.inst 64610.868448 # average ReadCleanReq mshr miss latency -system.cpu.l2cache.ReadCleanReq_avg_mshr_miss_latency::total 64610.868448 # average ReadCleanReq mshr miss latency -system.cpu.l2cache.ReadSharedReq_avg_mshr_miss_latency::cpu.data 71266.349584 # average ReadSharedReq mshr miss latency -system.cpu.l2cache.ReadSharedReq_avg_mshr_miss_latency::total 71266.349584 # average ReadSharedReq mshr miss latency 
-system.cpu.l2cache.demand_avg_mshr_miss_latency::cpu.inst 64610.868448 # average overall mshr miss latency -system.cpu.l2cache.demand_avg_mshr_miss_latency::cpu.data 66030.417295 # average overall mshr miss latency -system.cpu.l2cache.demand_avg_mshr_miss_latency::total 65328.398983 # average overall mshr miss latency -system.cpu.l2cache.overall_avg_mshr_miss_latency::cpu.inst 64610.868448 # average overall mshr miss latency -system.cpu.l2cache.overall_avg_mshr_miss_latency::cpu.data 66030.417295 # average overall mshr miss latency -system.cpu.l2cache.overall_avg_mshr_miss_latency::total 65328.398983 # average overall mshr miss latency -system.cpu.toL2Bus.snoop_filter.tot_requests 13294 # Total number of requests made to the snoop filter. -system.cpu.toL2Bus.snoop_filter.hit_single_requests 3961 # Number of requests hitting in the snoop filter with a single holder of the requested data. +system.cpu.l2cache.overall_mshr_miss_rate::total 0.843295 # mshr miss rate for overall accesses +system.cpu.l2cache.ReadExReq_avg_mshr_miss_latency::cpu.data 64781.479120 # average ReadExReq mshr miss latency +system.cpu.l2cache.ReadExReq_avg_mshr_miss_latency::total 64781.479120 # average ReadExReq mshr miss latency +system.cpu.l2cache.ReadCleanReq_avg_mshr_miss_latency::cpu.inst 64894.351733 # average ReadCleanReq mshr miss latency +system.cpu.l2cache.ReadCleanReq_avg_mshr_miss_latency::total 64894.351733 # average ReadCleanReq mshr miss latency +system.cpu.l2cache.ReadSharedReq_avg_mshr_miss_latency::cpu.data 70088.585018 # average ReadSharedReq mshr miss latency +system.cpu.l2cache.ReadSharedReq_avg_mshr_miss_latency::total 70088.585018 # average ReadSharedReq mshr miss latency +system.cpu.l2cache.demand_avg_mshr_miss_latency::cpu.inst 64894.351733 # average overall mshr miss latency +system.cpu.l2cache.demand_avg_mshr_miss_latency::cpu.data 65903.469080 # average overall mshr miss latency +system.cpu.l2cache.demand_avg_mshr_miss_latency::total 65404.229646 # average overall mshr miss latency +system.cpu.l2cache.overall_avg_mshr_miss_latency::cpu.inst 64894.351733 # average overall mshr miss latency +system.cpu.l2cache.overall_avg_mshr_miss_latency::cpu.data 65903.469080 # average overall mshr miss latency +system.cpu.l2cache.overall_avg_mshr_miss_latency::total 65404.229646 # average overall mshr miss latency +system.cpu.toL2Bus.snoop_filter.tot_requests 13300 # Total number of requests made to the snoop filter. +system.cpu.toL2Bus.snoop_filter.hit_single_requests 3964 # Number of requests hitting in the snoop filter with a single holder of the requested data. system.cpu.toL2Bus.snoop_filter.hit_multi_requests 0 # Number of requests hitting in the snoop filter with multiple (>1) holders of the requested data. system.cpu.toL2Bus.snoop_filter.tot_snoops 0 # Total number of snoops made to the snoop filter. system.cpu.toL2Bus.snoop_filter.hit_single_snoops 0 # Number of snoops hitting in the snoop filter with a single holder of the requested data. system.cpu.toL2Bus.snoop_filter.hit_multi_snoops 0 # Number of snoops hitting in the snoop filter with multiple (>1) holders of the requested data. 
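The average MSHR miss latencies above are the MSHR miss cycles divided by the MSHR miss counts, and each sits exactly 10000 ticks below the corresponding avg_miss_latency (presumably the L2's own lookup/response latency, though that attribution is an inference, not something the stats state). A minimal check with the "+" values:

    # MSHR miss cycles and miss counts copied from the updated ("+") lines above.
    mshr_latency = {"ReadExReq": 203_219_500,
                    "ReadCleanReq": 252_763_500,
                    "ReadSharedReq": 58_944_500}
    mshr_misses  = {"ReadExReq": 3_137,
                    "ReadCleanReq": 3_895,
                    "ReadSharedReq": 841}

    for req in mshr_latency:
        print(f"{req}_avg_mshr_miss_latency = {mshr_latency[req] / mshr_misses[req]:.6f}")
    # ReadExReq     -> 64781.479120
    # ReadCleanReq  -> 64894.351733
    # ReadSharedReq -> 70088.585018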
-system.cpu.toL2Bus.pwrStateResidencyTicks::UNDEFINED 223532962500 # Cumulative time (in ticks) in various power states -system.cpu.toL2Bus.trans_dist::ReadResp 6135 # Transaction distribution +system.cpu.toL2Bus.pwrStateResidencyTicks::UNDEFINED 233525789500 # Cumulative time (in ticks) in various power states +system.cpu.toL2Bus.trans_dist::ReadResp 6138 # Transaction distribution system.cpu.toL2Bus.trans_dist::WritebackDirty 654 # Transaction distribution -system.cpu.toL2Bus.trans_dist::WritebackClean 3190 # Transaction distribution +system.cpu.toL2Bus.trans_dist::WritebackClean 3193 # Transaction distribution system.cpu.toL2Bus.trans_dist::CleanEvict 117 # Transaction distribution system.cpu.toL2Bus.trans_dist::ReadExReq 3198 # Transaction distribution system.cpu.toL2Bus.trans_dist::ReadExResp 3198 # Transaction distribution -system.cpu.toL2Bus.trans_dist::ReadCleanReq 5168 # Transaction distribution +system.cpu.toL2Bus.trans_dist::ReadCleanReq 5171 # Transaction distribution system.cpu.toL2Bus.trans_dist::ReadSharedReq 967 # Transaction distribution -system.cpu.toL2Bus.pkt_count_system.cpu.icache.mem_side::system.cpu.l2cache.cpu_side 13526 # Packet count per connected master and slave (bytes) +system.cpu.toL2Bus.pkt_count_system.cpu.icache.mem_side::system.cpu.l2cache.cpu_side 13535 # Packet count per connected master and slave (bytes) system.cpu.toL2Bus.pkt_count_system.cpu.dcache.mem_side::system.cpu.l2cache.cpu_side 9101 # Packet count per connected master and slave (bytes) -system.cpu.toL2Bus.pkt_count::total 22627 # Packet count per connected master and slave (bytes) -system.cpu.toL2Bus.pkt_size_system.cpu.icache.mem_side::system.cpu.l2cache.cpu_side 534912 # Cumulative packet size per connected master and slave (bytes) +system.cpu.toL2Bus.pkt_count::total 22636 # Packet count per connected master and slave (bytes) +system.cpu.toL2Bus.pkt_size_system.cpu.icache.mem_side::system.cpu.l2cache.cpu_side 535296 # Cumulative packet size per connected master and slave (bytes) system.cpu.toL2Bus.pkt_size_system.cpu.dcache.mem_side::system.cpu.l2cache.cpu_side 308416 # Cumulative packet size per connected master and slave (bytes) -system.cpu.toL2Bus.pkt_size::total 843328 # Cumulative packet size per connected master and slave (bytes) +system.cpu.toL2Bus.pkt_size::total 843712 # Cumulative packet size per connected master and slave (bytes) system.cpu.toL2Bus.snoops 0 # Total snoops (count) -system.cpu.toL2Bus.snoop_fanout::samples 9333 # Request fanout histogram +system.cpu.toL2Bus.snoopTraffic 0 # Total snoop traffic (bytes) +system.cpu.toL2Bus.snoop_fanout::samples 9336 # Request fanout histogram system.cpu.toL2Bus.snoop_fanout::mean 0 # Request fanout histogram system.cpu.toL2Bus.snoop_fanout::stdev 0 # Request fanout histogram system.cpu.toL2Bus.snoop_fanout::underflows 0 0.00% 0.00% # Request fanout histogram -system.cpu.toL2Bus.snoop_fanout::0 9333 100.00% 100.00% # Request fanout histogram +system.cpu.toL2Bus.snoop_fanout::0 9336 100.00% 100.00% # Request fanout histogram system.cpu.toL2Bus.snoop_fanout::1 0 0.00% 100.00% # Request fanout histogram system.cpu.toL2Bus.snoop_fanout::2 0 0.00% 100.00% # Request fanout histogram system.cpu.toL2Bus.snoop_fanout::overflows 0 0.00% 100.00% # Request fanout histogram system.cpu.toL2Bus.snoop_fanout::min_value 0 # Request fanout histogram system.cpu.toL2Bus.snoop_fanout::max_value 0 # Request fanout histogram -system.cpu.toL2Bus.snoop_fanout::total 9333 # Request fanout histogram -system.cpu.toL2Bus.reqLayer0.occupancy 10491000 # Layer 
occupancy (ticks) +system.cpu.toL2Bus.snoop_fanout::total 9336 # Request fanout histogram +system.cpu.toL2Bus.reqLayer0.occupancy 10497000 # Layer occupancy (ticks) system.cpu.toL2Bus.reqLayer0.utilization 0.0 # Layer utilization (%) -system.cpu.toL2Bus.respLayer0.occupancy 7752000 # Layer occupancy (ticks) +system.cpu.toL2Bus.respLayer0.occupancy 7756500 # Layer occupancy (ticks) system.cpu.toL2Bus.respLayer0.utilization 0.0 # Layer utilization (%) system.cpu.toL2Bus.respLayer1.occupancy 6247999 # Layer occupancy (ticks) system.cpu.toL2Bus.respLayer1.utilization 0.0 # Layer utilization (%) -system.membus.pwrStateResidencyTicks::UNDEFINED 223532962500 # Cumulative time (in ticks) in various power states -system.membus.trans_dist::ReadResp 4733 # Transaction distribution +system.membus.pwrStateResidencyTicks::UNDEFINED 233525789500 # Cumulative time (in ticks) in various power states +system.membus.trans_dist::ReadResp 4736 # Transaction distribution system.membus.trans_dist::ReadExReq 3137 # Transaction distribution system.membus.trans_dist::ReadExResp 3137 # Transaction distribution -system.membus.trans_dist::ReadSharedReq 4733 # Transaction distribution -system.membus.pkt_count_system.cpu.l2cache.mem_side::system.physmem.port 15740 # Packet count per connected master and slave (bytes) -system.membus.pkt_count::total 15740 # Packet count per connected master and slave (bytes) -system.membus.pkt_size_system.cpu.l2cache.mem_side::system.physmem.port 503680 # Cumulative packet size per connected master and slave (bytes) -system.membus.pkt_size::total 503680 # Cumulative packet size per connected master and slave (bytes) +system.membus.trans_dist::ReadSharedReq 4736 # Transaction distribution +system.membus.pkt_count_system.cpu.l2cache.mem_side::system.physmem.port 15746 # Packet count per connected master and slave (bytes) +system.membus.pkt_count::total 15746 # Packet count per connected master and slave (bytes) +system.membus.pkt_size_system.cpu.l2cache.mem_side::system.physmem.port 503872 # Cumulative packet size per connected master and slave (bytes) +system.membus.pkt_size::total 503872 # Cumulative packet size per connected master and slave (bytes) system.membus.snoops 0 # Total snoops (count) -system.membus.snoop_fanout::samples 7870 # Request fanout histogram +system.membus.snoopTraffic 0 # Total snoop traffic (bytes) +system.membus.snoop_fanout::samples 7873 # Request fanout histogram system.membus.snoop_fanout::mean 0 # Request fanout histogram system.membus.snoop_fanout::stdev 0 # Request fanout histogram system.membus.snoop_fanout::underflows 0 0.00% 0.00% # Request fanout histogram -system.membus.snoop_fanout::0 7870 100.00% 100.00% # Request fanout histogram +system.membus.snoop_fanout::0 7873 100.00% 100.00% # Request fanout histogram system.membus.snoop_fanout::1 0 0.00% 100.00% # Request fanout histogram system.membus.snoop_fanout::overflows 0 0.00% 100.00% # Request fanout histogram system.membus.snoop_fanout::min_value 0 # Request fanout histogram system.membus.snoop_fanout::max_value 0 # Request fanout histogram -system.membus.snoop_fanout::total 7870 # Request fanout histogram -system.membus.reqLayer0.occupancy 9176500 # Layer occupancy (ticks) +system.membus.snoop_fanout::total 7873 # Request fanout histogram +system.membus.reqLayer0.occupancy 9219000 # Layer occupancy (ticks) system.membus.reqLayer0.utilization 0.0 # Layer utilization (%) -system.membus.respLayer1.occupancy 41781750 # Layer occupancy (ticks) +system.membus.respLayer1.occupancy 41801750 # Layer 
occupancy (ticks) system.membus.respLayer1.utilization 0.0 # Layer utilization (%) ---------- End Simulation Statistics ---------- diff --git a/tests/long/se/30.eon/ref/alpha/tru64/o3-timing/config.ini b/tests/long/se/30.eon/ref/alpha/tru64/o3-timing/config.ini index fda724fd7..e7c466732 100644 --- a/tests/long/se/30.eon/ref/alpha/tru64/o3-timing/config.ini +++ b/tests/long/se/30.eon/ref/alpha/tru64/o3-timing/config.ini @@ -14,7 +14,9 @@ children=clk_domain cpu cpu_clk_domain dvfs_handler membus physmem voltage_domai boot_osflags=a cache_line_size=64 clk_domain=system.clk_domain +default_p_state=UNDEFINED eventq_index=0 +exit_on_work_items=false init_param=0 kernel= kernel_addr_check=true @@ -24,9 +26,16 @@ mem_mode=timing mem_ranges= memories=system.physmem mmap_using_noreserve=false +multi_thread=false num_work_ids=16 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null readfile= symbolfile= +thermal_components= +thermal_model=Null work_begin_ckpt_count=0 work_begin_cpu_id_exit=-1 work_begin_exit_count=0 @@ -68,6 +77,7 @@ cpu_id=0 decodeToFetchDelay=1 decodeToRenameDelay=1 decodeWidth=8 +default_p_state=UNDEFINED dispatchWidth=8 do_checkpoint_insts=true do_quiesce=true @@ -104,6 +114,10 @@ numPhysIntRegs=256 numROBEntries=192 numRobs=1 numThreads=1 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null profile=0 progress_interval=0 renameToDecodeDelay=1 @@ -143,11 +157,18 @@ choicePredictorSize=8192 eventq_index=0 globalCtrBits=2 globalPredictorSize=8192 +indirectHashGHR=true +indirectHashTargets=true +indirectPathLength=3 +indirectSets=256 +indirectTagSize=16 +indirectWays=2 instShiftAmt=2 localCtrBits=2 localHistoryTableSize=2048 localPredictorSize=2048 numThreads=1 +useIndirect=true [system.cpu.dcache] type=Cache @@ -155,13 +176,18 @@ children=tags addr_ranges=0:18446744073709551615 assoc=2 clk_domain=system.cpu_clk_domain +clusivity=mostly_incl +default_p_state=UNDEFINED demand_mshr_reserve=1 eventq_index=0 -forward_snoops=true hit_latency=2 is_read_only=false max_miss_count=0 mshrs=4 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null prefetch_on_access=false prefetcher=Null response_latency=2 @@ -171,6 +197,7 @@ system=system tags=system.cpu.dcache.tags tgts_per_mshr=20 write_buffers=8 +writeback_clean=false cpu_side=system.cpu.dcache_port mem_side=system.cpu.toL2Bus.slave[1] @@ -179,8 +206,13 @@ type=LRU assoc=2 block_size=64 clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 hit_latency=2 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sequential_access=false size=262144 @@ -502,13 +534,18 @@ children=tags addr_ranges=0:18446744073709551615 assoc=2 clk_domain=system.cpu_clk_domain +clusivity=mostly_incl +default_p_state=UNDEFINED demand_mshr_reserve=1 eventq_index=0 -forward_snoops=true hit_latency=2 is_read_only=true max_miss_count=0 mshrs=4 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null prefetch_on_access=false prefetcher=Null response_latency=2 @@ -518,6 +555,7 @@ system=system tags=system.cpu.icache.tags tgts_per_mshr=20 write_buffers=8 +writeback_clean=true cpu_side=system.cpu.icache_port mem_side=system.cpu.toL2Bus.slave[0] @@ -526,8 +564,13 @@ type=LRU assoc=2 block_size=64 clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 hit_latency=2 
+p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sequential_access=false size=131072 @@ -551,13 +594,18 @@ children=tags addr_ranges=0:18446744073709551615 assoc=8 clk_domain=system.cpu_clk_domain +clusivity=mostly_incl +default_p_state=UNDEFINED demand_mshr_reserve=1 eventq_index=0 -forward_snoops=true hit_latency=20 is_read_only=false max_miss_count=0 mshrs=20 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null prefetch_on_access=false prefetcher=Null response_latency=20 @@ -567,6 +615,7 @@ system=system tags=system.cpu.l2cache.tags tgts_per_mshr=12 write_buffers=8 +writeback_clean=false cpu_side=system.cpu.toL2Bus.master[0] mem_side=system.membus.slave[1] @@ -575,19 +624,31 @@ type=LRU assoc=8 block_size=64 clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 hit_latency=20 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sequential_access=false size=2097152 [system.cpu.toL2Bus] type=CoherentXBar +children=snoop_filter clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 forward_latency=0 frontend_latency=1 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +point_of_coherency=false +power_model=Null response_latency=1 -snoop_filter=Null +snoop_filter=system.cpu.toL2Bus.snoop_filter snoop_response_latency=1 system=system use_default_range=false @@ -595,6 +656,13 @@ width=32 master=system.cpu.l2cache.cpu_side slave=system.cpu.icache.mem_side system.cpu.dcache.mem_side +[system.cpu.toL2Bus.snoop_filter] +type=SnoopFilter +eventq_index=0 +lookup_latency=0 +max_capacity=8388608 +system=system + [system.cpu.tracer] type=ExeTracer eventq_index=0 @@ -609,7 +677,7 @@ env= errout=cerr euid=100 eventq_index=0 -executable=/scratch/nilay/GEM5/dist/m5/cpu2000/binaries/alpha/tru64/eon +executable=/arm/projectscratch/randd/systems/dist/cpu2000/binaries/alpha/tru64/eon gid=100 input=cin kvmInSE=false @@ -641,9 +709,15 @@ transition_latency=100000000 [system.membus] type=CoherentXBar clk_domain=system.clk_domain +default_p_state=UNDEFINED eventq_index=0 forward_latency=4 frontend_latency=3 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +point_of_coherency=true +power_model=Null response_latency=2 snoop_filter=Null snoop_response_latency=4 @@ -687,6 +761,7 @@ burst_length=8 channels=1 clk_domain=system.clk_domain conf_table_reported=true +default_p_state=UNDEFINED device_bus_width=8 device_rowbuffer_size=1024 device_size=536870912 @@ -698,7 +773,11 @@ max_accesses_per_row=16 mem_sched_policy=frfcfs min_writes_per_switch=16 null=false +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 page_policy=open_adaptive +power_model=Null range=0:134217727 ranks_per_channel=2 read_buffer_size=32 diff --git a/tests/long/se/30.eon/ref/alpha/tru64/o3-timing/simerr b/tests/long/se/30.eon/ref/alpha/tru64/o3-timing/simerr index 3b53ebc6c..9c10deefc 100755 --- a/tests/long/se/30.eon/ref/alpha/tru64/o3-timing/simerr +++ b/tests/long/se/30.eon/ref/alpha/tru64/o3-timing/simerr @@ -1,5 +1,6 @@ warn: DRAM device capacity (8192 Mbytes) does not match the address range assigned (128 Mbytes) warn: Sockets disabled, not accepting gdb connections +warn: ClockedObject: More than one power state change request encountered within the same simulation tick warn: Prefetch instructions in Alpha do not do anything 
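The config.ini hunks in this patch mostly add newly-dumped parameters (clusivity, writeback_clean, the explicit toL2Bus snoop_filter, the p_state_* and power_model knobs). A hypothetical sketch of listing such additions by diffing an old and a new config.ini with Python's configparser; the two file paths are placeholders, not files in the gem5 tree:

    import configparser

    def load(path):
        # gem5's config.ini is plain INI; keep keys case-sensitive and skip interpolation.
        cp = configparser.ConfigParser(interpolation=None)
        cp.optionxform = str
        cp.read(path)
        return cp

    old = load("old/config.ini")   # placeholder: pre-patch reference config
    new = load("new/config.ini")   # placeholder: post-patch reference config

    for section in new.sections():
        before = set(old[section]) if old.has_section(section) else set()
        for key in sorted(set(new[section]) - before):
            print(f"[{section}] {key} = {new[section][key]}")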
warn: Prefetch instructions in Alpha do not do anything warn: Prefetch instructions in Alpha do not do anything diff --git a/tests/long/se/30.eon/ref/alpha/tru64/o3-timing/simout b/tests/long/se/30.eon/ref/alpha/tru64/o3-timing/simout index d6aa6688c..02658fe82 100755 --- a/tests/long/se/30.eon/ref/alpha/tru64/o3-timing/simout +++ b/tests/long/se/30.eon/ref/alpha/tru64/o3-timing/simout @@ -3,15 +3,15 @@ Redirecting stderr to build/ALPHA/tests/opt/long/se/30.eon/alpha/tru64/o3-timing gem5 Simulator System. http://gem5.org gem5 is copyrighted software; use the --copyright option for details. -gem5 compiled Sep 14 2015 20:54:01 -gem5 started Sep 14 2015 20:55:00 -gem5 executing on ribera.cs.wisc.edu -command line: build/ALPHA/gem5.opt -d build/ALPHA/tests/opt/long/se/30.eon/alpha/tru64/o3-timing -re /scratch/nilay/GEM5/gem5/tests/run.py build/ALPHA/tests/opt/long/se/30.eon/alpha/tru64/o3-timing +gem5 compiled Jul 19 2016 12:23:51 +gem5 started Jul 21 2016 14:09:28 +gem5 executing on e108600-lin, pid 4299 +command line: /work/curdun01/gem5-external.hg/build/ALPHA/gem5.opt -d build/ALPHA/tests/opt/long/se/30.eon/alpha/tru64/o3-timing -re /work/curdun01/gem5-external.hg/tests/testing/../run.py long/se/30.eon/alpha/tru64/o3-timing Global frequency set at 1000000000000 ticks per second info: Entering event queue @ 0. Starting simulation... info: Increasing stack size by one page. Eon, Version 1.1 info: Increasing stack size by one page. -OO-style eon Time= 0.066667 -Exiting @ tick 67874346000 because target called exit() +OO-style eon Time= 0.050000 +Exiting @ tick 64188759000 because target called exit() diff --git a/tests/long/se/30.eon/ref/alpha/tru64/o3-timing/stats.txt b/tests/long/se/30.eon/ref/alpha/tru64/o3-timing/stats.txt index 68a991d52..81cd1b880 100644 --- a/tests/long/se/30.eon/ref/alpha/tru64/o3-timing/stats.txt +++ b/tests/long/se/30.eon/ref/alpha/tru64/o3-timing/stats.txt @@ -4,11 +4,11 @@ sim_seconds 0.064189 # Nu sim_ticks 64188759000 # Number of ticks simulated final_tick 64188759000 # Number of ticks from beginning of simulation (restored from checkpoints and never reset) sim_freq 1000000000000 # Frequency of simulated ticks -host_inst_rate 392159 # Simulator instruction rate (inst/s) -host_op_rate 392159 # Simulator op (including micro ops) rate (op/s) -host_tick_rate 67023124 # Simulator tick rate (ticks/s) -host_mem_usage 303292 # Number of bytes of host memory used -host_seconds 957.71 # Real time elapsed on the host +host_inst_rate 260398 # Simulator instruction rate (inst/s) +host_op_rate 260398 # Simulator op (including micro ops) rate (op/s) +host_tick_rate 44504184 # Simulator tick rate (ticks/s) +host_mem_usage 257256 # Number of bytes of host memory used +host_seconds 1442.31 # Real time elapsed on the host sim_insts 375574794 # Number of instructions simulated sim_ops 375574794 # Number of ops (including micro ops) simulated system.voltage_domain.voltage 1 # Voltage in Volts @@ -985,6 +985,7 @@ system.cpu.toL2Bus.pkt_size_system.cpu.icache.mem_side::system.cpu.l2cache.cpu_s system.cpu.toL2Bus.pkt_size_system.cpu.dcache.mem_side::system.cpu.l2cache.cpu_side 309184 # Cumulative packet size per connected master and slave (bytes) system.cpu.toL2Bus.pkt_size::total 705472 # Cumulative packet size per connected master and slave (bytes) system.cpu.toL2Bus.snoops 0 # Total snoops (count) +system.cpu.toL2Bus.snoopTraffic 0 # Total snoop traffic (bytes) system.cpu.toL2Bus.snoop_fanout::samples 8236 # Request fanout histogram system.cpu.toL2Bus.snoop_fanout::mean 0 # Request 
fanout histogram system.cpu.toL2Bus.snoop_fanout::stdev 0 # Request fanout histogram @@ -1012,6 +1013,7 @@ system.membus.pkt_count::total 14880 # Pa system.membus.pkt_size_system.cpu.l2cache.mem_side::system.physmem.port 476160 # Cumulative packet size per connected master and slave (bytes) system.membus.pkt_size::total 476160 # Cumulative packet size per connected master and slave (bytes) system.membus.snoops 0 # Total snoops (count) +system.membus.snoopTraffic 0 # Total snoop traffic (bytes) system.membus.snoop_fanout::samples 7440 # Request fanout histogram system.membus.snoop_fanout::mean 0 # Request fanout histogram system.membus.snoop_fanout::stdev 0 # Request fanout histogram diff --git a/tests/long/se/30.eon/ref/alpha/tru64/simple-timing/config.ini b/tests/long/se/30.eon/ref/alpha/tru64/simple-timing/config.ini index 427c7c717..7b7341967 100644 --- a/tests/long/se/30.eon/ref/alpha/tru64/simple-timing/config.ini +++ b/tests/long/se/30.eon/ref/alpha/tru64/simple-timing/config.ini @@ -14,7 +14,9 @@ children=clk_domain cpu cpu_clk_domain dvfs_handler membus physmem voltage_domai boot_osflags=a cache_line_size=64 clk_domain=system.clk_domain +default_p_state=UNDEFINED eventq_index=0 +exit_on_work_items=false init_param=0 kernel= kernel_addr_check=true @@ -24,9 +26,16 @@ mem_mode=timing mem_ranges= memories=system.physmem mmap_using_noreserve=false +multi_thread=false num_work_ids=16 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null readfile= symbolfile= +thermal_components= +thermal_model=Null work_begin_ckpt_count=0 work_begin_cpu_id_exit=-1 work_begin_exit_count=0 @@ -51,6 +60,7 @@ branchPred=Null checker=Null clk_domain=system.cpu_clk_domain cpu_id=0 +default_p_state=UNDEFINED do_checkpoint_insts=true do_quiesce=true do_statistics_insts=true @@ -66,6 +76,10 @@ max_insts_any_thread=0 max_loads_all_threads=0 max_loads_any_thread=0 numThreads=1 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null profile=0 progress_interval=0 simpoint_start_insts= @@ -83,13 +97,18 @@ children=tags addr_ranges=0:18446744073709551615 assoc=2 clk_domain=system.cpu_clk_domain +clusivity=mostly_incl +default_p_state=UNDEFINED demand_mshr_reserve=1 eventq_index=0 -forward_snoops=true hit_latency=2 is_read_only=false max_miss_count=0 mshrs=4 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null prefetch_on_access=false prefetcher=Null response_latency=2 @@ -99,6 +118,7 @@ system=system tags=system.cpu.dcache.tags tgts_per_mshr=20 write_buffers=8 +writeback_clean=false cpu_side=system.cpu.dcache_port mem_side=system.cpu.toL2Bus.slave[1] @@ -107,8 +127,13 @@ type=LRU assoc=2 block_size=64 clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 hit_latency=2 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sequential_access=false size=262144 @@ -123,13 +148,18 @@ children=tags addr_ranges=0:18446744073709551615 assoc=2 clk_domain=system.cpu_clk_domain +clusivity=mostly_incl +default_p_state=UNDEFINED demand_mshr_reserve=1 eventq_index=0 -forward_snoops=true hit_latency=2 is_read_only=true max_miss_count=0 mshrs=4 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null prefetch_on_access=false prefetcher=Null response_latency=2 @@ -139,6 +169,7 @@ system=system tags=system.cpu.icache.tags tgts_per_mshr=20 write_buffers=8 
+writeback_clean=true cpu_side=system.cpu.icache_port mem_side=system.cpu.toL2Bus.slave[0] @@ -147,8 +178,13 @@ type=LRU assoc=2 block_size=64 clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 hit_latency=2 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sequential_access=false size=131072 @@ -172,13 +208,18 @@ children=tags addr_ranges=0:18446744073709551615 assoc=8 clk_domain=system.cpu_clk_domain +clusivity=mostly_incl +default_p_state=UNDEFINED demand_mshr_reserve=1 eventq_index=0 -forward_snoops=true hit_latency=20 is_read_only=false max_miss_count=0 mshrs=20 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null prefetch_on_access=false prefetcher=Null response_latency=20 @@ -188,6 +229,7 @@ system=system tags=system.cpu.l2cache.tags tgts_per_mshr=12 write_buffers=8 +writeback_clean=false cpu_side=system.cpu.toL2Bus.master[0] mem_side=system.membus.slave[1] @@ -196,19 +238,31 @@ type=LRU assoc=8 block_size=64 clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 hit_latency=20 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sequential_access=false size=2097152 [system.cpu.toL2Bus] type=CoherentXBar +children=snoop_filter clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 forward_latency=0 frontend_latency=1 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +point_of_coherency=false +power_model=Null response_latency=1 -snoop_filter=Null +snoop_filter=system.cpu.toL2Bus.snoop_filter snoop_response_latency=1 system=system use_default_range=false @@ -216,6 +270,13 @@ width=32 master=system.cpu.l2cache.cpu_side slave=system.cpu.icache.mem_side system.cpu.dcache.mem_side +[system.cpu.toL2Bus.snoop_filter] +type=SnoopFilter +eventq_index=0 +lookup_latency=0 +max_capacity=8388608 +system=system + [system.cpu.tracer] type=ExeTracer eventq_index=0 @@ -230,7 +291,7 @@ env= errout=cerr euid=100 eventq_index=0 -executable=/scratch/nilay/GEM5/dist/m5/cpu2000/binaries/alpha/tru64/eon +executable=/arm/projectscratch/randd/systems/dist/cpu2000/binaries/alpha/tru64/eon gid=100 input=cin kvmInSE=false @@ -262,9 +323,15 @@ transition_latency=100000000 [system.membus] type=CoherentXBar clk_domain=system.clk_domain +default_p_state=UNDEFINED eventq_index=0 forward_latency=4 frontend_latency=3 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +point_of_coherency=true +power_model=Null response_latency=2 snoop_filter=Null snoop_response_latency=4 @@ -279,11 +346,16 @@ type=SimpleMemory bandwidth=73.000000 clk_domain=system.clk_domain conf_table_reported=true +default_p_state=UNDEFINED eventq_index=0 in_addr_map=true latency=30000 latency_var=0 null=false +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null range=0:134217727 port=system.membus.master[0] diff --git a/tests/long/se/30.eon/ref/alpha/tru64/simple-timing/simerr b/tests/long/se/30.eon/ref/alpha/tru64/simple-timing/simerr index 664365742..870cfd899 100755 --- a/tests/long/se/30.eon/ref/alpha/tru64/simple-timing/simerr +++ b/tests/long/se/30.eon/ref/alpha/tru64/simple-timing/simerr @@ -1,4 +1,5 @@ warn: Sockets disabled, not accepting gdb connections +warn: ClockedObject: More than one power state change request encountered within the same simulation tick warn: Prefetch instructions 
in Alpha do not do anything warn: Prefetch instructions in Alpha do not do anything warn: Prefetch instructions in Alpha do not do anything diff --git a/tests/long/se/30.eon/ref/alpha/tru64/simple-timing/simout b/tests/long/se/30.eon/ref/alpha/tru64/simple-timing/simout index ab67caf1c..1c6cb75e4 100755 --- a/tests/long/se/30.eon/ref/alpha/tru64/simple-timing/simout +++ b/tests/long/se/30.eon/ref/alpha/tru64/simple-timing/simout @@ -1,14 +1,17 @@ +Redirecting stdout to build/ALPHA/tests/opt/long/se/30.eon/alpha/tru64/simple-timing/simout +Redirecting stderr to build/ALPHA/tests/opt/long/se/30.eon/alpha/tru64/simple-timing/simerr gem5 Simulator System. http://gem5.org gem5 is copyrighted software; use the --copyright option for details. -gem5 compiled Jan 22 2014 16:27:55 -gem5 started Jan 22 2014 17:48:27 -gem5 executing on u200540-lin -command line: build/ALPHA/gem5.opt -d build/ALPHA/tests/opt/long/se/30.eon/alpha/tru64/simple-timing -re tests/run.py build/ALPHA/tests/opt/long/se/30.eon/alpha/tru64/simple-timing +gem5 compiled Jul 19 2016 12:23:51 +gem5 started Jul 21 2016 14:09:28 +gem5 executing on e108600-lin, pid 4302 +command line: /work/curdun01/gem5-external.hg/build/ALPHA/gem5.opt -d build/ALPHA/tests/opt/long/se/30.eon/alpha/tru64/simple-timing -re /work/curdun01/gem5-external.hg/tests/testing/../run.py long/se/30.eon/alpha/tru64/simple-timing + Global frequency set at 1000000000000 ticks per second info: Entering event queue @ 0. Starting simulation... info: Increasing stack size by one page. Eon, Version 1.1 info: Increasing stack size by one page. OO-style eon Time= 0.566667 -Exiting @ tick 567335093000 because target called exit() +Exiting @ tick 567385356500 because target called exit() diff --git a/tests/long/se/30.eon/ref/alpha/tru64/simple-timing/stats.txt b/tests/long/se/30.eon/ref/alpha/tru64/simple-timing/stats.txt index d0130300a..9532c68be 100644 --- a/tests/long/se/30.eon/ref/alpha/tru64/simple-timing/stats.txt +++ b/tests/long/se/30.eon/ref/alpha/tru64/simple-timing/stats.txt @@ -4,11 +4,11 @@ sim_seconds 0.567385 # Nu sim_ticks 567385356500 # Number of ticks simulated final_tick 567385356500 # Number of ticks from beginning of simulation (restored from checkpoints and never reset) sim_freq 1000000000000 # Frequency of simulated ticks -host_inst_rate 1687815 # Simulator instruction rate (inst/s) -host_op_rate 1687815 # Simulator op (including micro ops) rate (op/s) -host_tick_rate 2402123351 # Simulator tick rate (ticks/s) -host_mem_usage 300208 # Number of bytes of host memory used -host_seconds 236.20 # Real time elapsed on the host +host_inst_rate 1154582 # Simulator instruction rate (inst/s) +host_op_rate 1154582 # Simulator op (including micro ops) rate (op/s) +host_tick_rate 1643217424 # Simulator tick rate (ticks/s) +host_mem_usage 254440 # Number of bytes of host memory used +host_seconds 345.29 # Real time elapsed on the host sim_insts 398664609 # Number of instructions simulated sim_ops 398664609 # Number of ops (including micro ops) simulated system.voltage_domain.voltage 1 # Voltage in Volts @@ -500,6 +500,7 @@ system.cpu.toL2Bus.pkt_size_system.cpu.icache.mem_side::system.cpu.l2cache.cpu_s system.cpu.toL2Bus.pkt_size_system.cpu.dcache.mem_side::system.cpu.l2cache.cpu_side 307264 # Cumulative packet size per connected master and slave (bytes) system.cpu.toL2Bus.pkt_size::total 655552 # Cumulative packet size per connected master and slave (bytes) system.cpu.toL2Bus.snoops 0 # Total snoops (count) +system.cpu.toL2Bus.snoopTraffic 0 # Total snoop traffic 
(bytes) system.cpu.toL2Bus.snoop_fanout::samples 7825 # Request fanout histogram system.cpu.toL2Bus.snoop_fanout::mean 0 # Request fanout histogram system.cpu.toL2Bus.snoop_fanout::stdev 0 # Request fanout histogram @@ -527,6 +528,7 @@ system.membus.pkt_count::total 14348 # Pa system.membus.pkt_size_system.cpu.l2cache.mem_side::system.physmem.port 459136 # Cumulative packet size per connected master and slave (bytes) system.membus.pkt_size::total 459136 # Cumulative packet size per connected master and slave (bytes) system.membus.snoops 0 # Total snoops (count) +system.membus.snoopTraffic 0 # Total snoop traffic (bytes) system.membus.snoop_fanout::samples 7174 # Request fanout histogram system.membus.snoop_fanout::mean 0 # Request fanout histogram system.membus.snoop_fanout::stdev 0 # Request fanout histogram diff --git a/tests/long/se/30.eon/ref/arm/linux/minor-timing/config.ini b/tests/long/se/30.eon/ref/arm/linux/minor-timing/config.ini index c0afc2364..76d7daa42 100644 --- a/tests/long/se/30.eon/ref/arm/linux/minor-timing/config.ini +++ b/tests/long/se/30.eon/ref/arm/linux/minor-timing/config.ini @@ -14,7 +14,9 @@ children=clk_domain cpu cpu_clk_domain dvfs_handler membus physmem voltage_domai boot_osflags=a cache_line_size=64 clk_domain=system.clk_domain +default_p_state=UNDEFINED eventq_index=0 +exit_on_work_items=false init_param=0 kernel= kernel_addr_check=true @@ -24,9 +26,16 @@ mem_mode=timing mem_ranges= memories=system.physmem mmap_using_noreserve=false +multi_thread=false num_work_ids=16 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null readfile= symbolfile= +thermal_components= +thermal_model=Null work_begin_ckpt_count=0 work_begin_cpu_id_exit=-1 work_begin_exit_count=0 @@ -55,6 +64,7 @@ decodeCycleInput=true decodeInputBufferSize=3 decodeInputWidth=2 decodeToExecuteForwardDelay=1 +default_p_state=UNDEFINED do_checkpoint_insts=true do_quiesce=true do_statistics_insts=true @@ -99,12 +109,17 @@ max_insts_any_thread=0 max_loads_all_threads=0 max_loads_any_thread=0 numThreads=1 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null profile=0 progress_interval=0 simpoint_start_insts= socket_id=0 switched_out=false system=system +threadPolicy=RoundRobin tracer=system.cpu.tracer workload=system.cpu.workload dcache_port=system.cpu.dcache.cpu_side @@ -120,11 +135,18 @@ choicePredictorSize=8192 eventq_index=0 globalCtrBits=2 globalPredictorSize=8192 +indirectHashGHR=true +indirectHashTargets=true +indirectPathLength=3 +indirectSets=256 +indirectTagSize=16 +indirectWays=2 instShiftAmt=2 localCtrBits=2 localHistoryTableSize=2048 localPredictorSize=2048 numThreads=1 +useIndirect=true [system.cpu.dcache] type=Cache @@ -132,13 +154,18 @@ children=tags addr_ranges=0:18446744073709551615 assoc=2 clk_domain=system.cpu_clk_domain +clusivity=mostly_incl +default_p_state=UNDEFINED demand_mshr_reserve=1 eventq_index=0 -forward_snoops=true hit_latency=2 is_read_only=false max_miss_count=0 mshrs=4 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null prefetch_on_access=false prefetcher=Null response_latency=2 @@ -148,6 +175,7 @@ system=system tags=system.cpu.dcache.tags tgts_per_mshr=20 write_buffers=8 +writeback_clean=false cpu_side=system.cpu.dcache_port mem_side=system.cpu.toL2Bus.slave[1] @@ -156,8 +184,13 @@ type=LRU assoc=2 block_size=64 clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 hit_latency=2 
+p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sequential_access=false size=262144 @@ -180,9 +213,14 @@ walker=system.cpu.dstage2_mmu.stage2_tlb.walker [system.cpu.dstage2_mmu.stage2_tlb.walker] type=ArmTableWalker clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 is_stage2=true num_squash_per_cycle=2 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sys=system [system.cpu.dtb] @@ -196,9 +234,14 @@ walker=system.cpu.dtb.walker [system.cpu.dtb.walker] type=ArmTableWalker clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 is_stage2=false num_squash_per_cycle=2 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sys=system port=system.cpu.toL2Bus.slave[3] @@ -591,13 +634,18 @@ children=tags addr_ranges=0:18446744073709551615 assoc=2 clk_domain=system.cpu_clk_domain +clusivity=mostly_incl +default_p_state=UNDEFINED demand_mshr_reserve=1 eventq_index=0 -forward_snoops=true hit_latency=2 is_read_only=true max_miss_count=0 mshrs=4 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null prefetch_on_access=false prefetcher=Null response_latency=2 @@ -607,6 +655,7 @@ system=system tags=system.cpu.icache.tags tgts_per_mshr=20 write_buffers=8 +writeback_clean=true cpu_side=system.cpu.icache_port mem_side=system.cpu.toL2Bus.slave[0] @@ -615,8 +664,13 @@ type=LRU assoc=2 block_size=64 clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 hit_latency=2 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sequential_access=false size=131072 @@ -626,6 +680,7 @@ eventq_index=0 [system.cpu.isa] type=ArmISA +decoderFlavour=Generic eventq_index=0 fpsid=1090793632 id_aa64afr0_el1=0 @@ -673,9 +728,14 @@ walker=system.cpu.istage2_mmu.stage2_tlb.walker [system.cpu.istage2_mmu.stage2_tlb.walker] type=ArmTableWalker clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 is_stage2=true num_squash_per_cycle=2 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sys=system [system.cpu.itb] @@ -689,9 +749,14 @@ walker=system.cpu.itb.walker [system.cpu.itb.walker] type=ArmTableWalker clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 is_stage2=false num_squash_per_cycle=2 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sys=system port=system.cpu.toL2Bus.slave[2] @@ -701,13 +766,18 @@ children=tags addr_ranges=0:18446744073709551615 assoc=8 clk_domain=system.cpu_clk_domain +clusivity=mostly_incl +default_p_state=UNDEFINED demand_mshr_reserve=1 eventq_index=0 -forward_snoops=true hit_latency=20 is_read_only=false max_miss_count=0 mshrs=20 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null prefetch_on_access=false prefetcher=Null response_latency=20 @@ -717,6 +787,7 @@ system=system tags=system.cpu.l2cache.tags tgts_per_mshr=12 write_buffers=8 +writeback_clean=false cpu_side=system.cpu.toL2Bus.master[0] mem_side=system.membus.slave[1] @@ -725,19 +796,31 @@ type=LRU assoc=8 block_size=64 clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 hit_latency=20 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 
+power_model=Null sequential_access=false size=2097152 [system.cpu.toL2Bus] type=CoherentXBar +children=snoop_filter clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 forward_latency=0 frontend_latency=1 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +point_of_coherency=false +power_model=Null response_latency=1 -snoop_filter=Null +snoop_filter=system.cpu.toL2Bus.snoop_filter snoop_response_latency=1 system=system use_default_range=false @@ -745,6 +828,13 @@ width=32 master=system.cpu.l2cache.cpu_side slave=system.cpu.icache.mem_side system.cpu.dcache.mem_side system.cpu.itb.walker.port system.cpu.dtb.walker.port +[system.cpu.toL2Bus.snoop_filter] +type=SnoopFilter +eventq_index=0 +lookup_latency=0 +max_capacity=8388608 +system=system + [system.cpu.tracer] type=ExeTracer eventq_index=0 @@ -759,7 +849,7 @@ env= errout=cerr euid=100 eventq_index=0 -executable=/scratch/nilay/GEM5/dist/m5/cpu2000/binaries/arm/linux/eon +executable=/arm/projectscratch/randd/systems/dist/cpu2000/binaries/arm/linux/eon gid=100 input=cin kvmInSE=false @@ -791,9 +881,15 @@ transition_latency=100000000 [system.membus] type=CoherentXBar clk_domain=system.clk_domain +default_p_state=UNDEFINED eventq_index=0 forward_latency=4 frontend_latency=3 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +point_of_coherency=true +power_model=Null response_latency=2 snoop_filter=Null snoop_response_latency=4 @@ -837,6 +933,7 @@ burst_length=8 channels=1 clk_domain=system.clk_domain conf_table_reported=true +default_p_state=UNDEFINED device_bus_width=8 device_rowbuffer_size=1024 device_size=536870912 @@ -848,7 +945,11 @@ max_accesses_per_row=16 mem_sched_policy=frfcfs min_writes_per_switch=16 null=false +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 page_policy=open_adaptive +power_model=Null range=0:134217727 ranks_per_channel=2 read_buffer_size=32 diff --git a/tests/long/se/30.eon/ref/arm/linux/minor-timing/simerr b/tests/long/se/30.eon/ref/arm/linux/minor-timing/simerr index 62f25930d..497b78d8e 100755 --- a/tests/long/se/30.eon/ref/arm/linux/minor-timing/simerr +++ b/tests/long/se/30.eon/ref/arm/linux/minor-timing/simerr @@ -1,5 +1,6 @@ warn: DRAM device capacity (8192 Mbytes) does not match the address range assigned (128 Mbytes) warn: Sockets disabled, not accepting gdb connections +warn: ClockedObject: More than one power state change request encountered within the same simulation tick getting pixel output filename pixels_out.cook opening control file chair.control.cook opening camera file chair.camera diff --git a/tests/long/se/30.eon/ref/arm/linux/minor-timing/simout b/tests/long/se/30.eon/ref/arm/linux/minor-timing/simout index 8d785cb1f..ab196f487 100755 --- a/tests/long/se/30.eon/ref/arm/linux/minor-timing/simout +++ b/tests/long/se/30.eon/ref/arm/linux/minor-timing/simout @@ -3,10 +3,10 @@ Redirecting stderr to build/ARM/tests/opt/long/se/30.eon/arm/linux/minor-timing/ gem5 Simulator System. http://gem5.org gem5 is copyrighted software; use the --copyright option for details. 
-gem5 compiled Sep 14 2015 23:29:19
-gem5 started Sep 15 2015 01:25:17
-gem5 executing on ribera.cs.wisc.edu
-command line: build/ARM/gem5.opt -d build/ARM/tests/opt/long/se/30.eon/arm/linux/minor-timing -re /scratch/nilay/GEM5/gem5/tests/run.py build/ARM/tests/opt/long/se/30.eon/arm/linux/minor-timing
+gem5 compiled Jul 21 2016 14:37:41
+gem5 started Jul 21 2016 14:38:22
+gem5 executing on e108600-lin, pid 23074
+command line: /work/curdun01/gem5-external.hg/build/ARM/gem5.opt -d build/ARM/tests/opt/long/se/30.eon/arm/linux/minor-timing -re /work/curdun01/gem5-external.hg/tests/testing/../run.py long/se/30.eon/arm/linux/minor-timing
Global frequency set at 1000000000000 ticks per second
info: Entering event queue @ 0. Starting simulation...
@@ -15,5 +15,5 @@ Eon, Version 1.1
info: Increasing stack size by one page.
info: Increasing stack size by one page.
-OO-style eon Time= 0.210000
-Exiting @ tick 215505832500 because target called exit()
+OO-style eon Time= 0.220000
+Exiting @ tick 225030243000 because target called exit()
diff --git a/tests/long/se/30.eon/ref/arm/linux/minor-timing/stats.txt b/tests/long/se/30.eon/ref/arm/linux/minor-timing/stats.txt
index 521f1135c..0b49d498f 100644
--- a/tests/long/se/30.eon/ref/arm/linux/minor-timing/stats.txt
+++ b/tests/long/se/30.eon/ref/arm/linux/minor-timing/stats.txt
@@ -1,43 +1,43 @@
---------- Begin Simulation Statistics ----------
-sim_seconds 0.211715 # Number of seconds simulated
-sim_ticks 211714953000 # Number of ticks simulated
-final_tick 211714953000 # Number of ticks from beginning of simulation (restored from checkpoints and never reset)
+sim_seconds 0.225030 # Number of seconds simulated
+sim_ticks 225030243000 # Number of ticks simulated
+final_tick 225030243000 # Number of ticks from beginning of simulation (restored from checkpoints and never reset)
sim_freq 1000000000000 # Frequency of simulated ticks
-host_inst_rate 271910 # Simulator instruction rate (inst/s)
-host_op_rate 326458 # Simulator op (including micro ops) rate (op/s)
-host_tick_rate 210840466 # Simulator tick rate (ticks/s)
-host_mem_usage 322892 # Number of bytes of host memory used
-host_seconds 1004.15 # Real time elapsed on the host
-sim_insts 273037857 # Number of instructions simulated
-sim_ops 327812214 # Number of ops (including micro ops) simulated
+host_inst_rate 131394 # Simulator instruction rate (inst/s)
+host_op_rate 157754 # Simulator op (including micro ops) rate (op/s)
+host_tick_rate 108291606 # Simulator tick rate (ticks/s)
+host_mem_usage 275248 # Number of bytes of host memory used
+host_seconds 2078.00 # Real time elapsed on the host
+sim_insts 273037855 # Number of instructions simulated
+sim_ops 327812212 # Number of ops (including micro ops) simulated
system.voltage_domain.voltage 1 # Voltage in Volts
system.clk_domain.clock 1000 # Clock period in ticks
-system.physmem.pwrStateResidencyTicks::UNDEFINED 211714953000 # Cumulative time (in ticks) in various power states
-system.physmem.bytes_read::cpu.inst 219072 # Number of bytes read from this memory
+system.physmem.pwrStateResidencyTicks::UNDEFINED 225030243000 # Cumulative time (in ticks) in various power states
+system.physmem.bytes_read::cpu.inst 219136 # Number of bytes read from this memory
system.physmem.bytes_read::cpu.data 266432 # Number of bytes read from this memory
-system.physmem.bytes_read::total 485504 # Number of bytes read from this memory
-system.physmem.bytes_inst_read::cpu.inst 219072 # Number of instructions bytes read from
this memory -system.physmem.bytes_inst_read::total 219072 # Number of instructions bytes read from this memory -system.physmem.num_reads::cpu.inst 3423 # Number of read requests responded to by this memory +system.physmem.bytes_read::total 485568 # Number of bytes read from this memory +system.physmem.bytes_inst_read::cpu.inst 219136 # Number of instructions bytes read from this memory +system.physmem.bytes_inst_read::total 219136 # Number of instructions bytes read from this memory +system.physmem.num_reads::cpu.inst 3424 # Number of read requests responded to by this memory system.physmem.num_reads::cpu.data 4163 # Number of read requests responded to by this memory -system.physmem.num_reads::total 7586 # Number of read requests responded to by this memory -system.physmem.bw_read::cpu.inst 1034750 # Total read bandwidth from this memory (bytes/s) -system.physmem.bw_read::cpu.data 1258447 # Total read bandwidth from this memory (bytes/s) -system.physmem.bw_read::total 2293197 # Total read bandwidth from this memory (bytes/s) -system.physmem.bw_inst_read::cpu.inst 1034750 # Instruction read bandwidth from this memory (bytes/s) -system.physmem.bw_inst_read::total 1034750 # Instruction read bandwidth from this memory (bytes/s) -system.physmem.bw_total::cpu.inst 1034750 # Total bandwidth to/from this memory (bytes/s) -system.physmem.bw_total::cpu.data 1258447 # Total bandwidth to/from this memory (bytes/s) -system.physmem.bw_total::total 2293197 # Total bandwidth to/from this memory (bytes/s) -system.physmem.readReqs 7586 # Number of read requests accepted +system.physmem.num_reads::total 7587 # Number of read requests responded to by this memory +system.physmem.bw_read::cpu.inst 973807 # Total read bandwidth from this memory (bytes/s) +system.physmem.bw_read::cpu.data 1183983 # Total read bandwidth from this memory (bytes/s) +system.physmem.bw_read::total 2157790 # Total read bandwidth from this memory (bytes/s) +system.physmem.bw_inst_read::cpu.inst 973807 # Instruction read bandwidth from this memory (bytes/s) +system.physmem.bw_inst_read::total 973807 # Instruction read bandwidth from this memory (bytes/s) +system.physmem.bw_total::cpu.inst 973807 # Total bandwidth to/from this memory (bytes/s) +system.physmem.bw_total::cpu.data 1183983 # Total bandwidth to/from this memory (bytes/s) +system.physmem.bw_total::total 2157790 # Total bandwidth to/from this memory (bytes/s) +system.physmem.readReqs 7587 # Number of read requests accepted system.physmem.writeReqs 0 # Number of write requests accepted -system.physmem.readBursts 7586 # Number of DRAM read bursts, including those serviced by the write queue +system.physmem.readBursts 7587 # Number of DRAM read bursts, including those serviced by the write queue system.physmem.writeBursts 0 # Number of DRAM write bursts, including those merged in the write queue -system.physmem.bytesReadDRAM 485504 # Total number of bytes read from DRAM +system.physmem.bytesReadDRAM 485568 # Total number of bytes read from DRAM system.physmem.bytesReadWrQ 0 # Total number of bytes read from write queue system.physmem.bytesWritten 0 # Total number of bytes written to DRAM -system.physmem.bytesReadSys 485504 # Total read bytes from the system interface side +system.physmem.bytesReadSys 485568 # Total read bytes from the system interface side system.physmem.bytesWrittenSys 0 # Total written bytes from the system interface side system.physmem.servicedByWrQ 0 # Number of DRAM read bursts serviced by the write queue system.physmem.mergedWrBursts 0 # Number of DRAM write 
bursts merged with an existing one @@ -51,13 +51,13 @@ system.physmem.perBankRdBursts::5 349 # Pe system.physmem.perBankRdBursts::6 171 # Per bank write bursts system.physmem.perBankRdBursts::7 228 # Per bank write bursts system.physmem.perBankRdBursts::8 208 # Per bank write bursts -system.physmem.perBankRdBursts::9 310 # Per bank write bursts +system.physmem.perBankRdBursts::9 309 # Per bank write bursts system.physmem.perBankRdBursts::10 343 # Per bank write bursts system.physmem.perBankRdBursts::11 428 # Per bank write bursts system.physmem.perBankRdBursts::12 553 # Per bank write bursts system.physmem.perBankRdBursts::13 705 # Per bank write bursts -system.physmem.perBankRdBursts::14 638 # Per bank write bursts -system.physmem.perBankRdBursts::15 542 # Per bank write bursts +system.physmem.perBankRdBursts::14 639 # Per bank write bursts +system.physmem.perBankRdBursts::15 543 # Per bank write bursts system.physmem.perBankWrBursts::0 0 # Per bank write bursts system.physmem.perBankWrBursts::1 0 # Per bank write bursts system.physmem.perBankWrBursts::2 0 # Per bank write bursts @@ -76,14 +76,14 @@ system.physmem.perBankWrBursts::14 0 # Pe system.physmem.perBankWrBursts::15 0 # Per bank write bursts system.physmem.numRdRetry 0 # Number of times read queue was full causing retry system.physmem.numWrRetry 0 # Number of times write queue was full causing retry -system.physmem.totGap 211714708500 # Total gap between requests +system.physmem.totGap 225029996000 # Total gap between requests system.physmem.readPktSize::0 0 # Read request sizes (log2) system.physmem.readPktSize::1 0 # Read request sizes (log2) system.physmem.readPktSize::2 0 # Read request sizes (log2) system.physmem.readPktSize::3 0 # Read request sizes (log2) system.physmem.readPktSize::4 0 # Read request sizes (log2) system.physmem.readPktSize::5 0 # Read request sizes (log2) -system.physmem.readPktSize::6 7586 # Read request sizes (log2) +system.physmem.readPktSize::6 7587 # Read request sizes (log2) system.physmem.writePktSize::0 0 # Write request sizes (log2) system.physmem.writePktSize::1 0 # Write request sizes (log2) system.physmem.writePktSize::2 0 # Write request sizes (log2) @@ -91,9 +91,9 @@ system.physmem.writePktSize::3 0 # Wr system.physmem.writePktSize::4 0 # Write request sizes (log2) system.physmem.writePktSize::5 0 # Write request sizes (log2) system.physmem.writePktSize::6 0 # Write request sizes (log2) -system.physmem.rdQLenPdf::0 6629 # What read queue length does an incoming req see -system.physmem.rdQLenPdf::1 897 # What read queue length does an incoming req see -system.physmem.rdQLenPdf::2 60 # What read queue length does an incoming req see +system.physmem.rdQLenPdf::0 6713 # What read queue length does an incoming req see +system.physmem.rdQLenPdf::1 823 # What read queue length does an incoming req see +system.physmem.rdQLenPdf::2 51 # What read queue length does an incoming req see system.physmem.rdQLenPdf::3 0 # What read queue length does an incoming req see system.physmem.rdQLenPdf::4 0 # What read queue length does an incoming req see system.physmem.rdQLenPdf::5 0 # What read queue length does an incoming req see @@ -187,86 +187,86 @@ system.physmem.wrQLenPdf::60 0 # Wh system.physmem.wrQLenPdf::61 0 # What write queue length does an incoming req see system.physmem.wrQLenPdf::62 0 # What write queue length does an incoming req see system.physmem.wrQLenPdf::63 0 # What write queue length does an incoming req see -system.physmem.bytesPerActivate::samples 1530 # Bytes accessed per row activation 
-system.physmem.bytesPerActivate::mean 316.067974 # Bytes accessed per row activation -system.physmem.bytesPerActivate::gmean 186.296863 # Bytes accessed per row activation -system.physmem.bytesPerActivate::stdev 330.878934 # Bytes accessed per row activation -system.physmem.bytesPerActivate::0-127 560 36.60% 36.60% # Bytes accessed per row activation -system.physmem.bytesPerActivate::128-255 363 23.73% 60.33% # Bytes accessed per row activation -system.physmem.bytesPerActivate::256-383 160 10.46% 70.78% # Bytes accessed per row activation -system.physmem.bytesPerActivate::384-511 74 4.84% 75.62% # Bytes accessed per row activation -system.physmem.bytesPerActivate::512-639 70 4.58% 80.20% # Bytes accessed per row activation -system.physmem.bytesPerActivate::640-767 59 3.86% 84.05% # Bytes accessed per row activation -system.physmem.bytesPerActivate::768-895 34 2.22% 86.27% # Bytes accessed per row activation -system.physmem.bytesPerActivate::896-1023 28 1.83% 88.10% # Bytes accessed per row activation -system.physmem.bytesPerActivate::1024-1151 182 11.90% 100.00% # Bytes accessed per row activation -system.physmem.bytesPerActivate::total 1530 # Bytes accessed per row activation -system.physmem.totQLat 52630500 # Total ticks spent queuing -system.physmem.totMemAccLat 194868000 # Total ticks spent from burst creation until serviced by the DRAM -system.physmem.totBusLat 37930000 # Total ticks spent in databus transfers -system.physmem.avgQLat 6937.85 # Average queueing delay per DRAM burst +system.physmem.bytesPerActivate::samples 1511 # Bytes accessed per row activation +system.physmem.bytesPerActivate::mean 320.084712 # Bytes accessed per row activation +system.physmem.bytesPerActivate::gmean 189.611752 # Bytes accessed per row activation +system.physmem.bytesPerActivate::stdev 331.049486 # Bytes accessed per row activation +system.physmem.bytesPerActivate::0-127 552 36.53% 36.53% # Bytes accessed per row activation +system.physmem.bytesPerActivate::128-255 328 21.71% 58.24% # Bytes accessed per row activation +system.physmem.bytesPerActivate::256-383 178 11.78% 70.02% # Bytes accessed per row activation +system.physmem.bytesPerActivate::384-511 86 5.69% 75.71% # Bytes accessed per row activation +system.physmem.bytesPerActivate::512-639 72 4.77% 80.48% # Bytes accessed per row activation +system.physmem.bytesPerActivate::640-767 49 3.24% 83.72% # Bytes accessed per row activation +system.physmem.bytesPerActivate::768-895 32 2.12% 85.84% # Bytes accessed per row activation +system.physmem.bytesPerActivate::896-1023 31 2.05% 87.89% # Bytes accessed per row activation +system.physmem.bytesPerActivate::1024-1151 183 12.11% 100.00% # Bytes accessed per row activation +system.physmem.bytesPerActivate::total 1511 # Bytes accessed per row activation +system.physmem.totQLat 51456750 # Total ticks spent queuing +system.physmem.totMemAccLat 193713000 # Total ticks spent from burst creation until serviced by the DRAM +system.physmem.totBusLat 37935000 # Total ticks spent in databus transfers +system.physmem.avgQLat 6782.23 # Average queueing delay per DRAM burst system.physmem.avgBusLat 5000.00 # Average bus latency per DRAM burst -system.physmem.avgMemAccLat 25687.85 # Average memory access latency per DRAM burst -system.physmem.avgRdBW 2.29 # Average DRAM read bandwidth in MiByte/s +system.physmem.avgMemAccLat 25532.23 # Average memory access latency per DRAM burst +system.physmem.avgRdBW 2.16 # Average DRAM read bandwidth in MiByte/s system.physmem.avgWrBW 0.00 # Average achieved write bandwidth in 
MiByte/s -system.physmem.avgRdBWSys 2.29 # Average system read bandwidth in MiByte/s +system.physmem.avgRdBWSys 2.16 # Average system read bandwidth in MiByte/s system.physmem.avgWrBWSys 0.00 # Average system write bandwidth in MiByte/s system.physmem.peakBW 12800.00 # Theoretical peak bandwidth in MiByte/s system.physmem.busUtil 0.02 # Data bus utilization in percentage system.physmem.busUtilRead 0.02 # Data bus utilization in percentage for reads system.physmem.busUtilWrite 0.00 # Data bus utilization in percentage for writes -system.physmem.avgRdQLen 1.05 # Average read queue length when enqueuing +system.physmem.avgRdQLen 1.00 # Average read queue length when enqueuing system.physmem.avgWrQLen 0.00 # Average write queue length when enqueuing -system.physmem.readRowHits 6048 # Number of row buffer hits during reads +system.physmem.readRowHits 6068 # Number of row buffer hits during reads system.physmem.writeRowHits 0 # Number of row buffer hits during writes -system.physmem.readRowHitRate 79.73 # Row buffer hit rate for reads +system.physmem.readRowHitRate 79.98 # Row buffer hit rate for reads system.physmem.writeRowHitRate nan # Row buffer hit rate for writes -system.physmem.avgGap 27908609.08 # Average gap between requests -system.physmem.pageHitRate 79.73 # Row buffer hit rate, read and write combined -system.physmem_0.actEnergy 5080320 # Energy for activate commands per rank (pJ) -system.physmem_0.preEnergy 2772000 # Energy for precharge commands per rank (pJ) -system.physmem_0.readEnergy 29905200 # Energy for read commands per rank (pJ) +system.physmem.avgGap 29659944.11 # Average gap between requests +system.physmem.pageHitRate 79.98 # Row buffer hit rate, read and write combined +system.physmem_0.actEnergy 5012280 # Energy for activate commands per rank (pJ) +system.physmem_0.preEnergy 2734875 # Energy for precharge commands per rank (pJ) +system.physmem_0.readEnergy 29881800 # Energy for read commands per rank (pJ) system.physmem_0.writeEnergy 0 # Energy for write commands per rank (pJ) -system.physmem_0.refreshEnergy 13827746400 # Energy for refresh commands per rank (pJ) -system.physmem_0.actBackEnergy 5529396150 # Energy for active background per rank (pJ) -system.physmem_0.preBackEnergy 122174691000 # Energy for precharge background per rank (pJ) -system.physmem_0.totalEnergy 141569591070 # Total energy per rank (pJ) -system.physmem_0.averagePower 668.700877 # Core power per rank (mW) -system.physmem_0.memoryStateTime::IDLE 203247000500 # Time in different power states -system.physmem_0.memoryStateTime::REF 7069400000 # Time in different power states +system.physmem_0.refreshEnergy 14697384000 # Energy for refresh commands per rank (pJ) +system.physmem_0.actBackEnergy 5831471925 # Energy for active background per rank (pJ) +system.physmem_0.preBackEnergy 129898404750 # Energy for precharge background per rank (pJ) +system.physmem_0.totalEnergy 150464889630 # Total energy per rank (pJ) +system.physmem_0.averagePower 668.664832 # Core power per rank (mW) +system.physmem_0.memoryStateTime::IDLE 216095628500 # Time in different power states +system.physmem_0.memoryStateTime::REF 7514000000 # Time in different power states system.physmem_0.memoryStateTime::PRE_PDN 0 # Time in different power states -system.physmem_0.memoryStateTime::ACT 1392729000 # Time in different power states +system.physmem_0.memoryStateTime::ACT 1413270250 # Time in different power states system.physmem_0.memoryStateTime::ACT_PDN 0 # Time in different power states -system.physmem_1.actEnergy 6463800 # 
Energy for activate commands per rank (pJ) -system.physmem_1.preEnergy 3526875 # Energy for precharge commands per rank (pJ) -system.physmem_1.readEnergy 28992600 # Energy for read commands per rank (pJ) +system.physmem_1.actEnergy 6380640 # Energy for activate commands per rank (pJ) +system.physmem_1.preEnergy 3481500 # Energy for precharge commands per rank (pJ) +system.physmem_1.readEnergy 29000400 # Energy for read commands per rank (pJ) system.physmem_1.writeEnergy 0 # Energy for write commands per rank (pJ) -system.physmem_1.refreshEnergy 13827746400 # Energy for refresh commands per rank (pJ) -system.physmem_1.actBackEnergy 5726317185 # Energy for active background per rank (pJ) -system.physmem_1.preBackEnergy 122001953250 # Energy for precharge background per rank (pJ) -system.physmem_1.totalEnergy 141595000110 # Total energy per rank (pJ) -system.physmem_1.averagePower 668.820896 # Core power per rank (mW) -system.physmem_1.memoryStateTime::IDLE 202960400000 # Time in different power states -system.physmem_1.memoryStateTime::REF 7069400000 # Time in different power states +system.physmem_1.refreshEnergy 14697384000 # Energy for refresh commands per rank (pJ) +system.physmem_1.actBackEnergy 6004643625 # Energy for active background per rank (pJ) +system.physmem_1.preBackEnergy 129746499750 # Energy for precharge background per rank (pJ) +system.physmem_1.totalEnergy 150487389915 # Total energy per rank (pJ) +system.physmem_1.averagePower 668.764823 # Core power per rank (mW) +system.physmem_1.memoryStateTime::IDLE 215845139250 # Time in different power states +system.physmem_1.memoryStateTime::REF 7514000000 # Time in different power states system.physmem_1.memoryStateTime::PRE_PDN 0 # Time in different power states -system.physmem_1.memoryStateTime::ACT 1682763000 # Time in different power states +system.physmem_1.memoryStateTime::ACT 1668675750 # Time in different power states system.physmem_1.memoryStateTime::ACT_PDN 0 # Time in different power states -system.pwrStateResidencyTicks::UNDEFINED 211714953000 # Cumulative time (in ticks) in various power states -system.cpu.branchPred.lookups 32413931 # Number of BP lookups -system.cpu.branchPred.condPredicted 16919661 # Number of conditional branches predicted -system.cpu.branchPred.condIncorrect 738142 # Number of conditional branches incorrect -system.cpu.branchPred.BTBLookups 17496692 # Number of BTB lookups -system.cpu.branchPred.BTBHits 12856502 # Number of BTB hits +system.pwrStateResidencyTicks::UNDEFINED 225030243000 # Cumulative time (in ticks) in various power states +system.cpu.branchPred.lookups 32430290 # Number of BP lookups +system.cpu.branchPred.condPredicted 16924100 # Number of conditional branches predicted +system.cpu.branchPred.condIncorrect 738493 # Number of conditional branches incorrect +system.cpu.branchPred.BTBLookups 17494980 # Number of BTB lookups +system.cpu.branchPred.BTBHits 12858502 # Number of BTB hits system.cpu.branchPred.BTBCorrect 0 # Number of correct BTB predictions (this stat may not work properly. -system.cpu.branchPred.BTBHitPct 73.479615 # BTB Hit Percentage -system.cpu.branchPred.usedRAS 6512761 # Number of times the RAS was used to get a target. -system.cpu.branchPred.RASInCorrect 3 # Number of incorrect RAS predictions. -system.cpu.branchPred.indirectLookups 2303892 # Number of indirect predictor lookups. -system.cpu.branchPred.indirectHits 2264485 # Number of indirect target hits. -system.cpu.branchPred.indirectMisses 39407 # Number of indirect misses. 
-system.cpu.branchPredindirectMispredicted 128263 # Number of mispredicted indirect branches. +system.cpu.branchPred.BTBHitPct 73.498238 # BTB Hit Percentage +system.cpu.branchPred.usedRAS 6523127 # Number of times the RAS was used to get a target. +system.cpu.branchPred.RASInCorrect 4 # Number of incorrect RAS predictions. +system.cpu.branchPred.indirectLookups 2303930 # Number of indirect predictor lookups. +system.cpu.branchPred.indirectHits 2264813 # Number of indirect target hits. +system.cpu.branchPred.indirectMisses 39117 # Number of indirect misses. +system.cpu.branchPredindirectMispredicted 128237 # Number of mispredicted indirect branches. system.cpu_clk_domain.clock 500 # Clock period in ticks -system.cpu.dstage2_mmu.stage2_tlb.walker.pwrStateResidencyTicks::UNDEFINED 211714953000 # Cumulative time (in ticks) in various power states +system.cpu.dstage2_mmu.stage2_tlb.walker.pwrStateResidencyTicks::UNDEFINED 225030243000 # Cumulative time (in ticks) in various power states system.cpu.dstage2_mmu.stage2_tlb.walker.walks 0 # Table walker walks requested system.cpu.dstage2_mmu.stage2_tlb.walker.walkRequestOrigin_Requested::Data 0 # Table walker requests started/completed, data/inst system.cpu.dstage2_mmu.stage2_tlb.walker.walkRequestOrigin_Requested::Inst 0 # Table walker requests started/completed, data/inst @@ -296,7 +296,7 @@ system.cpu.dstage2_mmu.stage2_tlb.inst_accesses 0 system.cpu.dstage2_mmu.stage2_tlb.hits 0 # DTB hits system.cpu.dstage2_mmu.stage2_tlb.misses 0 # DTB misses system.cpu.dstage2_mmu.stage2_tlb.accesses 0 # DTB accesses -system.cpu.dtb.walker.pwrStateResidencyTicks::UNDEFINED 211714953000 # Cumulative time (in ticks) in various power states +system.cpu.dtb.walker.pwrStateResidencyTicks::UNDEFINED 225030243000 # Cumulative time (in ticks) in various power states system.cpu.dtb.walker.walks 0 # Table walker walks requested system.cpu.dtb.walker.walkRequestOrigin_Requested::Data 0 # Table walker requests started/completed, data/inst system.cpu.dtb.walker.walkRequestOrigin_Requested::Inst 0 # Table walker requests started/completed, data/inst @@ -326,7 +326,7 @@ system.cpu.dtb.inst_accesses 0 # IT system.cpu.dtb.hits 0 # DTB hits system.cpu.dtb.misses 0 # DTB misses system.cpu.dtb.accesses 0 # DTB accesses -system.cpu.istage2_mmu.stage2_tlb.walker.pwrStateResidencyTicks::UNDEFINED 211714953000 # Cumulative time (in ticks) in various power states +system.cpu.istage2_mmu.stage2_tlb.walker.pwrStateResidencyTicks::UNDEFINED 225030243000 # Cumulative time (in ticks) in various power states system.cpu.istage2_mmu.stage2_tlb.walker.walks 0 # Table walker walks requested system.cpu.istage2_mmu.stage2_tlb.walker.walkRequestOrigin_Requested::Data 0 # Table walker requests started/completed, data/inst system.cpu.istage2_mmu.stage2_tlb.walker.walkRequestOrigin_Requested::Inst 0 # Table walker requests started/completed, data/inst @@ -356,7 +356,7 @@ system.cpu.istage2_mmu.stage2_tlb.inst_accesses 0 system.cpu.istage2_mmu.stage2_tlb.hits 0 # DTB hits system.cpu.istage2_mmu.stage2_tlb.misses 0 # DTB misses system.cpu.istage2_mmu.stage2_tlb.accesses 0 # DTB accesses -system.cpu.itb.walker.pwrStateResidencyTicks::UNDEFINED 211714953000 # Cumulative time (in ticks) in various power states +system.cpu.itb.walker.pwrStateResidencyTicks::UNDEFINED 225030243000 # Cumulative time (in ticks) in various power states system.cpu.itb.walker.walks 0 # Table walker walks requested system.cpu.itb.walker.walkRequestOrigin_Requested::Data 0 # Table walker requests started/completed, data/inst 
system.cpu.itb.walker.walkRequestOrigin_Requested::Inst 0 # Table walker requests started/completed, data/inst @@ -387,18 +387,18 @@ system.cpu.itb.hits 0 # DT system.cpu.itb.misses 0 # DTB misses system.cpu.itb.accesses 0 # DTB accesses system.cpu.workload.num_syscalls 191 # Number of system calls -system.cpu.pwrStateResidencyTicks::ON 211714953000 # Cumulative time (in ticks) in various power states -system.cpu.numCycles 423429906 # number of cpu cycles simulated +system.cpu.pwrStateResidencyTicks::ON 225030243000 # Cumulative time (in ticks) in various power states +system.cpu.numCycles 450060486 # number of cpu cycles simulated system.cpu.numWorkItemsStarted 0 # number of work items this cpu started system.cpu.numWorkItemsCompleted 0 # number of work items this cpu completed -system.cpu.committedInsts 273037857 # Number of instructions committed -system.cpu.committedOps 327812214 # Number of ops (including micro ops) committed -system.cpu.discardedOps 2127081 # Number of ops (including micro ops) which were discarded before commit +system.cpu.committedInsts 273037855 # Number of instructions committed +system.cpu.committedOps 327812212 # Number of ops (including micro ops) committed +system.cpu.discardedOps 2063972 # Number of ops (including micro ops) which were discarded before commit system.cpu.numFetchSuspends 0 # Number of times Execute suspended instruction fetching -system.cpu.cpi 1.550810 # CPI: cycles per instruction -system.cpu.ipc 0.644824 # IPC: instructions per cycle +system.cpu.cpi 1.648345 # CPI: cycles per instruction +system.cpu.ipc 0.606669 # IPC: instructions per cycle system.cpu.op_class_0::No_OpClass 0 0.00% 0.00% # Class of committed instruction -system.cpu.op_class_0::IntAlu 104312544 31.82% 31.82% # Class of committed instruction +system.cpu.op_class_0::IntAlu 104312542 31.82% 31.82% # Class of committed instruction system.cpu.op_class_0::IntMult 2145905 0.65% 32.48% # Class of committed instruction system.cpu.op_class_0::IntDiv 0 0.00% 32.48% # Class of committed instruction system.cpu.op_class_0::FloatAdd 0 0.00% 32.48% # Class of committed instruction @@ -431,93 +431,93 @@ system.cpu.op_class_0::MemRead 85732248 26.15% 74.87% # Cl system.cpu.op_class_0::MemWrite 82375599 25.13% 100.00% # Class of committed instruction system.cpu.op_class_0::IprAccess 0 0.00% 100.00% # Class of committed instruction system.cpu.op_class_0::InstPrefetch 0 0.00% 100.00% # Class of committed instruction -system.cpu.op_class_0::total 327812214 # Class of committed instruction -system.cpu.tickCycles 420106568 # Number of cycles that the object actually ticked -system.cpu.idleCycles 3323338 # Total number of cycles that the object has spent stopped -system.cpu.dcache.tags.pwrStateResidencyTicks::UNDEFINED 211714953000 # Cumulative time (in ticks) in various power states +system.cpu.op_class_0::total 327812212 # Class of committed instruction +system.cpu.tickCycles 434886518 # Number of cycles that the object actually ticked +system.cpu.idleCycles 15173968 # Total number of cycles that the object has spent stopped +system.cpu.dcache.tags.pwrStateResidencyTicks::UNDEFINED 225030243000 # Cumulative time (in ticks) in various power states system.cpu.dcache.tags.replacements 1355 # number of replacements -system.cpu.dcache.tags.tagsinuse 3085.570959 # Cycle average of tags in use -system.cpu.dcache.tags.total_refs 168654881 # Total number of references to valid blocks. 
+system.cpu.dcache.tags.tagsinuse 3086.261687 # Cycle average of tags in use +system.cpu.dcache.tags.total_refs 168654217 # Total number of references to valid blocks. system.cpu.dcache.tags.sampled_refs 4512 # Sample count of references to valid blocks. -system.cpu.dcache.tags.avg_refs 37379.184619 # Average number of references to valid blocks. +system.cpu.dcache.tags.avg_refs 37379.037456 # Average number of references to valid blocks. system.cpu.dcache.tags.warmup_cycle 0 # Cycle when the warmup percentage was hit. -system.cpu.dcache.tags.occ_blocks::cpu.data 3085.570959 # Average occupied blocks per requestor -system.cpu.dcache.tags.occ_percent::cpu.data 0.753313 # Average percentage of cache occupancy -system.cpu.dcache.tags.occ_percent::total 0.753313 # Average percentage of cache occupancy +system.cpu.dcache.tags.occ_blocks::cpu.data 3086.261687 # Average occupied blocks per requestor +system.cpu.dcache.tags.occ_percent::cpu.data 0.753482 # Average percentage of cache occupancy +system.cpu.dcache.tags.occ_percent::total 0.753482 # Average percentage of cache occupancy system.cpu.dcache.tags.occ_task_id_blocks::1024 3157 # Occupied blocks per task id -system.cpu.dcache.tags.age_task_id_blocks_1024::0 21 # Occupied blocks per task id -system.cpu.dcache.tags.age_task_id_blocks_1024::1 21 # Occupied blocks per task id -system.cpu.dcache.tags.age_task_id_blocks_1024::2 12 # Occupied blocks per task id -system.cpu.dcache.tags.age_task_id_blocks_1024::3 672 # Occupied blocks per task id +system.cpu.dcache.tags.age_task_id_blocks_1024::0 19 # Occupied blocks per task id +system.cpu.dcache.tags.age_task_id_blocks_1024::1 23 # Occupied blocks per task id +system.cpu.dcache.tags.age_task_id_blocks_1024::2 7 # Occupied blocks per task id +system.cpu.dcache.tags.age_task_id_blocks_1024::3 677 # Occupied blocks per task id system.cpu.dcache.tags.age_task_id_blocks_1024::4 2431 # Occupied blocks per task id system.cpu.dcache.tags.occ_task_id_percent::1024 0.770752 # Percentage of cache occupancy per task id -system.cpu.dcache.tags.tag_accesses 337328856 # Number of tag accesses -system.cpu.dcache.tags.data_accesses 337328856 # Number of data accesses -system.cpu.dcache.pwrStateResidencyTicks::UNDEFINED 211714953000 # Cumulative time (in ticks) in various power states -system.cpu.dcache.ReadReq_hits::cpu.data 86522107 # number of ReadReq hits -system.cpu.dcache.ReadReq_hits::total 86522107 # number of ReadReq hits -system.cpu.dcache.WriteReq_hits::cpu.data 82047451 # number of WriteReq hits -system.cpu.dcache.WriteReq_hits::total 82047451 # number of WriteReq hits -system.cpu.dcache.SoftPFReq_hits::cpu.data 63533 # number of SoftPFReq hits -system.cpu.dcache.SoftPFReq_hits::total 63533 # number of SoftPFReq hits +system.cpu.dcache.tags.tag_accesses 337326818 # Number of tag accesses +system.cpu.dcache.tags.data_accesses 337326818 # Number of data accesses +system.cpu.dcache.pwrStateResidencyTicks::UNDEFINED 225030243000 # Cumulative time (in ticks) in various power states +system.cpu.dcache.ReadReq_hits::cpu.data 86521433 # number of ReadReq hits +system.cpu.dcache.ReadReq_hits::total 86521433 # number of ReadReq hits +system.cpu.dcache.WriteReq_hits::cpu.data 82047456 # number of WriteReq hits +system.cpu.dcache.WriteReq_hits::total 82047456 # number of WriteReq hits +system.cpu.dcache.SoftPFReq_hits::cpu.data 63538 # number of SoftPFReq hits +system.cpu.dcache.SoftPFReq_hits::total 63538 # number of SoftPFReq hits system.cpu.dcache.LoadLockedReq_hits::cpu.data 10895 # number of LoadLockedReq 
hits system.cpu.dcache.LoadLockedReq_hits::total 10895 # number of LoadLockedReq hits system.cpu.dcache.StoreCondReq_hits::cpu.data 10895 # number of StoreCondReq hits system.cpu.dcache.StoreCondReq_hits::total 10895 # number of StoreCondReq hits -system.cpu.dcache.demand_hits::cpu.data 168569558 # number of demand (read+write) hits -system.cpu.dcache.demand_hits::total 168569558 # number of demand (read+write) hits -system.cpu.dcache.overall_hits::cpu.data 168633091 # number of overall hits -system.cpu.dcache.overall_hits::total 168633091 # number of overall hits -system.cpu.dcache.ReadReq_misses::cpu.data 2060 # number of ReadReq misses -system.cpu.dcache.ReadReq_misses::total 2060 # number of ReadReq misses -system.cpu.dcache.WriteReq_misses::cpu.data 5226 # number of WriteReq misses -system.cpu.dcache.WriteReq_misses::total 5226 # number of WriteReq misses +system.cpu.dcache.demand_hits::cpu.data 168568889 # number of demand (read+write) hits +system.cpu.dcache.demand_hits::total 168568889 # number of demand (read+write) hits +system.cpu.dcache.overall_hits::cpu.data 168632427 # number of overall hits +system.cpu.dcache.overall_hits::total 168632427 # number of overall hits +system.cpu.dcache.ReadReq_misses::cpu.data 1710 # number of ReadReq misses +system.cpu.dcache.ReadReq_misses::total 1710 # number of ReadReq misses +system.cpu.dcache.WriteReq_misses::cpu.data 5221 # number of WriteReq misses +system.cpu.dcache.WriteReq_misses::total 5221 # number of WriteReq misses system.cpu.dcache.SoftPFReq_misses::cpu.data 5 # number of SoftPFReq misses system.cpu.dcache.SoftPFReq_misses::total 5 # number of SoftPFReq misses -system.cpu.dcache.demand_misses::cpu.data 7286 # number of demand (read+write) misses -system.cpu.dcache.demand_misses::total 7286 # number of demand (read+write) misses -system.cpu.dcache.overall_misses::cpu.data 7291 # number of overall misses -system.cpu.dcache.overall_misses::total 7291 # number of overall misses -system.cpu.dcache.ReadReq_miss_latency::cpu.data 136635000 # number of ReadReq miss cycles -system.cpu.dcache.ReadReq_miss_latency::total 136635000 # number of ReadReq miss cycles -system.cpu.dcache.WriteReq_miss_latency::cpu.data 394688000 # number of WriteReq miss cycles -system.cpu.dcache.WriteReq_miss_latency::total 394688000 # number of WriteReq miss cycles -system.cpu.dcache.demand_miss_latency::cpu.data 531323000 # number of demand (read+write) miss cycles -system.cpu.dcache.demand_miss_latency::total 531323000 # number of demand (read+write) miss cycles -system.cpu.dcache.overall_miss_latency::cpu.data 531323000 # number of overall miss cycles -system.cpu.dcache.overall_miss_latency::total 531323000 # number of overall miss cycles -system.cpu.dcache.ReadReq_accesses::cpu.data 86524167 # number of ReadReq accesses(hits+misses) -system.cpu.dcache.ReadReq_accesses::total 86524167 # number of ReadReq accesses(hits+misses) +system.cpu.dcache.demand_misses::cpu.data 6931 # number of demand (read+write) misses +system.cpu.dcache.demand_misses::total 6931 # number of demand (read+write) misses +system.cpu.dcache.overall_misses::cpu.data 6936 # number of overall misses +system.cpu.dcache.overall_misses::total 6936 # number of overall misses +system.cpu.dcache.ReadReq_miss_latency::cpu.data 114932500 # number of ReadReq miss cycles +system.cpu.dcache.ReadReq_miss_latency::total 114932500 # number of ReadReq miss cycles +system.cpu.dcache.WriteReq_miss_latency::cpu.data 393586500 # number of WriteReq miss cycles +system.cpu.dcache.WriteReq_miss_latency::total 
393586500 # number of WriteReq miss cycles +system.cpu.dcache.demand_miss_latency::cpu.data 508519000 # number of demand (read+write) miss cycles +system.cpu.dcache.demand_miss_latency::total 508519000 # number of demand (read+write) miss cycles +system.cpu.dcache.overall_miss_latency::cpu.data 508519000 # number of overall miss cycles +system.cpu.dcache.overall_miss_latency::total 508519000 # number of overall miss cycles +system.cpu.dcache.ReadReq_accesses::cpu.data 86523143 # number of ReadReq accesses(hits+misses) +system.cpu.dcache.ReadReq_accesses::total 86523143 # number of ReadReq accesses(hits+misses) system.cpu.dcache.WriteReq_accesses::cpu.data 82052677 # number of WriteReq accesses(hits+misses) system.cpu.dcache.WriteReq_accesses::total 82052677 # number of WriteReq accesses(hits+misses) -system.cpu.dcache.SoftPFReq_accesses::cpu.data 63538 # number of SoftPFReq accesses(hits+misses) -system.cpu.dcache.SoftPFReq_accesses::total 63538 # number of SoftPFReq accesses(hits+misses) +system.cpu.dcache.SoftPFReq_accesses::cpu.data 63543 # number of SoftPFReq accesses(hits+misses) +system.cpu.dcache.SoftPFReq_accesses::total 63543 # number of SoftPFReq accesses(hits+misses) system.cpu.dcache.LoadLockedReq_accesses::cpu.data 10895 # number of LoadLockedReq accesses(hits+misses) system.cpu.dcache.LoadLockedReq_accesses::total 10895 # number of LoadLockedReq accesses(hits+misses) system.cpu.dcache.StoreCondReq_accesses::cpu.data 10895 # number of StoreCondReq accesses(hits+misses) system.cpu.dcache.StoreCondReq_accesses::total 10895 # number of StoreCondReq accesses(hits+misses) -system.cpu.dcache.demand_accesses::cpu.data 168576844 # number of demand (read+write) accesses -system.cpu.dcache.demand_accesses::total 168576844 # number of demand (read+write) accesses -system.cpu.dcache.overall_accesses::cpu.data 168640382 # number of overall (read+write) accesses -system.cpu.dcache.overall_accesses::total 168640382 # number of overall (read+write) accesses -system.cpu.dcache.ReadReq_miss_rate::cpu.data 0.000024 # miss rate for ReadReq accesses -system.cpu.dcache.ReadReq_miss_rate::total 0.000024 # miss rate for ReadReq accesses +system.cpu.dcache.demand_accesses::cpu.data 168575820 # number of demand (read+write) accesses +system.cpu.dcache.demand_accesses::total 168575820 # number of demand (read+write) accesses +system.cpu.dcache.overall_accesses::cpu.data 168639363 # number of overall (read+write) accesses +system.cpu.dcache.overall_accesses::total 168639363 # number of overall (read+write) accesses +system.cpu.dcache.ReadReq_miss_rate::cpu.data 0.000020 # miss rate for ReadReq accesses +system.cpu.dcache.ReadReq_miss_rate::total 0.000020 # miss rate for ReadReq accesses system.cpu.dcache.WriteReq_miss_rate::cpu.data 0.000064 # miss rate for WriteReq accesses system.cpu.dcache.WriteReq_miss_rate::total 0.000064 # miss rate for WriteReq accesses system.cpu.dcache.SoftPFReq_miss_rate::cpu.data 0.000079 # miss rate for SoftPFReq accesses system.cpu.dcache.SoftPFReq_miss_rate::total 0.000079 # miss rate for SoftPFReq accesses -system.cpu.dcache.demand_miss_rate::cpu.data 0.000043 # miss rate for demand accesses -system.cpu.dcache.demand_miss_rate::total 0.000043 # miss rate for demand accesses -system.cpu.dcache.overall_miss_rate::cpu.data 0.000043 # miss rate for overall accesses -system.cpu.dcache.overall_miss_rate::total 0.000043 # miss rate for overall accesses -system.cpu.dcache.ReadReq_avg_miss_latency::cpu.data 66327.669903 # average ReadReq miss latency 
-system.cpu.dcache.ReadReq_avg_miss_latency::total 66327.669903 # average ReadReq miss latency -system.cpu.dcache.WriteReq_avg_miss_latency::cpu.data 75523.918867 # average WriteReq miss latency -system.cpu.dcache.WriteReq_avg_miss_latency::total 75523.918867 # average WriteReq miss latency -system.cpu.dcache.demand_avg_miss_latency::cpu.data 72923.826517 # average overall miss latency -system.cpu.dcache.demand_avg_miss_latency::total 72923.826517 # average overall miss latency -system.cpu.dcache.overall_avg_miss_latency::cpu.data 72873.817035 # average overall miss latency -system.cpu.dcache.overall_avg_miss_latency::total 72873.817035 # average overall miss latency +system.cpu.dcache.demand_miss_rate::cpu.data 0.000041 # miss rate for demand accesses +system.cpu.dcache.demand_miss_rate::total 0.000041 # miss rate for demand accesses +system.cpu.dcache.overall_miss_rate::cpu.data 0.000041 # miss rate for overall accesses +system.cpu.dcache.overall_miss_rate::total 0.000041 # miss rate for overall accesses +system.cpu.dcache.ReadReq_avg_miss_latency::cpu.data 67211.988304 # average ReadReq miss latency +system.cpu.dcache.ReadReq_avg_miss_latency::total 67211.988304 # average ReadReq miss latency +system.cpu.dcache.WriteReq_avg_miss_latency::cpu.data 75385.271021 # average WriteReq miss latency +system.cpu.dcache.WriteReq_avg_miss_latency::total 75385.271021 # average WriteReq miss latency +system.cpu.dcache.demand_avg_miss_latency::cpu.data 73368.777954 # average overall miss latency +system.cpu.dcache.demand_avg_miss_latency::total 73368.777954 # average overall miss latency +system.cpu.dcache.overall_avg_miss_latency::cpu.data 73315.888120 # average overall miss latency +system.cpu.dcache.overall_avg_miss_latency::total 73315.888120 # average overall miss latency system.cpu.dcache.blocked_cycles::no_mshrs 0 # number of cycles access was blocked system.cpu.dcache.blocked_cycles::no_targets 0 # number of cycles access was blocked system.cpu.dcache.blocked::no_mshrs 0 # number of cycles access was blocked @@ -526,14 +526,14 @@ system.cpu.dcache.avg_blocked_cycles::no_mshrs nan system.cpu.dcache.avg_blocked_cycles::no_targets nan # average number of cycles each access was blocked system.cpu.dcache.writebacks::writebacks 1010 # number of writebacks system.cpu.dcache.writebacks::total 1010 # number of writebacks -system.cpu.dcache.ReadReq_mshr_hits::cpu.data 421 # number of ReadReq MSHR hits -system.cpu.dcache.ReadReq_mshr_hits::total 421 # number of ReadReq MSHR hits -system.cpu.dcache.WriteReq_mshr_hits::cpu.data 2356 # number of WriteReq MSHR hits -system.cpu.dcache.WriteReq_mshr_hits::total 2356 # number of WriteReq MSHR hits -system.cpu.dcache.demand_mshr_hits::cpu.data 2777 # number of demand (read+write) MSHR hits -system.cpu.dcache.demand_mshr_hits::total 2777 # number of demand (read+write) MSHR hits -system.cpu.dcache.overall_mshr_hits::cpu.data 2777 # number of overall MSHR hits -system.cpu.dcache.overall_mshr_hits::total 2777 # number of overall MSHR hits +system.cpu.dcache.ReadReq_mshr_hits::cpu.data 71 # number of ReadReq MSHR hits +system.cpu.dcache.ReadReq_mshr_hits::total 71 # number of ReadReq MSHR hits +system.cpu.dcache.WriteReq_mshr_hits::cpu.data 2351 # number of WriteReq MSHR hits +system.cpu.dcache.WriteReq_mshr_hits::total 2351 # number of WriteReq MSHR hits +system.cpu.dcache.demand_mshr_hits::cpu.data 2422 # number of demand (read+write) MSHR hits +system.cpu.dcache.demand_mshr_hits::total 2422 # number of demand (read+write) MSHR hits 
+system.cpu.dcache.overall_mshr_hits::cpu.data 2422 # number of overall MSHR hits +system.cpu.dcache.overall_mshr_hits::total 2422 # number of overall MSHR hits system.cpu.dcache.ReadReq_mshr_misses::cpu.data 1639 # number of ReadReq MSHR misses system.cpu.dcache.ReadReq_mshr_misses::total 1639 # number of ReadReq MSHR misses system.cpu.dcache.WriteReq_mshr_misses::cpu.data 2870 # number of WriteReq MSHR misses @@ -544,16 +544,16 @@ system.cpu.dcache.demand_mshr_misses::cpu.data 4509 system.cpu.dcache.demand_mshr_misses::total 4509 # number of demand (read+write) MSHR misses system.cpu.dcache.overall_mshr_misses::cpu.data 4512 # number of overall MSHR misses system.cpu.dcache.overall_mshr_misses::total 4512 # number of overall MSHR misses -system.cpu.dcache.ReadReq_mshr_miss_latency::cpu.data 109916500 # number of ReadReq MSHR miss cycles -system.cpu.dcache.ReadReq_mshr_miss_latency::total 109916500 # number of ReadReq MSHR miss cycles -system.cpu.dcache.WriteReq_mshr_miss_latency::cpu.data 219842000 # number of WriteReq MSHR miss cycles -system.cpu.dcache.WriteReq_mshr_miss_latency::total 219842000 # number of WriteReq MSHR miss cycles -system.cpu.dcache.SoftPFReq_mshr_miss_latency::cpu.data 481000 # number of SoftPFReq MSHR miss cycles -system.cpu.dcache.SoftPFReq_mshr_miss_latency::total 481000 # number of SoftPFReq MSHR miss cycles -system.cpu.dcache.demand_mshr_miss_latency::cpu.data 329758500 # number of demand (read+write) MSHR miss cycles -system.cpu.dcache.demand_mshr_miss_latency::total 329758500 # number of demand (read+write) MSHR miss cycles -system.cpu.dcache.overall_mshr_miss_latency::cpu.data 330239500 # number of overall MSHR miss cycles -system.cpu.dcache.overall_mshr_miss_latency::total 330239500 # number of overall MSHR miss cycles +system.cpu.dcache.ReadReq_mshr_miss_latency::cpu.data 110662500 # number of ReadReq MSHR miss cycles +system.cpu.dcache.ReadReq_mshr_miss_latency::total 110662500 # number of ReadReq MSHR miss cycles +system.cpu.dcache.WriteReq_mshr_miss_latency::cpu.data 219478500 # number of WriteReq MSHR miss cycles +system.cpu.dcache.WriteReq_mshr_miss_latency::total 219478500 # number of WriteReq MSHR miss cycles +system.cpu.dcache.SoftPFReq_mshr_miss_latency::cpu.data 238000 # number of SoftPFReq MSHR miss cycles +system.cpu.dcache.SoftPFReq_mshr_miss_latency::total 238000 # number of SoftPFReq MSHR miss cycles +system.cpu.dcache.demand_mshr_miss_latency::cpu.data 330141000 # number of demand (read+write) MSHR miss cycles +system.cpu.dcache.demand_mshr_miss_latency::total 330141000 # number of demand (read+write) MSHR miss cycles +system.cpu.dcache.overall_mshr_miss_latency::cpu.data 330379000 # number of overall MSHR miss cycles +system.cpu.dcache.overall_mshr_miss_latency::total 330379000 # number of overall MSHR miss cycles system.cpu.dcache.ReadReq_mshr_miss_rate::cpu.data 0.000019 # mshr miss rate for ReadReq accesses system.cpu.dcache.ReadReq_mshr_miss_rate::total 0.000019 # mshr miss rate for ReadReq accesses system.cpu.dcache.WriteReq_mshr_miss_rate::cpu.data 0.000035 # mshr miss rate for WriteReq accesses @@ -564,208 +564,208 @@ system.cpu.dcache.demand_mshr_miss_rate::cpu.data 0.000027 system.cpu.dcache.demand_mshr_miss_rate::total 0.000027 # mshr miss rate for demand accesses system.cpu.dcache.overall_mshr_miss_rate::cpu.data 0.000027 # mshr miss rate for overall accesses system.cpu.dcache.overall_mshr_miss_rate::total 0.000027 # mshr miss rate for overall accesses -system.cpu.dcache.ReadReq_avg_mshr_miss_latency::cpu.data 67063.148261 # 
average ReadReq mshr miss latency -system.cpu.dcache.ReadReq_avg_mshr_miss_latency::total 67063.148261 # average ReadReq mshr miss latency -system.cpu.dcache.WriteReq_avg_mshr_miss_latency::cpu.data 76600 # average WriteReq mshr miss latency -system.cpu.dcache.WriteReq_avg_mshr_miss_latency::total 76600 # average WriteReq mshr miss latency -system.cpu.dcache.SoftPFReq_avg_mshr_miss_latency::cpu.data 160333.333333 # average SoftPFReq mshr miss latency -system.cpu.dcache.SoftPFReq_avg_mshr_miss_latency::total 160333.333333 # average SoftPFReq mshr miss latency -system.cpu.dcache.demand_avg_mshr_miss_latency::cpu.data 73133.399867 # average overall mshr miss latency -system.cpu.dcache.demand_avg_mshr_miss_latency::total 73133.399867 # average overall mshr miss latency -system.cpu.dcache.overall_avg_mshr_miss_latency::cpu.data 73191.378546 # average overall mshr miss latency -system.cpu.dcache.overall_avg_mshr_miss_latency::total 73191.378546 # average overall mshr miss latency -system.cpu.icache.tags.pwrStateResidencyTicks::UNDEFINED 211714953000 # Cumulative time (in ticks) in various power states -system.cpu.icache.tags.replacements 38168 # number of replacements -system.cpu.icache.tags.tagsinuse 1923.744161 # Cycle average of tags in use -system.cpu.icache.tags.total_refs 69641436 # Total number of references to valid blocks. -system.cpu.icache.tags.sampled_refs 40104 # Sample count of references to valid blocks. -system.cpu.icache.tags.avg_refs 1736.520946 # Average number of references to valid blocks. +system.cpu.dcache.ReadReq_avg_mshr_miss_latency::cpu.data 67518.303844 # average ReadReq mshr miss latency +system.cpu.dcache.ReadReq_avg_mshr_miss_latency::total 67518.303844 # average ReadReq mshr miss latency +system.cpu.dcache.WriteReq_avg_mshr_miss_latency::cpu.data 76473.344948 # average WriteReq mshr miss latency +system.cpu.dcache.WriteReq_avg_mshr_miss_latency::total 76473.344948 # average WriteReq mshr miss latency +system.cpu.dcache.SoftPFReq_avg_mshr_miss_latency::cpu.data 79333.333333 # average SoftPFReq mshr miss latency +system.cpu.dcache.SoftPFReq_avg_mshr_miss_latency::total 79333.333333 # average SoftPFReq mshr miss latency +system.cpu.dcache.demand_avg_mshr_miss_latency::cpu.data 73218.230206 # average overall mshr miss latency +system.cpu.dcache.demand_avg_mshr_miss_latency::total 73218.230206 # average overall mshr miss latency +system.cpu.dcache.overall_avg_mshr_miss_latency::cpu.data 73222.296099 # average overall mshr miss latency +system.cpu.dcache.overall_avg_mshr_miss_latency::total 73222.296099 # average overall mshr miss latency +system.cpu.icache.tags.pwrStateResidencyTicks::UNDEFINED 225030243000 # Cumulative time (in ticks) in various power states +system.cpu.icache.tags.replacements 38188 # number of replacements +system.cpu.icache.tags.tagsinuse 1925.010528 # Cycle average of tags in use +system.cpu.icache.tags.total_refs 69819783 # Total number of references to valid blocks. +system.cpu.icache.tags.sampled_refs 40125 # Sample count of references to valid blocks. +system.cpu.icache.tags.avg_refs 1740.056897 # Average number of references to valid blocks. system.cpu.icache.tags.warmup_cycle 0 # Cycle when the warmup percentage was hit. 
-system.cpu.icache.tags.occ_blocks::cpu.inst 1923.744161 # Average occupied blocks per requestor -system.cpu.icache.tags.occ_percent::cpu.inst 0.939328 # Average percentage of cache occupancy -system.cpu.icache.tags.occ_percent::total 0.939328 # Average percentage of cache occupancy -system.cpu.icache.tags.occ_task_id_blocks::1024 1936 # Occupied blocks per task id -system.cpu.icache.tags.age_task_id_blocks_1024::0 60 # Occupied blocks per task id -system.cpu.icache.tags.age_task_id_blocks_1024::1 84 # Occupied blocks per task id -system.cpu.icache.tags.age_task_id_blocks_1024::2 33 # Occupied blocks per task id -system.cpu.icache.tags.age_task_id_blocks_1024::3 276 # Occupied blocks per task id -system.cpu.icache.tags.age_task_id_blocks_1024::4 1483 # Occupied blocks per task id -system.cpu.icache.tags.occ_task_id_percent::1024 0.945312 # Percentage of cache occupancy per task id -system.cpu.icache.tags.tag_accesses 139403186 # Number of tag accesses -system.cpu.icache.tags.data_accesses 139403186 # Number of data accesses -system.cpu.icache.pwrStateResidencyTicks::UNDEFINED 211714953000 # Cumulative time (in ticks) in various power states -system.cpu.icache.ReadReq_hits::cpu.inst 69641436 # number of ReadReq hits -system.cpu.icache.ReadReq_hits::total 69641436 # number of ReadReq hits -system.cpu.icache.demand_hits::cpu.inst 69641436 # number of demand (read+write) hits -system.cpu.icache.demand_hits::total 69641436 # number of demand (read+write) hits -system.cpu.icache.overall_hits::cpu.inst 69641436 # number of overall hits -system.cpu.icache.overall_hits::total 69641436 # number of overall hits -system.cpu.icache.ReadReq_misses::cpu.inst 40105 # number of ReadReq misses -system.cpu.icache.ReadReq_misses::total 40105 # number of ReadReq misses -system.cpu.icache.demand_misses::cpu.inst 40105 # number of demand (read+write) misses -system.cpu.icache.demand_misses::total 40105 # number of demand (read+write) misses -system.cpu.icache.overall_misses::cpu.inst 40105 # number of overall misses -system.cpu.icache.overall_misses::total 40105 # number of overall misses -system.cpu.icache.ReadReq_miss_latency::cpu.inst 757528000 # number of ReadReq miss cycles -system.cpu.icache.ReadReq_miss_latency::total 757528000 # number of ReadReq miss cycles -system.cpu.icache.demand_miss_latency::cpu.inst 757528000 # number of demand (read+write) miss cycles -system.cpu.icache.demand_miss_latency::total 757528000 # number of demand (read+write) miss cycles -system.cpu.icache.overall_miss_latency::cpu.inst 757528000 # number of overall miss cycles -system.cpu.icache.overall_miss_latency::total 757528000 # number of overall miss cycles -system.cpu.icache.ReadReq_accesses::cpu.inst 69681541 # number of ReadReq accesses(hits+misses) -system.cpu.icache.ReadReq_accesses::total 69681541 # number of ReadReq accesses(hits+misses) -system.cpu.icache.demand_accesses::cpu.inst 69681541 # number of demand (read+write) accesses -system.cpu.icache.demand_accesses::total 69681541 # number of demand (read+write) accesses -system.cpu.icache.overall_accesses::cpu.inst 69681541 # number of overall (read+write) accesses -system.cpu.icache.overall_accesses::total 69681541 # number of overall (read+write) accesses -system.cpu.icache.ReadReq_miss_rate::cpu.inst 0.000576 # miss rate for ReadReq accesses -system.cpu.icache.ReadReq_miss_rate::total 0.000576 # miss rate for ReadReq accesses -system.cpu.icache.demand_miss_rate::cpu.inst 0.000576 # miss rate for demand accesses -system.cpu.icache.demand_miss_rate::total 0.000576 # 
miss rate for demand accesses -system.cpu.icache.overall_miss_rate::cpu.inst 0.000576 # miss rate for overall accesses -system.cpu.icache.overall_miss_rate::total 0.000576 # miss rate for overall accesses -system.cpu.icache.ReadReq_avg_miss_latency::cpu.inst 18888.617379 # average ReadReq miss latency -system.cpu.icache.ReadReq_avg_miss_latency::total 18888.617379 # average ReadReq miss latency -system.cpu.icache.demand_avg_miss_latency::cpu.inst 18888.617379 # average overall miss latency -system.cpu.icache.demand_avg_miss_latency::total 18888.617379 # average overall miss latency -system.cpu.icache.overall_avg_miss_latency::cpu.inst 18888.617379 # average overall miss latency -system.cpu.icache.overall_avg_miss_latency::total 18888.617379 # average overall miss latency +system.cpu.icache.tags.occ_blocks::cpu.inst 1925.010528 # Average occupied blocks per requestor +system.cpu.icache.tags.occ_percent::cpu.inst 0.939947 # Average percentage of cache occupancy +system.cpu.icache.tags.occ_percent::total 0.939947 # Average percentage of cache occupancy +system.cpu.icache.tags.occ_task_id_blocks::1024 1937 # Occupied blocks per task id +system.cpu.icache.tags.age_task_id_blocks_1024::0 59 # Occupied blocks per task id +system.cpu.icache.tags.age_task_id_blocks_1024::1 85 # Occupied blocks per task id +system.cpu.icache.tags.age_task_id_blocks_1024::2 32 # Occupied blocks per task id +system.cpu.icache.tags.age_task_id_blocks_1024::3 277 # Occupied blocks per task id +system.cpu.icache.tags.age_task_id_blocks_1024::4 1484 # Occupied blocks per task id +system.cpu.icache.tags.occ_task_id_percent::1024 0.945801 # Percentage of cache occupancy per task id +system.cpu.icache.tags.tag_accesses 139759943 # Number of tag accesses +system.cpu.icache.tags.data_accesses 139759943 # Number of data accesses +system.cpu.icache.pwrStateResidencyTicks::UNDEFINED 225030243000 # Cumulative time (in ticks) in various power states +system.cpu.icache.ReadReq_hits::cpu.inst 69819783 # number of ReadReq hits +system.cpu.icache.ReadReq_hits::total 69819783 # number of ReadReq hits +system.cpu.icache.demand_hits::cpu.inst 69819783 # number of demand (read+write) hits +system.cpu.icache.demand_hits::total 69819783 # number of demand (read+write) hits +system.cpu.icache.overall_hits::cpu.inst 69819783 # number of overall hits +system.cpu.icache.overall_hits::total 69819783 # number of overall hits +system.cpu.icache.ReadReq_misses::cpu.inst 40126 # number of ReadReq misses +system.cpu.icache.ReadReq_misses::total 40126 # number of ReadReq misses +system.cpu.icache.demand_misses::cpu.inst 40126 # number of demand (read+write) misses +system.cpu.icache.demand_misses::total 40126 # number of demand (read+write) misses +system.cpu.icache.overall_misses::cpu.inst 40126 # number of overall misses +system.cpu.icache.overall_misses::total 40126 # number of overall misses +system.cpu.icache.ReadReq_miss_latency::cpu.inst 756662500 # number of ReadReq miss cycles +system.cpu.icache.ReadReq_miss_latency::total 756662500 # number of ReadReq miss cycles +system.cpu.icache.demand_miss_latency::cpu.inst 756662500 # number of demand (read+write) miss cycles +system.cpu.icache.demand_miss_latency::total 756662500 # number of demand (read+write) miss cycles +system.cpu.icache.overall_miss_latency::cpu.inst 756662500 # number of overall miss cycles +system.cpu.icache.overall_miss_latency::total 756662500 # number of overall miss cycles +system.cpu.icache.ReadReq_accesses::cpu.inst 69859909 # number of ReadReq accesses(hits+misses) 
+system.cpu.icache.ReadReq_accesses::total 69859909 # number of ReadReq accesses(hits+misses) +system.cpu.icache.demand_accesses::cpu.inst 69859909 # number of demand (read+write) accesses +system.cpu.icache.demand_accesses::total 69859909 # number of demand (read+write) accesses +system.cpu.icache.overall_accesses::cpu.inst 69859909 # number of overall (read+write) accesses +system.cpu.icache.overall_accesses::total 69859909 # number of overall (read+write) accesses +system.cpu.icache.ReadReq_miss_rate::cpu.inst 0.000574 # miss rate for ReadReq accesses +system.cpu.icache.ReadReq_miss_rate::total 0.000574 # miss rate for ReadReq accesses +system.cpu.icache.demand_miss_rate::cpu.inst 0.000574 # miss rate for demand accesses +system.cpu.icache.demand_miss_rate::total 0.000574 # miss rate for demand accesses +system.cpu.icache.overall_miss_rate::cpu.inst 0.000574 # miss rate for overall accesses +system.cpu.icache.overall_miss_rate::total 0.000574 # miss rate for overall accesses +system.cpu.icache.ReadReq_avg_miss_latency::cpu.inst 18857.162438 # average ReadReq miss latency +system.cpu.icache.ReadReq_avg_miss_latency::total 18857.162438 # average ReadReq miss latency +system.cpu.icache.demand_avg_miss_latency::cpu.inst 18857.162438 # average overall miss latency +system.cpu.icache.demand_avg_miss_latency::total 18857.162438 # average overall miss latency +system.cpu.icache.overall_avg_miss_latency::cpu.inst 18857.162438 # average overall miss latency +system.cpu.icache.overall_avg_miss_latency::total 18857.162438 # average overall miss latency system.cpu.icache.blocked_cycles::no_mshrs 0 # number of cycles access was blocked system.cpu.icache.blocked_cycles::no_targets 0 # number of cycles access was blocked system.cpu.icache.blocked::no_mshrs 0 # number of cycles access was blocked system.cpu.icache.blocked::no_targets 0 # number of cycles access was blocked system.cpu.icache.avg_blocked_cycles::no_mshrs nan # average number of cycles each access was blocked system.cpu.icache.avg_blocked_cycles::no_targets nan # average number of cycles each access was blocked -system.cpu.icache.writebacks::writebacks 38168 # number of writebacks -system.cpu.icache.writebacks::total 38168 # number of writebacks -system.cpu.icache.ReadReq_mshr_misses::cpu.inst 40105 # number of ReadReq MSHR misses -system.cpu.icache.ReadReq_mshr_misses::total 40105 # number of ReadReq MSHR misses -system.cpu.icache.demand_mshr_misses::cpu.inst 40105 # number of demand (read+write) MSHR misses -system.cpu.icache.demand_mshr_misses::total 40105 # number of demand (read+write) MSHR misses -system.cpu.icache.overall_mshr_misses::cpu.inst 40105 # number of overall MSHR misses -system.cpu.icache.overall_mshr_misses::total 40105 # number of overall MSHR misses -system.cpu.icache.ReadReq_mshr_miss_latency::cpu.inst 717424000 # number of ReadReq MSHR miss cycles -system.cpu.icache.ReadReq_mshr_miss_latency::total 717424000 # number of ReadReq MSHR miss cycles -system.cpu.icache.demand_mshr_miss_latency::cpu.inst 717424000 # number of demand (read+write) MSHR miss cycles -system.cpu.icache.demand_mshr_miss_latency::total 717424000 # number of demand (read+write) MSHR miss cycles -system.cpu.icache.overall_mshr_miss_latency::cpu.inst 717424000 # number of overall MSHR miss cycles -system.cpu.icache.overall_mshr_miss_latency::total 717424000 # number of overall MSHR miss cycles -system.cpu.icache.ReadReq_mshr_miss_rate::cpu.inst 0.000576 # mshr miss rate for ReadReq accesses -system.cpu.icache.ReadReq_mshr_miss_rate::total 0.000576 # 
mshr miss rate for ReadReq accesses -system.cpu.icache.demand_mshr_miss_rate::cpu.inst 0.000576 # mshr miss rate for demand accesses -system.cpu.icache.demand_mshr_miss_rate::total 0.000576 # mshr miss rate for demand accesses -system.cpu.icache.overall_mshr_miss_rate::cpu.inst 0.000576 # mshr miss rate for overall accesses -system.cpu.icache.overall_mshr_miss_rate::total 0.000576 # mshr miss rate for overall accesses -system.cpu.icache.ReadReq_avg_mshr_miss_latency::cpu.inst 17888.642314 # average ReadReq mshr miss latency -system.cpu.icache.ReadReq_avg_mshr_miss_latency::total 17888.642314 # average ReadReq mshr miss latency -system.cpu.icache.demand_avg_mshr_miss_latency::cpu.inst 17888.642314 # average overall mshr miss latency -system.cpu.icache.demand_avg_mshr_miss_latency::total 17888.642314 # average overall mshr miss latency -system.cpu.icache.overall_avg_mshr_miss_latency::cpu.inst 17888.642314 # average overall mshr miss latency -system.cpu.icache.overall_avg_mshr_miss_latency::total 17888.642314 # average overall mshr miss latency -system.cpu.l2cache.tags.pwrStateResidencyTicks::UNDEFINED 211714953000 # Cumulative time (in ticks) in various power states +system.cpu.icache.writebacks::writebacks 38188 # number of writebacks +system.cpu.icache.writebacks::total 38188 # number of writebacks +system.cpu.icache.ReadReq_mshr_misses::cpu.inst 40126 # number of ReadReq MSHR misses +system.cpu.icache.ReadReq_mshr_misses::total 40126 # number of ReadReq MSHR misses +system.cpu.icache.demand_mshr_misses::cpu.inst 40126 # number of demand (read+write) MSHR misses +system.cpu.icache.demand_mshr_misses::total 40126 # number of demand (read+write) MSHR misses +system.cpu.icache.overall_mshr_misses::cpu.inst 40126 # number of overall MSHR misses +system.cpu.icache.overall_mshr_misses::total 40126 # number of overall MSHR misses +system.cpu.icache.ReadReq_mshr_miss_latency::cpu.inst 716537500 # number of ReadReq MSHR miss cycles +system.cpu.icache.ReadReq_mshr_miss_latency::total 716537500 # number of ReadReq MSHR miss cycles +system.cpu.icache.demand_mshr_miss_latency::cpu.inst 716537500 # number of demand (read+write) MSHR miss cycles +system.cpu.icache.demand_mshr_miss_latency::total 716537500 # number of demand (read+write) MSHR miss cycles +system.cpu.icache.overall_mshr_miss_latency::cpu.inst 716537500 # number of overall MSHR miss cycles +system.cpu.icache.overall_mshr_miss_latency::total 716537500 # number of overall MSHR miss cycles +system.cpu.icache.ReadReq_mshr_miss_rate::cpu.inst 0.000574 # mshr miss rate for ReadReq accesses +system.cpu.icache.ReadReq_mshr_miss_rate::total 0.000574 # mshr miss rate for ReadReq accesses +system.cpu.icache.demand_mshr_miss_rate::cpu.inst 0.000574 # mshr miss rate for demand accesses +system.cpu.icache.demand_mshr_miss_rate::total 0.000574 # mshr miss rate for demand accesses +system.cpu.icache.overall_mshr_miss_rate::cpu.inst 0.000574 # mshr miss rate for overall accesses +system.cpu.icache.overall_mshr_miss_rate::total 0.000574 # mshr miss rate for overall accesses +system.cpu.icache.ReadReq_avg_mshr_miss_latency::cpu.inst 17857.187360 # average ReadReq mshr miss latency +system.cpu.icache.ReadReq_avg_mshr_miss_latency::total 17857.187360 # average ReadReq mshr miss latency +system.cpu.icache.demand_avg_mshr_miss_latency::cpu.inst 17857.187360 # average overall mshr miss latency +system.cpu.icache.demand_avg_mshr_miss_latency::total 17857.187360 # average overall mshr miss latency +system.cpu.icache.overall_avg_mshr_miss_latency::cpu.inst 
17857.187360 # average overall mshr miss latency +system.cpu.icache.overall_avg_mshr_miss_latency::total 17857.187360 # average overall mshr miss latency +system.cpu.l2cache.tags.pwrStateResidencyTicks::UNDEFINED 225030243000 # Cumulative time (in ticks) in various power states system.cpu.l2cache.tags.replacements 0 # number of replacements -system.cpu.l2cache.tags.tagsinuse 4199.701287 # Cycle average of tags in use -system.cpu.l2cache.tags.total_refs 60529 # Total number of references to valid blocks. -system.cpu.l2cache.tags.sampled_refs 5648 # Sample count of references to valid blocks. -system.cpu.l2cache.tags.avg_refs 10.716891 # Average number of references to valid blocks. +system.cpu.l2cache.tags.tagsinuse 4201.230054 # Cycle average of tags in use +system.cpu.l2cache.tags.total_refs 60569 # Total number of references to valid blocks. +system.cpu.l2cache.tags.sampled_refs 5649 # Sample count of references to valid blocks. +system.cpu.l2cache.tags.avg_refs 10.722075 # Average number of references to valid blocks. system.cpu.l2cache.tags.warmup_cycle 0 # Cycle when the warmup percentage was hit. -system.cpu.l2cache.tags.occ_blocks::writebacks 353.800339 # Average occupied blocks per requestor -system.cpu.l2cache.tags.occ_blocks::cpu.inst 3167.579629 # Average occupied blocks per requestor -system.cpu.l2cache.tags.occ_blocks::cpu.data 678.321319 # Average occupied blocks per requestor -system.cpu.l2cache.tags.occ_percent::writebacks 0.010797 # Average percentage of cache occupancy -system.cpu.l2cache.tags.occ_percent::cpu.inst 0.096667 # Average percentage of cache occupancy -system.cpu.l2cache.tags.occ_percent::cpu.data 0.020701 # Average percentage of cache occupancy -system.cpu.l2cache.tags.occ_percent::total 0.128165 # Average percentage of cache occupancy -system.cpu.l2cache.tags.occ_task_id_blocks::1024 5648 # Occupied blocks per task id -system.cpu.l2cache.tags.age_task_id_blocks_1024::0 56 # Occupied blocks per task id -system.cpu.l2cache.tags.age_task_id_blocks_1024::1 37 # Occupied blocks per task id -system.cpu.l2cache.tags.age_task_id_blocks_1024::2 42 # Occupied blocks per task id -system.cpu.l2cache.tags.age_task_id_blocks_1024::3 1251 # Occupied blocks per task id +system.cpu.l2cache.tags.occ_blocks::writebacks 354.127692 # Average occupied blocks per requestor +system.cpu.l2cache.tags.occ_blocks::cpu.inst 3168.434045 # Average occupied blocks per requestor +system.cpu.l2cache.tags.occ_blocks::cpu.data 678.668317 # Average occupied blocks per requestor +system.cpu.l2cache.tags.occ_percent::writebacks 0.010807 # Average percentage of cache occupancy +system.cpu.l2cache.tags.occ_percent::cpu.inst 0.096693 # Average percentage of cache occupancy +system.cpu.l2cache.tags.occ_percent::cpu.data 0.020711 # Average percentage of cache occupancy +system.cpu.l2cache.tags.occ_percent::total 0.128211 # Average percentage of cache occupancy +system.cpu.l2cache.tags.occ_task_id_blocks::1024 5649 # Occupied blocks per task id +system.cpu.l2cache.tags.age_task_id_blocks_1024::0 55 # Occupied blocks per task id +system.cpu.l2cache.tags.age_task_id_blocks_1024::1 38 # Occupied blocks per task id +system.cpu.l2cache.tags.age_task_id_blocks_1024::2 37 # Occupied blocks per task id +system.cpu.l2cache.tags.age_task_id_blocks_1024::3 1257 # Occupied blocks per task id system.cpu.l2cache.tags.age_task_id_blocks_1024::4 4262 # Occupied blocks per task id -system.cpu.l2cache.tags.occ_task_id_percent::1024 0.172363 # Percentage of cache occupancy per task id 
-system.cpu.l2cache.tags.tag_accesses 561366 # Number of tag accesses -system.cpu.l2cache.tags.data_accesses 561366 # Number of data accesses -system.cpu.l2cache.pwrStateResidencyTicks::UNDEFINED 211714953000 # Cumulative time (in ticks) in various power states +system.cpu.l2cache.tags.occ_task_id_percent::1024 0.172394 # Percentage of cache occupancy per task id +system.cpu.l2cache.tags.tag_accesses 561687 # Number of tag accesses +system.cpu.l2cache.tags.data_accesses 561687 # Number of data accesses +system.cpu.l2cache.pwrStateResidencyTicks::UNDEFINED 225030243000 # Cumulative time (in ticks) in various power states system.cpu.l2cache.WritebackDirty_hits::writebacks 1010 # number of WritebackDirty hits system.cpu.l2cache.WritebackDirty_hits::total 1010 # number of WritebackDirty hits -system.cpu.l2cache.WritebackClean_hits::writebacks 23251 # number of WritebackClean hits -system.cpu.l2cache.WritebackClean_hits::total 23251 # number of WritebackClean hits +system.cpu.l2cache.WritebackClean_hits::writebacks 23270 # number of WritebackClean hits +system.cpu.l2cache.WritebackClean_hits::total 23270 # number of WritebackClean hits system.cpu.l2cache.ReadExReq_hits::cpu.data 16 # number of ReadExReq hits system.cpu.l2cache.ReadExReq_hits::total 16 # number of ReadExReq hits -system.cpu.l2cache.ReadCleanReq_hits::cpu.inst 36680 # number of ReadCleanReq hits -system.cpu.l2cache.ReadCleanReq_hits::total 36680 # number of ReadCleanReq hits -system.cpu.l2cache.ReadSharedReq_hits::cpu.data 291 # number of ReadSharedReq hits -system.cpu.l2cache.ReadSharedReq_hits::total 291 # number of ReadSharedReq hits -system.cpu.l2cache.demand_hits::cpu.inst 36680 # number of demand (read+write) hits -system.cpu.l2cache.demand_hits::cpu.data 307 # number of demand (read+write) hits -system.cpu.l2cache.demand_hits::total 36987 # number of demand (read+write) hits -system.cpu.l2cache.overall_hits::cpu.inst 36680 # number of overall hits -system.cpu.l2cache.overall_hits::cpu.data 307 # number of overall hits -system.cpu.l2cache.overall_hits::total 36987 # number of overall hits +system.cpu.l2cache.ReadCleanReq_hits::cpu.inst 36700 # number of ReadCleanReq hits +system.cpu.l2cache.ReadCleanReq_hits::total 36700 # number of ReadCleanReq hits +system.cpu.l2cache.ReadSharedReq_hits::cpu.data 292 # number of ReadSharedReq hits +system.cpu.l2cache.ReadSharedReq_hits::total 292 # number of ReadSharedReq hits +system.cpu.l2cache.demand_hits::cpu.inst 36700 # number of demand (read+write) hits +system.cpu.l2cache.demand_hits::cpu.data 308 # number of demand (read+write) hits +system.cpu.l2cache.demand_hits::total 37008 # number of demand (read+write) hits +system.cpu.l2cache.overall_hits::cpu.inst 36700 # number of overall hits +system.cpu.l2cache.overall_hits::cpu.data 308 # number of overall hits +system.cpu.l2cache.overall_hits::total 37008 # number of overall hits system.cpu.l2cache.ReadExReq_misses::cpu.data 2854 # number of ReadExReq misses system.cpu.l2cache.ReadExReq_misses::total 2854 # number of ReadExReq misses -system.cpu.l2cache.ReadCleanReq_misses::cpu.inst 3425 # number of ReadCleanReq misses -system.cpu.l2cache.ReadCleanReq_misses::total 3425 # number of ReadCleanReq misses -system.cpu.l2cache.ReadSharedReq_misses::cpu.data 1351 # number of ReadSharedReq misses -system.cpu.l2cache.ReadSharedReq_misses::total 1351 # number of ReadSharedReq misses -system.cpu.l2cache.demand_misses::cpu.inst 3425 # number of demand (read+write) misses -system.cpu.l2cache.demand_misses::cpu.data 4205 # number of demand 
(read+write) misses +system.cpu.l2cache.ReadCleanReq_misses::cpu.inst 3426 # number of ReadCleanReq misses +system.cpu.l2cache.ReadCleanReq_misses::total 3426 # number of ReadCleanReq misses +system.cpu.l2cache.ReadSharedReq_misses::cpu.data 1350 # number of ReadSharedReq misses +system.cpu.l2cache.ReadSharedReq_misses::total 1350 # number of ReadSharedReq misses +system.cpu.l2cache.demand_misses::cpu.inst 3426 # number of demand (read+write) misses +system.cpu.l2cache.demand_misses::cpu.data 4204 # number of demand (read+write) misses system.cpu.l2cache.demand_misses::total 7630 # number of demand (read+write) misses -system.cpu.l2cache.overall_misses::cpu.inst 3425 # number of overall misses -system.cpu.l2cache.overall_misses::cpu.data 4205 # number of overall misses +system.cpu.l2cache.overall_misses::cpu.inst 3426 # number of overall misses +system.cpu.l2cache.overall_misses::cpu.data 4204 # number of overall misses system.cpu.l2cache.overall_misses::total 7630 # number of overall misses -system.cpu.l2cache.ReadExReq_miss_latency::cpu.data 215334500 # number of ReadExReq miss cycles -system.cpu.l2cache.ReadExReq_miss_latency::total 215334500 # number of ReadExReq miss cycles -system.cpu.l2cache.ReadCleanReq_miss_latency::cpu.inst 257203500 # number of ReadCleanReq miss cycles -system.cpu.l2cache.ReadCleanReq_miss_latency::total 257203500 # number of ReadCleanReq miss cycles -system.cpu.l2cache.ReadSharedReq_miss_latency::cpu.data 104684500 # number of ReadSharedReq miss cycles -system.cpu.l2cache.ReadSharedReq_miss_latency::total 104684500 # number of ReadSharedReq miss cycles -system.cpu.l2cache.demand_miss_latency::cpu.inst 257203500 # number of demand (read+write) miss cycles -system.cpu.l2cache.demand_miss_latency::cpu.data 320019000 # number of demand (read+write) miss cycles -system.cpu.l2cache.demand_miss_latency::total 577222500 # number of demand (read+write) miss cycles -system.cpu.l2cache.overall_miss_latency::cpu.inst 257203500 # number of overall miss cycles -system.cpu.l2cache.overall_miss_latency::cpu.data 320019000 # number of overall miss cycles -system.cpu.l2cache.overall_miss_latency::total 577222500 # number of overall miss cycles +system.cpu.l2cache.ReadExReq_miss_latency::cpu.data 214976500 # number of ReadExReq miss cycles +system.cpu.l2cache.ReadExReq_miss_latency::total 214976500 # number of ReadExReq miss cycles +system.cpu.l2cache.ReadCleanReq_miss_latency::cpu.inst 256075000 # number of ReadCleanReq miss cycles +system.cpu.l2cache.ReadCleanReq_miss_latency::total 256075000 # number of ReadCleanReq miss cycles +system.cpu.l2cache.ReadSharedReq_miss_latency::cpu.data 105174500 # number of ReadSharedReq miss cycles +system.cpu.l2cache.ReadSharedReq_miss_latency::total 105174500 # number of ReadSharedReq miss cycles +system.cpu.l2cache.demand_miss_latency::cpu.inst 256075000 # number of demand (read+write) miss cycles +system.cpu.l2cache.demand_miss_latency::cpu.data 320151000 # number of demand (read+write) miss cycles +system.cpu.l2cache.demand_miss_latency::total 576226000 # number of demand (read+write) miss cycles +system.cpu.l2cache.overall_miss_latency::cpu.inst 256075000 # number of overall miss cycles +system.cpu.l2cache.overall_miss_latency::cpu.data 320151000 # number of overall miss cycles +system.cpu.l2cache.overall_miss_latency::total 576226000 # number of overall miss cycles system.cpu.l2cache.WritebackDirty_accesses::writebacks 1010 # number of WritebackDirty accesses(hits+misses) system.cpu.l2cache.WritebackDirty_accesses::total 1010 # number of 
WritebackDirty accesses(hits+misses) -system.cpu.l2cache.WritebackClean_accesses::writebacks 23251 # number of WritebackClean accesses(hits+misses) -system.cpu.l2cache.WritebackClean_accesses::total 23251 # number of WritebackClean accesses(hits+misses) +system.cpu.l2cache.WritebackClean_accesses::writebacks 23270 # number of WritebackClean accesses(hits+misses) +system.cpu.l2cache.WritebackClean_accesses::total 23270 # number of WritebackClean accesses(hits+misses) system.cpu.l2cache.ReadExReq_accesses::cpu.data 2870 # number of ReadExReq accesses(hits+misses) system.cpu.l2cache.ReadExReq_accesses::total 2870 # number of ReadExReq accesses(hits+misses) -system.cpu.l2cache.ReadCleanReq_accesses::cpu.inst 40105 # number of ReadCleanReq accesses(hits+misses) -system.cpu.l2cache.ReadCleanReq_accesses::total 40105 # number of ReadCleanReq accesses(hits+misses) +system.cpu.l2cache.ReadCleanReq_accesses::cpu.inst 40126 # number of ReadCleanReq accesses(hits+misses) +system.cpu.l2cache.ReadCleanReq_accesses::total 40126 # number of ReadCleanReq accesses(hits+misses) system.cpu.l2cache.ReadSharedReq_accesses::cpu.data 1642 # number of ReadSharedReq accesses(hits+misses) system.cpu.l2cache.ReadSharedReq_accesses::total 1642 # number of ReadSharedReq accesses(hits+misses) -system.cpu.l2cache.demand_accesses::cpu.inst 40105 # number of demand (read+write) accesses +system.cpu.l2cache.demand_accesses::cpu.inst 40126 # number of demand (read+write) accesses system.cpu.l2cache.demand_accesses::cpu.data 4512 # number of demand (read+write) accesses -system.cpu.l2cache.demand_accesses::total 44617 # number of demand (read+write) accesses -system.cpu.l2cache.overall_accesses::cpu.inst 40105 # number of overall (read+write) accesses +system.cpu.l2cache.demand_accesses::total 44638 # number of demand (read+write) accesses +system.cpu.l2cache.overall_accesses::cpu.inst 40126 # number of overall (read+write) accesses system.cpu.l2cache.overall_accesses::cpu.data 4512 # number of overall (read+write) accesses -system.cpu.l2cache.overall_accesses::total 44617 # number of overall (read+write) accesses +system.cpu.l2cache.overall_accesses::total 44638 # number of overall (read+write) accesses system.cpu.l2cache.ReadExReq_miss_rate::cpu.data 0.994425 # miss rate for ReadExReq accesses system.cpu.l2cache.ReadExReq_miss_rate::total 0.994425 # miss rate for ReadExReq accesses -system.cpu.l2cache.ReadCleanReq_miss_rate::cpu.inst 0.085401 # miss rate for ReadCleanReq accesses -system.cpu.l2cache.ReadCleanReq_miss_rate::total 0.085401 # miss rate for ReadCleanReq accesses -system.cpu.l2cache.ReadSharedReq_miss_rate::cpu.data 0.822777 # miss rate for ReadSharedReq accesses -system.cpu.l2cache.ReadSharedReq_miss_rate::total 0.822777 # miss rate for ReadSharedReq accesses -system.cpu.l2cache.demand_miss_rate::cpu.inst 0.085401 # miss rate for demand accesses -system.cpu.l2cache.demand_miss_rate::cpu.data 0.931959 # miss rate for demand accesses -system.cpu.l2cache.demand_miss_rate::total 0.171011 # miss rate for demand accesses -system.cpu.l2cache.overall_miss_rate::cpu.inst 0.085401 # miss rate for overall accesses -system.cpu.l2cache.overall_miss_rate::cpu.data 0.931959 # miss rate for overall accesses -system.cpu.l2cache.overall_miss_rate::total 0.171011 # miss rate for overall accesses -system.cpu.l2cache.ReadExReq_avg_miss_latency::cpu.data 75450.070077 # average ReadExReq miss latency -system.cpu.l2cache.ReadExReq_avg_miss_latency::total 75450.070077 # average ReadExReq miss latency 
-system.cpu.l2cache.ReadCleanReq_avg_miss_latency::cpu.inst 75095.912409 # average ReadCleanReq miss latency -system.cpu.l2cache.ReadCleanReq_avg_miss_latency::total 75095.912409 # average ReadCleanReq miss latency -system.cpu.l2cache.ReadSharedReq_avg_miss_latency::cpu.data 77486.676536 # average ReadSharedReq miss latency -system.cpu.l2cache.ReadSharedReq_avg_miss_latency::total 77486.676536 # average ReadSharedReq miss latency -system.cpu.l2cache.demand_avg_miss_latency::cpu.inst 75095.912409 # average overall miss latency -system.cpu.l2cache.demand_avg_miss_latency::cpu.data 76104.399524 # average overall miss latency -system.cpu.l2cache.demand_avg_miss_latency::total 75651.703801 # average overall miss latency -system.cpu.l2cache.overall_avg_miss_latency::cpu.inst 75095.912409 # average overall miss latency -system.cpu.l2cache.overall_avg_miss_latency::cpu.data 76104.399524 # average overall miss latency -system.cpu.l2cache.overall_avg_miss_latency::total 75651.703801 # average overall miss latency +system.cpu.l2cache.ReadCleanReq_miss_rate::cpu.inst 0.085381 # miss rate for ReadCleanReq accesses +system.cpu.l2cache.ReadCleanReq_miss_rate::total 0.085381 # miss rate for ReadCleanReq accesses +system.cpu.l2cache.ReadSharedReq_miss_rate::cpu.data 0.822168 # miss rate for ReadSharedReq accesses +system.cpu.l2cache.ReadSharedReq_miss_rate::total 0.822168 # miss rate for ReadSharedReq accesses +system.cpu.l2cache.demand_miss_rate::cpu.inst 0.085381 # miss rate for demand accesses +system.cpu.l2cache.demand_miss_rate::cpu.data 0.931738 # miss rate for demand accesses +system.cpu.l2cache.demand_miss_rate::total 0.170931 # miss rate for demand accesses +system.cpu.l2cache.overall_miss_rate::cpu.inst 0.085381 # miss rate for overall accesses +system.cpu.l2cache.overall_miss_rate::cpu.data 0.931738 # miss rate for overall accesses +system.cpu.l2cache.overall_miss_rate::total 0.170931 # miss rate for overall accesses +system.cpu.l2cache.ReadExReq_avg_miss_latency::cpu.data 75324.632095 # average ReadExReq miss latency +system.cpu.l2cache.ReadExReq_avg_miss_latency::total 75324.632095 # average ReadExReq miss latency +system.cpu.l2cache.ReadCleanReq_avg_miss_latency::cpu.inst 74744.600117 # average ReadCleanReq miss latency +system.cpu.l2cache.ReadCleanReq_avg_miss_latency::total 74744.600117 # average ReadCleanReq miss latency +system.cpu.l2cache.ReadSharedReq_avg_miss_latency::cpu.data 77907.037037 # average ReadSharedReq miss latency +system.cpu.l2cache.ReadSharedReq_avg_miss_latency::total 77907.037037 # average ReadSharedReq miss latency +system.cpu.l2cache.demand_avg_miss_latency::cpu.inst 74744.600117 # average overall miss latency +system.cpu.l2cache.demand_avg_miss_latency::cpu.data 76153.901047 # average overall miss latency +system.cpu.l2cache.demand_avg_miss_latency::total 75521.100917 # average overall miss latency +system.cpu.l2cache.overall_avg_miss_latency::cpu.inst 74744.600117 # average overall miss latency +system.cpu.l2cache.overall_avg_miss_latency::cpu.data 76153.901047 # average overall miss latency +system.cpu.l2cache.overall_avg_miss_latency::total 75521.100917 # average overall miss latency system.cpu.l2cache.blocked_cycles::no_mshrs 0 # number of cycles access was blocked system.cpu.l2cache.blocked_cycles::no_targets 0 # number of cycles access was blocked system.cpu.l2cache.blocked::no_mshrs 0 # number of cycles access was blocked @@ -774,124 +774,126 @@ system.cpu.l2cache.avg_blocked_cycles::no_mshrs nan system.cpu.l2cache.avg_blocked_cycles::no_targets nan # average 
number of cycles each access was blocked system.cpu.l2cache.ReadCleanReq_mshr_hits::cpu.inst 2 # number of ReadCleanReq MSHR hits system.cpu.l2cache.ReadCleanReq_mshr_hits::total 2 # number of ReadCleanReq MSHR hits -system.cpu.l2cache.ReadSharedReq_mshr_hits::cpu.data 42 # number of ReadSharedReq MSHR hits -system.cpu.l2cache.ReadSharedReq_mshr_hits::total 42 # number of ReadSharedReq MSHR hits +system.cpu.l2cache.ReadSharedReq_mshr_hits::cpu.data 41 # number of ReadSharedReq MSHR hits +system.cpu.l2cache.ReadSharedReq_mshr_hits::total 41 # number of ReadSharedReq MSHR hits system.cpu.l2cache.demand_mshr_hits::cpu.inst 2 # number of demand (read+write) MSHR hits -system.cpu.l2cache.demand_mshr_hits::cpu.data 42 # number of demand (read+write) MSHR hits -system.cpu.l2cache.demand_mshr_hits::total 44 # number of demand (read+write) MSHR hits +system.cpu.l2cache.demand_mshr_hits::cpu.data 41 # number of demand (read+write) MSHR hits +system.cpu.l2cache.demand_mshr_hits::total 43 # number of demand (read+write) MSHR hits system.cpu.l2cache.overall_mshr_hits::cpu.inst 2 # number of overall MSHR hits -system.cpu.l2cache.overall_mshr_hits::cpu.data 42 # number of overall MSHR hits -system.cpu.l2cache.overall_mshr_hits::total 44 # number of overall MSHR hits +system.cpu.l2cache.overall_mshr_hits::cpu.data 41 # number of overall MSHR hits +system.cpu.l2cache.overall_mshr_hits::total 43 # number of overall MSHR hits system.cpu.l2cache.ReadExReq_mshr_misses::cpu.data 2854 # number of ReadExReq MSHR misses system.cpu.l2cache.ReadExReq_mshr_misses::total 2854 # number of ReadExReq MSHR misses -system.cpu.l2cache.ReadCleanReq_mshr_misses::cpu.inst 3423 # number of ReadCleanReq MSHR misses -system.cpu.l2cache.ReadCleanReq_mshr_misses::total 3423 # number of ReadCleanReq MSHR misses +system.cpu.l2cache.ReadCleanReq_mshr_misses::cpu.inst 3424 # number of ReadCleanReq MSHR misses +system.cpu.l2cache.ReadCleanReq_mshr_misses::total 3424 # number of ReadCleanReq MSHR misses system.cpu.l2cache.ReadSharedReq_mshr_misses::cpu.data 1309 # number of ReadSharedReq MSHR misses system.cpu.l2cache.ReadSharedReq_mshr_misses::total 1309 # number of ReadSharedReq MSHR misses -system.cpu.l2cache.demand_mshr_misses::cpu.inst 3423 # number of demand (read+write) MSHR misses +system.cpu.l2cache.demand_mshr_misses::cpu.inst 3424 # number of demand (read+write) MSHR misses system.cpu.l2cache.demand_mshr_misses::cpu.data 4163 # number of demand (read+write) MSHR misses -system.cpu.l2cache.demand_mshr_misses::total 7586 # number of demand (read+write) MSHR misses -system.cpu.l2cache.overall_mshr_misses::cpu.inst 3423 # number of overall MSHR misses +system.cpu.l2cache.demand_mshr_misses::total 7587 # number of demand (read+write) MSHR misses +system.cpu.l2cache.overall_mshr_misses::cpu.inst 3424 # number of overall MSHR misses system.cpu.l2cache.overall_mshr_misses::cpu.data 4163 # number of overall MSHR misses -system.cpu.l2cache.overall_mshr_misses::total 7586 # number of overall MSHR misses -system.cpu.l2cache.ReadExReq_mshr_miss_latency::cpu.data 186794500 # number of ReadExReq MSHR miss cycles -system.cpu.l2cache.ReadExReq_mshr_miss_latency::total 186794500 # number of ReadExReq MSHR miss cycles -system.cpu.l2cache.ReadCleanReq_mshr_miss_latency::cpu.inst 222839000 # number of ReadCleanReq MSHR miss cycles -system.cpu.l2cache.ReadCleanReq_mshr_miss_latency::total 222839000 # number of ReadCleanReq MSHR miss cycles -system.cpu.l2cache.ReadSharedReq_mshr_miss_latency::cpu.data 88850500 # number of ReadSharedReq MSHR miss 
cycles -system.cpu.l2cache.ReadSharedReq_mshr_miss_latency::total 88850500 # number of ReadSharedReq MSHR miss cycles -system.cpu.l2cache.demand_mshr_miss_latency::cpu.inst 222839000 # number of demand (read+write) MSHR miss cycles -system.cpu.l2cache.demand_mshr_miss_latency::cpu.data 275645000 # number of demand (read+write) MSHR miss cycles -system.cpu.l2cache.demand_mshr_miss_latency::total 498484000 # number of demand (read+write) MSHR miss cycles -system.cpu.l2cache.overall_mshr_miss_latency::cpu.inst 222839000 # number of overall MSHR miss cycles -system.cpu.l2cache.overall_mshr_miss_latency::cpu.data 275645000 # number of overall MSHR miss cycles -system.cpu.l2cache.overall_mshr_miss_latency::total 498484000 # number of overall MSHR miss cycles +system.cpu.l2cache.overall_mshr_misses::total 7587 # number of overall MSHR misses +system.cpu.l2cache.ReadExReq_mshr_miss_latency::cpu.data 186436500 # number of ReadExReq MSHR miss cycles +system.cpu.l2cache.ReadExReq_mshr_miss_latency::total 186436500 # number of ReadExReq MSHR miss cycles +system.cpu.l2cache.ReadCleanReq_mshr_miss_latency::cpu.inst 221700500 # number of ReadCleanReq MSHR miss cycles +system.cpu.l2cache.ReadCleanReq_mshr_miss_latency::total 221700500 # number of ReadCleanReq MSHR miss cycles +system.cpu.l2cache.ReadSharedReq_mshr_miss_latency::cpu.data 89390500 # number of ReadSharedReq MSHR miss cycles +system.cpu.l2cache.ReadSharedReq_mshr_miss_latency::total 89390500 # number of ReadSharedReq MSHR miss cycles +system.cpu.l2cache.demand_mshr_miss_latency::cpu.inst 221700500 # number of demand (read+write) MSHR miss cycles +system.cpu.l2cache.demand_mshr_miss_latency::cpu.data 275827000 # number of demand (read+write) MSHR miss cycles +system.cpu.l2cache.demand_mshr_miss_latency::total 497527500 # number of demand (read+write) MSHR miss cycles +system.cpu.l2cache.overall_mshr_miss_latency::cpu.inst 221700500 # number of overall MSHR miss cycles +system.cpu.l2cache.overall_mshr_miss_latency::cpu.data 275827000 # number of overall MSHR miss cycles +system.cpu.l2cache.overall_mshr_miss_latency::total 497527500 # number of overall MSHR miss cycles system.cpu.l2cache.ReadExReq_mshr_miss_rate::cpu.data 0.994425 # mshr miss rate for ReadExReq accesses system.cpu.l2cache.ReadExReq_mshr_miss_rate::total 0.994425 # mshr miss rate for ReadExReq accesses -system.cpu.l2cache.ReadCleanReq_mshr_miss_rate::cpu.inst 0.085351 # mshr miss rate for ReadCleanReq accesses -system.cpu.l2cache.ReadCleanReq_mshr_miss_rate::total 0.085351 # mshr miss rate for ReadCleanReq accesses +system.cpu.l2cache.ReadCleanReq_mshr_miss_rate::cpu.inst 0.085331 # mshr miss rate for ReadCleanReq accesses +system.cpu.l2cache.ReadCleanReq_mshr_miss_rate::total 0.085331 # mshr miss rate for ReadCleanReq accesses system.cpu.l2cache.ReadSharedReq_mshr_miss_rate::cpu.data 0.797199 # mshr miss rate for ReadSharedReq accesses system.cpu.l2cache.ReadSharedReq_mshr_miss_rate::total 0.797199 # mshr miss rate for ReadSharedReq accesses -system.cpu.l2cache.demand_mshr_miss_rate::cpu.inst 0.085351 # mshr miss rate for demand accesses +system.cpu.l2cache.demand_mshr_miss_rate::cpu.inst 0.085331 # mshr miss rate for demand accesses system.cpu.l2cache.demand_mshr_miss_rate::cpu.data 0.922651 # mshr miss rate for demand accesses -system.cpu.l2cache.demand_mshr_miss_rate::total 0.170025 # mshr miss rate for demand accesses -system.cpu.l2cache.overall_mshr_miss_rate::cpu.inst 0.085351 # mshr miss rate for overall accesses +system.cpu.l2cache.demand_mshr_miss_rate::total 0.169967 # 
mshr miss rate for demand accesses +system.cpu.l2cache.overall_mshr_miss_rate::cpu.inst 0.085331 # mshr miss rate for overall accesses system.cpu.l2cache.overall_mshr_miss_rate::cpu.data 0.922651 # mshr miss rate for overall accesses -system.cpu.l2cache.overall_mshr_miss_rate::total 0.170025 # mshr miss rate for overall accesses -system.cpu.l2cache.ReadExReq_avg_mshr_miss_latency::cpu.data 65450.070077 # average ReadExReq mshr miss latency -system.cpu.l2cache.ReadExReq_avg_mshr_miss_latency::total 65450.070077 # average ReadExReq mshr miss latency -system.cpu.l2cache.ReadCleanReq_avg_mshr_miss_latency::cpu.inst 65100.496640 # average ReadCleanReq mshr miss latency -system.cpu.l2cache.ReadCleanReq_avg_mshr_miss_latency::total 65100.496640 # average ReadCleanReq mshr miss latency -system.cpu.l2cache.ReadSharedReq_avg_mshr_miss_latency::cpu.data 67876.623377 # average ReadSharedReq mshr miss latency -system.cpu.l2cache.ReadSharedReq_avg_mshr_miss_latency::total 67876.623377 # average ReadSharedReq mshr miss latency -system.cpu.l2cache.demand_avg_mshr_miss_latency::cpu.inst 65100.496640 # average overall mshr miss latency -system.cpu.l2cache.demand_avg_mshr_miss_latency::cpu.data 66213.067499 # average overall mshr miss latency -system.cpu.l2cache.demand_avg_mshr_miss_latency::total 65711.046665 # average overall mshr miss latency -system.cpu.l2cache.overall_avg_mshr_miss_latency::cpu.inst 65100.496640 # average overall mshr miss latency -system.cpu.l2cache.overall_avg_mshr_miss_latency::cpu.data 66213.067499 # average overall mshr miss latency -system.cpu.l2cache.overall_avg_mshr_miss_latency::total 65711.046665 # average overall mshr miss latency -system.cpu.toL2Bus.snoop_filter.tot_requests 84140 # Total number of requests made to the snoop filter. -system.cpu.toL2Bus.snoop_filter.hit_single_requests 39625 # Number of requests hitting in the snoop filter with a single holder of the requested data. -system.cpu.toL2Bus.snoop_filter.hit_multi_requests 15034 # Number of requests hitting in the snoop filter with multiple (>1) holders of the requested data. 
+system.cpu.l2cache.overall_mshr_miss_rate::total 0.169967 # mshr miss rate for overall accesses +system.cpu.l2cache.ReadExReq_avg_mshr_miss_latency::cpu.data 65324.632095 # average ReadExReq mshr miss latency +system.cpu.l2cache.ReadExReq_avg_mshr_miss_latency::total 65324.632095 # average ReadExReq mshr miss latency +system.cpu.l2cache.ReadCleanReq_avg_mshr_miss_latency::cpu.inst 64748.977804 # average ReadCleanReq mshr miss latency +system.cpu.l2cache.ReadCleanReq_avg_mshr_miss_latency::total 64748.977804 # average ReadCleanReq mshr miss latency +system.cpu.l2cache.ReadSharedReq_avg_mshr_miss_latency::cpu.data 68289.152024 # average ReadSharedReq mshr miss latency +system.cpu.l2cache.ReadSharedReq_avg_mshr_miss_latency::total 68289.152024 # average ReadSharedReq mshr miss latency +system.cpu.l2cache.demand_avg_mshr_miss_latency::cpu.inst 64748.977804 # average overall mshr miss latency +system.cpu.l2cache.demand_avg_mshr_miss_latency::cpu.data 66256.785972 # average overall mshr miss latency +system.cpu.l2cache.demand_avg_mshr_miss_latency::total 65576.314749 # average overall mshr miss latency +system.cpu.l2cache.overall_avg_mshr_miss_latency::cpu.inst 64748.977804 # average overall mshr miss latency +system.cpu.l2cache.overall_avg_mshr_miss_latency::cpu.data 66256.785972 # average overall mshr miss latency +system.cpu.l2cache.overall_avg_mshr_miss_latency::total 65576.314749 # average overall mshr miss latency +system.cpu.toL2Bus.snoop_filter.tot_requests 84181 # Total number of requests made to the snoop filter. +system.cpu.toL2Bus.snoop_filter.hit_single_requests 39645 # Number of requests hitting in the snoop filter with a single holder of the requested data. +system.cpu.toL2Bus.snoop_filter.hit_multi_requests 15035 # Number of requests hitting in the snoop filter with multiple (>1) holders of the requested data. system.cpu.toL2Bus.snoop_filter.tot_snoops 0 # Total number of snoops made to the snoop filter. system.cpu.toL2Bus.snoop_filter.hit_single_snoops 0 # Number of snoops hitting in the snoop filter with a single holder of the requested data. system.cpu.toL2Bus.snoop_filter.hit_multi_snoops 0 # Number of snoops hitting in the snoop filter with multiple (>1) holders of the requested data. 
-system.cpu.toL2Bus.pwrStateResidencyTicks::UNDEFINED 211714953000 # Cumulative time (in ticks) in various power states -system.cpu.toL2Bus.trans_dist::ReadResp 41746 # Transaction distribution +system.cpu.toL2Bus.pwrStateResidencyTicks::UNDEFINED 225030243000 # Cumulative time (in ticks) in various power states +system.cpu.toL2Bus.trans_dist::ReadResp 41767 # Transaction distribution system.cpu.toL2Bus.trans_dist::WritebackDirty 1010 # Transaction distribution -system.cpu.toL2Bus.trans_dist::WritebackClean 38168 # Transaction distribution +system.cpu.toL2Bus.trans_dist::WritebackClean 38188 # Transaction distribution system.cpu.toL2Bus.trans_dist::CleanEvict 345 # Transaction distribution system.cpu.toL2Bus.trans_dist::ReadExReq 2870 # Transaction distribution system.cpu.toL2Bus.trans_dist::ReadExResp 2870 # Transaction distribution -system.cpu.toL2Bus.trans_dist::ReadCleanReq 40105 # Transaction distribution +system.cpu.toL2Bus.trans_dist::ReadCleanReq 40126 # Transaction distribution system.cpu.toL2Bus.trans_dist::ReadSharedReq 1642 # Transaction distribution -system.cpu.toL2Bus.pkt_count_system.cpu.icache.mem_side::system.cpu.l2cache.cpu_side 118377 # Packet count per connected master and slave (bytes) +system.cpu.toL2Bus.pkt_count_system.cpu.icache.mem_side::system.cpu.l2cache.cpu_side 118439 # Packet count per connected master and slave (bytes) system.cpu.toL2Bus.pkt_count_system.cpu.dcache.mem_side::system.cpu.l2cache.cpu_side 10379 # Packet count per connected master and slave (bytes) -system.cpu.toL2Bus.pkt_count::total 128756 # Packet count per connected master and slave (bytes) -system.cpu.toL2Bus.pkt_size_system.cpu.icache.mem_side::system.cpu.l2cache.cpu_side 5009408 # Cumulative packet size per connected master and slave (bytes) +system.cpu.toL2Bus.pkt_count::total 128818 # Packet count per connected master and slave (bytes) +system.cpu.toL2Bus.pkt_size_system.cpu.icache.mem_side::system.cpu.l2cache.cpu_side 5012032 # Cumulative packet size per connected master and slave (bytes) system.cpu.toL2Bus.pkt_size_system.cpu.dcache.mem_side::system.cpu.l2cache.cpu_side 353408 # Cumulative packet size per connected master and slave (bytes) -system.cpu.toL2Bus.pkt_size::total 5362816 # Cumulative packet size per connected master and slave (bytes) +system.cpu.toL2Bus.pkt_size::total 5365440 # Cumulative packet size per connected master and slave (bytes) system.cpu.toL2Bus.snoops 0 # Total snoops (count) -system.cpu.toL2Bus.snoop_fanout::samples 44617 # Request fanout histogram -system.cpu.toL2Bus.snoop_fanout::mean 0.339243 # Request fanout histogram -system.cpu.toL2Bus.snoop_fanout::stdev 0.473458 # Request fanout histogram +system.cpu.toL2Bus.snoopTraffic 0 # Total snoop traffic (bytes) +system.cpu.toL2Bus.snoop_fanout::samples 44638 # Request fanout histogram +system.cpu.toL2Bus.snoop_fanout::mean 0.339106 # Request fanout histogram +system.cpu.toL2Bus.snoop_fanout::stdev 0.473411 # Request fanout histogram system.cpu.toL2Bus.snoop_fanout::underflows 0 0.00% 0.00% # Request fanout histogram -system.cpu.toL2Bus.snoop_fanout::0 29481 66.08% 66.08% # Request fanout histogram -system.cpu.toL2Bus.snoop_fanout::1 15136 33.92% 100.00% # Request fanout histogram +system.cpu.toL2Bus.snoop_fanout::0 29501 66.09% 66.09% # Request fanout histogram +system.cpu.toL2Bus.snoop_fanout::1 15137 33.91% 100.00% # Request fanout histogram system.cpu.toL2Bus.snoop_fanout::2 0 0.00% 100.00% # Request fanout histogram system.cpu.toL2Bus.snoop_fanout::overflows 0 0.00% 100.00% # Request fanout histogram 
system.cpu.toL2Bus.snoop_fanout::min_value 0 # Request fanout histogram system.cpu.toL2Bus.snoop_fanout::max_value 1 # Request fanout histogram -system.cpu.toL2Bus.snoop_fanout::total 44617 # Request fanout histogram -system.cpu.toL2Bus.reqLayer0.occupancy 81248000 # Layer occupancy (ticks) +system.cpu.toL2Bus.snoop_fanout::total 44638 # Request fanout histogram +system.cpu.toL2Bus.reqLayer0.occupancy 81288500 # Layer occupancy (ticks) system.cpu.toL2Bus.reqLayer0.utilization 0.0 # Layer utilization (%) -system.cpu.toL2Bus.respLayer0.occupancy 60156998 # Layer occupancy (ticks) +system.cpu.toL2Bus.respLayer0.occupancy 60188498 # Layer occupancy (ticks) system.cpu.toL2Bus.respLayer0.utilization 0.0 # Layer utilization (%) system.cpu.toL2Bus.respLayer1.occupancy 6789457 # Layer occupancy (ticks) system.cpu.toL2Bus.respLayer1.utilization 0.0 # Layer utilization (%) -system.membus.pwrStateResidencyTicks::UNDEFINED 211714953000 # Cumulative time (in ticks) in various power states -system.membus.trans_dist::ReadResp 4732 # Transaction distribution +system.membus.pwrStateResidencyTicks::UNDEFINED 225030243000 # Cumulative time (in ticks) in various power states +system.membus.trans_dist::ReadResp 4733 # Transaction distribution system.membus.trans_dist::ReadExReq 2854 # Transaction distribution system.membus.trans_dist::ReadExResp 2854 # Transaction distribution -system.membus.trans_dist::ReadSharedReq 4732 # Transaction distribution -system.membus.pkt_count_system.cpu.l2cache.mem_side::system.physmem.port 15172 # Packet count per connected master and slave (bytes) -system.membus.pkt_count::total 15172 # Packet count per connected master and slave (bytes) -system.membus.pkt_size_system.cpu.l2cache.mem_side::system.physmem.port 485504 # Cumulative packet size per connected master and slave (bytes) -system.membus.pkt_size::total 485504 # Cumulative packet size per connected master and slave (bytes) +system.membus.trans_dist::ReadSharedReq 4733 # Transaction distribution +system.membus.pkt_count_system.cpu.l2cache.mem_side::system.physmem.port 15174 # Packet count per connected master and slave (bytes) +system.membus.pkt_count::total 15174 # Packet count per connected master and slave (bytes) +system.membus.pkt_size_system.cpu.l2cache.mem_side::system.physmem.port 485568 # Cumulative packet size per connected master and slave (bytes) +system.membus.pkt_size::total 485568 # Cumulative packet size per connected master and slave (bytes) system.membus.snoops 0 # Total snoops (count) -system.membus.snoop_fanout::samples 7586 # Request fanout histogram +system.membus.snoopTraffic 0 # Total snoop traffic (bytes) +system.membus.snoop_fanout::samples 7587 # Request fanout histogram system.membus.snoop_fanout::mean 0 # Request fanout histogram system.membus.snoop_fanout::stdev 0 # Request fanout histogram system.membus.snoop_fanout::underflows 0 0.00% 0.00% # Request fanout histogram -system.membus.snoop_fanout::0 7586 100.00% 100.00% # Request fanout histogram +system.membus.snoop_fanout::0 7587 100.00% 100.00% # Request fanout histogram system.membus.snoop_fanout::1 0 0.00% 100.00% # Request fanout histogram system.membus.snoop_fanout::overflows 0 0.00% 100.00% # Request fanout histogram system.membus.snoop_fanout::min_value 0 # Request fanout histogram system.membus.snoop_fanout::max_value 0 # Request fanout histogram -system.membus.snoop_fanout::total 7586 # Request fanout histogram -system.membus.reqLayer0.occupancy 8883500 # Layer occupancy (ticks) +system.membus.snoop_fanout::total 7587 # Request 
fanout histogram +system.membus.reqLayer0.occupancy 9083500 # Layer occupancy (ticks) system.membus.reqLayer0.utilization 0.0 # Layer utilization (%) -system.membus.respLayer1.occupancy 40266000 # Layer occupancy (ticks) +system.membus.respLayer1.occupancy 40284000 # Layer occupancy (ticks) system.membus.respLayer1.utilization 0.0 # Layer utilization (%) ---------- End Simulation Statistics ---------- diff --git a/tests/long/se/30.eon/ref/arm/linux/o3-timing/config.ini b/tests/long/se/30.eon/ref/arm/linux/o3-timing/config.ini index a48b86389..d73a74668 100644 --- a/tests/long/se/30.eon/ref/arm/linux/o3-timing/config.ini +++ b/tests/long/se/30.eon/ref/arm/linux/o3-timing/config.ini @@ -14,6 +14,7 @@ children=clk_domain cpu cpu_clk_domain dvfs_handler membus physmem voltage_domai boot_osflags=a cache_line_size=64 clk_domain=system.clk_domain +default_p_state=UNDEFINED eventq_index=0 exit_on_work_items=false init_param=0 @@ -27,6 +28,10 @@ memories=system.physmem mmap_using_noreserve=false multi_thread=false num_work_ids=16 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null readfile= symbolfile= thermal_components= @@ -72,6 +77,7 @@ cpu_id=0 decodeToFetchDelay=1 decodeToRenameDelay=2 decodeWidth=3 +default_p_state=UNDEFINED dispatchWidth=6 do_checkpoint_insts=true do_quiesce=true @@ -110,6 +116,10 @@ numPhysIntRegs=128 numROBEntries=40 numRobs=1 numThreads=1 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null profile=0 progress_interval=0 renameToDecodeDelay=1 @@ -166,12 +176,17 @@ addr_ranges=0:18446744073709551615 assoc=2 clk_domain=system.cpu_clk_domain clusivity=mostly_incl +default_p_state=UNDEFINED demand_mshr_reserve=1 eventq_index=0 hit_latency=2 is_read_only=false max_miss_count=0 mshrs=6 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null prefetch_on_access=false prefetcher=Null response_latency=2 @@ -190,8 +205,13 @@ type=LRU assoc=2 block_size=64 clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 hit_latency=2 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sequential_access=false size=32768 @@ -214,9 +234,14 @@ walker=system.cpu.dstage2_mmu.stage2_tlb.walker [system.cpu.dstage2_mmu.stage2_tlb.walker] type=ArmTableWalker clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 is_stage2=true num_squash_per_cycle=2 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sys=system [system.cpu.dtb] @@ -230,9 +255,14 @@ walker=system.cpu.dtb.walker [system.cpu.dtb.walker] type=ArmTableWalker clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 is_stage2=false num_squash_per_cycle=2 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sys=system port=system.cpu.toL2Bus.slave[3] @@ -508,12 +538,17 @@ addr_ranges=0:18446744073709551615 assoc=2 clk_domain=system.cpu_clk_domain clusivity=mostly_incl +default_p_state=UNDEFINED demand_mshr_reserve=1 eventq_index=0 hit_latency=1 is_read_only=true max_miss_count=0 mshrs=2 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null prefetch_on_access=false prefetcher=Null response_latency=1 @@ -532,8 +567,13 @@ type=LRU assoc=2 block_size=64 clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED 
eventq_index=0 hit_latency=1 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sequential_access=false size=32768 @@ -591,9 +631,14 @@ walker=system.cpu.istage2_mmu.stage2_tlb.walker [system.cpu.istage2_mmu.stage2_tlb.walker] type=ArmTableWalker clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 is_stage2=true num_squash_per_cycle=2 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sys=system [system.cpu.itb] @@ -607,9 +652,14 @@ walker=system.cpu.itb.walker [system.cpu.itb.walker] type=ArmTableWalker clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 is_stage2=false num_squash_per_cycle=2 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sys=system port=system.cpu.toL2Bus.slave[2] @@ -620,12 +670,17 @@ addr_ranges=0:18446744073709551615 assoc=16 clk_domain=system.cpu_clk_domain clusivity=mostly_excl +default_p_state=UNDEFINED demand_mshr_reserve=1 eventq_index=0 hit_latency=12 is_read_only=false max_miss_count=0 mshrs=16 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null prefetch_on_access=true prefetcher=system.cpu.l2cache.prefetcher response_latency=12 @@ -643,6 +698,7 @@ mem_side=system.membus.slave[1] type=StridePrefetcher cache_snoop=false clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED degree=8 eventq_index=0 latency=1 @@ -653,6 +709,10 @@ on_inst=true on_miss=false on_read=true on_write=true +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null queue_filter=true queue_size=32 queue_squash=true @@ -669,8 +729,13 @@ type=RandomRepl assoc=16 block_size=64 clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 hit_latency=12 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sequential_access=false size=1048576 @@ -678,10 +743,15 @@ size=1048576 type=CoherentXBar children=snoop_filter clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 forward_latency=0 frontend_latency=1 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 point_of_coherency=false +power_model=Null response_latency=1 snoop_filter=system.cpu.toL2Bus.snoop_filter snoop_response_latency=1 @@ -712,7 +782,7 @@ env= errout=cerr euid=100 eventq_index=0 -executable=/dist/m5/cpu2000/binaries/arm/linux/eon +executable=/arm/projectscratch/randd/systems/dist/cpu2000/binaries/arm/linux/eon gid=100 input=cin kvmInSE=false @@ -744,10 +814,15 @@ transition_latency=100000000 [system.membus] type=CoherentXBar clk_domain=system.clk_domain +default_p_state=UNDEFINED eventq_index=0 forward_latency=4 frontend_latency=3 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 point_of_coherency=true +power_model=Null response_latency=2 snoop_filter=Null snoop_response_latency=4 @@ -791,6 +866,7 @@ burst_length=8 channels=1 clk_domain=system.clk_domain conf_table_reported=true +default_p_state=UNDEFINED device_bus_width=8 device_rowbuffer_size=1024 device_size=536870912 @@ -802,7 +878,11 @@ max_accesses_per_row=16 mem_sched_policy=frfcfs min_writes_per_switch=16 null=false +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 page_policy=open_adaptive +power_model=Null range=0:134217727 ranks_per_channel=2 
read_buffer_size=32 diff --git a/tests/long/se/30.eon/ref/arm/linux/o3-timing/simerr b/tests/long/se/30.eon/ref/arm/linux/o3-timing/simerr index 613c6a6b7..3415c9346 100755 --- a/tests/long/se/30.eon/ref/arm/linux/o3-timing/simerr +++ b/tests/long/se/30.eon/ref/arm/linux/o3-timing/simerr @@ -1,5 +1,6 @@ warn: DRAM device capacity (8192 Mbytes) does not match the address range assigned (128 Mbytes) warn: Sockets disabled, not accepting gdb connections +warn: ClockedObject: More than one power state change request encountered within the same simulation tick getting pixel output filename pixels_out.cook opening control file chair.control.cook opening camera file chair.camera diff --git a/tests/long/se/30.eon/ref/arm/linux/o3-timing/simout b/tests/long/se/30.eon/ref/arm/linux/o3-timing/simout index 572268607..7e2bba88d 100755 --- a/tests/long/se/30.eon/ref/arm/linux/o3-timing/simout +++ b/tests/long/se/30.eon/ref/arm/linux/o3-timing/simout @@ -3,10 +3,10 @@ Redirecting stderr to build/ARM/tests/opt/long/se/30.eon/arm/linux/o3-timing/sim gem5 Simulator System. http://gem5.org gem5 is copyrighted software; use the --copyright option for details. -gem5 compiled Mar 15 2016 19:53:43 -gem5 started Mar 15 2016 21:05:26 -gem5 executing on dinar2c11, pid 11410 -command line: build/ARM/gem5.opt -d build/ARM/tests/opt/long/se/30.eon/arm/linux/o3-timing -re /home/stever/gem5-public/tests/run.py build/ARM/tests/opt/long/se/30.eon/arm/linux/o3-timing +gem5 compiled Jul 21 2016 14:37:41 +gem5 started Jul 21 2016 15:06:52 +gem5 executing on e108600-lin, pid 24264 +command line: /work/curdun01/gem5-external.hg/build/ARM/gem5.opt -d build/ARM/tests/opt/long/se/30.eon/arm/linux/o3-timing -re /work/curdun01/gem5-external.hg/tests/testing/../run.py long/se/30.eon/arm/linux/o3-timing Global frequency set at 1000000000000 ticks per second info: Entering event queue @ 0. Starting simulation... 
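The regenerated stats.txt references above keep gem5's one-stat-per-line layout ("name  value  # description"), and the derived entries are quotients of the raw counters printed next to them: each *_avg_mshr_miss_latency is the matching *_mshr_miss_latency divided by *_mshr_misses, and each *_miss_rate is *_misses over *_accesses. The sketch below is a plain-Python illustration (not part of gem5 or of this change); the local stats.txt path and the parse_stats helper are hypothetical, while the stat names and values are taken from the 10.mcf icache hunks above.

    def parse_stats(path):
        # Parse "name  value  # description" lines from a gem5 stats.txt dump.
        stats = {}
        with open(path) as f:
            for line in f:
                body = line.split('#', 1)[0].strip()   # drop the description
                parts = body.split()
                if len(parts) >= 2:
                    try:
                        stats[parts[0]] = float(parts[1])
                    except ValueError:
                        pass                           # skip separators and non-numeric lines
        return stats

    s = parse_stats('stats.txt')                       # hypothetical local copy of the reference file
    pre = 'system.cpu.icache.'
    avg = s[pre + 'ReadReq_mshr_miss_latency::total'] / s[pre + 'ReadReq_mshr_misses::total']
    rate = s[pre + 'ReadReq_misses::total'] / s[pre + 'ReadReq_accesses::total']
    print(avg)   # 716537500 / 40126 ~= 17857.19, matching ReadReq_avg_mshr_miss_latency::total
    print(rate)  # 40126 / 69859909 ~= 0.000574, matching ReadReq_miss_rate::total

The same identities should hold for the dcache and l2cache averages in the hunks above (for example, 186436500 / 2854 ~= 65324.63 for the l2cache ReadExReq MSHR latency), so a check like this is a quick way to confirm that a regenerated reference file is internally consistent.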
diff --git a/tests/long/se/30.eon/ref/arm/linux/o3-timing/stats.txt b/tests/long/se/30.eon/ref/arm/linux/o3-timing/stats.txt index f64410488..cc1788f11 100644 --- a/tests/long/se/30.eon/ref/arm/linux/o3-timing/stats.txt +++ b/tests/long/se/30.eon/ref/arm/linux/o3-timing/stats.txt @@ -4,11 +4,11 @@ sim_seconds 0.111754 # Nu sim_ticks 111753553500 # Number of ticks simulated final_tick 111753553500 # Number of ticks from beginning of simulation (restored from checkpoints and never reset) sim_freq 1000000000000 # Frequency of simulated ticks -host_inst_rate 201687 # Simulator instruction rate (inst/s) -host_op_rate 242148 # Simulator op (including micro ops) rate (op/s) -host_tick_rate 82550264 # Simulator tick rate (ticks/s) -host_mem_usage 334820 # Number of bytes of host memory used -host_seconds 1353.76 # Real time elapsed on the host +host_inst_rate 162111 # Simulator instruction rate (inst/s) +host_op_rate 194632 # Simulator op (including micro ops) rate (op/s) +host_tick_rate 66351635 # Simulator tick rate (ticks/s) +host_mem_usage 287668 # Number of bytes of host memory used +host_seconds 1684.26 # Real time elapsed on the host sim_insts 273037220 # Number of instructions simulated sim_ops 327811602 # Number of ops (including micro ops) simulated system.voltage_domain.voltage 1 # Voltage in Volts @@ -1165,6 +1165,7 @@ system.cpu.toL2Bus.pkt_size_system.cpu.icache.mem_side::system.cpu.l2cache.cpu_s system.cpu.toL2Bus.pkt_size_system.cpu.dcache.mem_side::system.cpu.l2cache.cpu_side 197531008 # Cumulative packet size per connected master and slave (bytes) system.cpu.toL2Bus.pkt_size::total 290513216 # Cumulative packet size per connected master and slave (bytes) system.cpu.toL2Bus.snoops 134350 # Total snoops (count) +system.cpu.toL2Bus.snoopTraffic 5056 # Total snoop traffic (bytes) system.cpu.toL2Bus.snoop_fanout::samples 2404477 # Request fanout histogram system.cpu.toL2Bus.snoop_fanout::mean 0.192237 # Request fanout histogram system.cpu.toL2Bus.snoop_fanout::stdev 0.468638 # Request fanout histogram @@ -1193,6 +1194,7 @@ system.membus.pkt_count::total 169247 # Pa system.membus.pkt_size_system.cpu.l2cache.mem_side::system.physmem.port 5415488 # Cumulative packet size per connected master and slave (bytes) system.membus.pkt_size::total 5415488 # Cumulative packet size per connected master and slave (bytes) system.membus.snoops 0 # Total snoops (count) +system.membus.snoopTraffic 0 # Total snoop traffic (bytes) system.membus.snoop_fanout::samples 84630 # Request fanout histogram system.membus.snoop_fanout::mean 0 # Request fanout histogram system.membus.snoop_fanout::stdev 0 # Request fanout histogram diff --git a/tests/long/se/30.eon/ref/arm/linux/simple-atomic/config.ini b/tests/long/se/30.eon/ref/arm/linux/simple-atomic/config.ini index f27ac4466..1b5061343 100644 --- a/tests/long/se/30.eon/ref/arm/linux/simple-atomic/config.ini +++ b/tests/long/se/30.eon/ref/arm/linux/simple-atomic/config.ini @@ -14,6 +14,7 @@ children=clk_domain cpu cpu_clk_domain dvfs_handler membus physmem voltage_domai boot_osflags=a cache_line_size=64 clk_domain=system.clk_domain +default_p_state=UNDEFINED eventq_index=0 exit_on_work_items=false init_param=0 @@ -27,6 +28,10 @@ memories=system.physmem mmap_using_noreserve=false multi_thread=false num_work_ids=16 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null readfile= symbolfile= thermal_components= @@ -55,6 +60,7 @@ branchPred=Null checker=Null clk_domain=system.cpu_clk_domain cpu_id=0 
+default_p_state=UNDEFINED do_checkpoint_insts=true do_quiesce=true do_statistics_insts=true @@ -73,6 +79,10 @@ max_insts_any_thread=0 max_loads_all_threads=0 max_loads_any_thread=0 numThreads=1 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null profile=0 progress_interval=0 simpoint_start_insts= @@ -106,9 +116,14 @@ walker=system.cpu.dstage2_mmu.stage2_tlb.walker [system.cpu.dstage2_mmu.stage2_tlb.walker] type=ArmTableWalker clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 is_stage2=true num_squash_per_cycle=2 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sys=system [system.cpu.dtb] @@ -122,9 +137,14 @@ walker=system.cpu.dtb.walker [system.cpu.dtb.walker] type=ArmTableWalker clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 is_stage2=false num_squash_per_cycle=2 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sys=system port=system.membus.slave[4] @@ -182,9 +202,14 @@ walker=system.cpu.istage2_mmu.stage2_tlb.walker [system.cpu.istage2_mmu.stage2_tlb.walker] type=ArmTableWalker clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 is_stage2=true num_squash_per_cycle=2 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sys=system [system.cpu.itb] @@ -198,9 +223,14 @@ walker=system.cpu.itb.walker [system.cpu.itb.walker] type=ArmTableWalker clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 is_stage2=false num_squash_per_cycle=2 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sys=system port=system.membus.slave[3] @@ -218,7 +248,7 @@ env= errout=cerr euid=100 eventq_index=0 -executable=/scratch/nilay/GEM5/dist/m5/cpu2000/binaries/arm/linux/eon +executable=/arm/projectscratch/randd/systems/dist/cpu2000/binaries/arm/linux/eon gid=100 input=cin kvmInSE=false @@ -250,10 +280,15 @@ transition_latency=100000000 [system.membus] type=CoherentXBar clk_domain=system.clk_domain +default_p_state=UNDEFINED eventq_index=0 forward_latency=4 frontend_latency=3 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 point_of_coherency=true +power_model=Null response_latency=2 snoop_filter=Null snoop_response_latency=4 @@ -268,11 +303,16 @@ type=SimpleMemory bandwidth=73.000000 clk_domain=system.clk_domain conf_table_reported=true +default_p_state=UNDEFINED eventq_index=0 in_addr_map=true latency=30000 latency_var=0 null=false +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null range=0:134217727 port=system.membus.master[0] diff --git a/tests/long/se/30.eon/ref/arm/linux/simple-atomic/simerr b/tests/long/se/30.eon/ref/arm/linux/simple-atomic/simerr index a25196116..c881283f7 100755 --- a/tests/long/se/30.eon/ref/arm/linux/simple-atomic/simerr +++ b/tests/long/se/30.eon/ref/arm/linux/simple-atomic/simerr @@ -1,4 +1,5 @@ warn: Sockets disabled, not accepting gdb connections +warn: ClockedObject: More than one power state change request encountered within the same simulation tick getting pixel output filename pixels_out.cook opening control file chair.control.cook opening camera file chair.camera diff --git a/tests/long/se/30.eon/ref/arm/linux/simple-atomic/simout b/tests/long/se/30.eon/ref/arm/linux/simple-atomic/simout index a48a8bb5c..154af3aae 
100755 --- a/tests/long/se/30.eon/ref/arm/linux/simple-atomic/simout +++ b/tests/long/se/30.eon/ref/arm/linux/simple-atomic/simout @@ -3,10 +3,10 @@ Redirecting stderr to build/ARM/tests/opt/long/se/30.eon/arm/linux/simple-atomic gem5 Simulator System. http://gem5.org gem5 is copyrighted software; use the --copyright option for details. -gem5 compiled Sep 14 2015 23:29:19 -gem5 started Sep 15 2015 00:56:31 -gem5 executing on ribera.cs.wisc.edu -command line: build/ARM/gem5.opt -d build/ARM/tests/opt/long/se/30.eon/arm/linux/simple-atomic -re /scratch/nilay/GEM5/gem5/tests/run.py build/ARM/tests/opt/long/se/30.eon/arm/linux/simple-atomic +gem5 compiled Jul 21 2016 14:37:41 +gem5 started Jul 21 2016 14:54:12 +gem5 executing on e108600-lin, pid 23918 +command line: /work/curdun01/gem5-external.hg/build/ARM/gem5.opt -d build/ARM/tests/opt/long/se/30.eon/arm/linux/simple-atomic -re /work/curdun01/gem5-external.hg/tests/testing/../run.py long/se/30.eon/arm/linux/simple-atomic Global frequency set at 1000000000000 ticks per second info: Entering event queue @ 0. Starting simulation... diff --git a/tests/long/se/30.eon/ref/arm/linux/simple-atomic/stats.txt b/tests/long/se/30.eon/ref/arm/linux/simple-atomic/stats.txt index ddaf7206c..e42324626 100644 --- a/tests/long/se/30.eon/ref/arm/linux/simple-atomic/stats.txt +++ b/tests/long/se/30.eon/ref/arm/linux/simple-atomic/stats.txt @@ -4,11 +4,11 @@ sim_seconds 0.201717 # Nu sim_ticks 201717314000 # Number of ticks simulated final_tick 201717314000 # Number of ticks from beginning of simulation (restored from checkpoints and never reset) sim_freq 1000000000000 # Frequency of simulated ticks -host_inst_rate 1421524 # Simulator instruction rate (inst/s) -host_op_rate 1706697 # Simulator op (including micro ops) rate (op/s) -host_tick_rate 1050207028 # Simulator tick rate (ticks/s) -host_mem_usage 310740 # Number of bytes of host memory used -host_seconds 192.07 # Real time elapsed on the host +host_inst_rate 732440 # Simulator instruction rate (inst/s) +host_op_rate 879375 # Simulator op (including micro ops) rate (op/s) +host_tick_rate 541118678 # Simulator tick rate (ticks/s) +host_mem_usage 263976 # Number of bytes of host memory used +host_seconds 372.78 # Real time elapsed on the host sim_insts 273037595 # Number of instructions simulated sim_ops 327811950 # Number of ops (including micro ops) simulated system.voltage_domain.voltage 1 # Voltage in Volts @@ -237,6 +237,7 @@ system.membus.pkt_size_system.cpu.icache_port::system.physmem.port 1394641096 system.membus.pkt_size_system.cpu.dcache_port::system.physmem.port 880756979 # Cumulative packet size per connected master and slave (bytes) system.membus.pkt_size::total 2275398075 # Cumulative packet size per connected master and slave (bytes) system.membus.snoops 0 # Total snoops (count) +system.membus.snoopTraffic 0 # Total snoop traffic (bytes) system.membus.snoop_fanout::samples 517024352 # Request fanout histogram system.membus.snoop_fanout::mean 0.674359 # Request fanout histogram system.membus.snoop_fanout::stdev 0.468614 # Request fanout histogram diff --git a/tests/long/se/30.eon/ref/arm/linux/simple-timing/config.ini b/tests/long/se/30.eon/ref/arm/linux/simple-timing/config.ini index 72dade1ff..0faba130d 100644 --- a/tests/long/se/30.eon/ref/arm/linux/simple-timing/config.ini +++ b/tests/long/se/30.eon/ref/arm/linux/simple-timing/config.ini @@ -14,6 +14,7 @@ children=clk_domain cpu cpu_clk_domain dvfs_handler membus physmem voltage_domai boot_osflags=a cache_line_size=64 
clk_domain=system.clk_domain +default_p_state=UNDEFINED eventq_index=0 exit_on_work_items=false init_param=0 @@ -27,6 +28,10 @@ memories=system.physmem mmap_using_noreserve=false multi_thread=false num_work_ids=16 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null readfile= symbolfile= thermal_components= @@ -55,6 +60,7 @@ branchPred=Null checker=Null clk_domain=system.cpu_clk_domain cpu_id=0 +default_p_state=UNDEFINED do_checkpoint_insts=true do_quiesce=true do_statistics_insts=true @@ -72,6 +78,10 @@ max_insts_any_thread=0 max_loads_all_threads=0 max_loads_any_thread=0 numThreads=1 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null profile=0 progress_interval=0 simpoint_start_insts= @@ -90,12 +100,17 @@ addr_ranges=0:18446744073709551615 assoc=2 clk_domain=system.cpu_clk_domain clusivity=mostly_incl +default_p_state=UNDEFINED demand_mshr_reserve=1 eventq_index=0 hit_latency=2 is_read_only=false max_miss_count=0 mshrs=4 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null prefetch_on_access=false prefetcher=Null response_latency=2 @@ -114,8 +129,13 @@ type=LRU assoc=2 block_size=64 clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 hit_latency=2 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sequential_access=false size=262144 @@ -138,9 +158,14 @@ walker=system.cpu.dstage2_mmu.stage2_tlb.walker [system.cpu.dstage2_mmu.stage2_tlb.walker] type=ArmTableWalker clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 is_stage2=true num_squash_per_cycle=2 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sys=system [system.cpu.dtb] @@ -154,9 +179,14 @@ walker=system.cpu.dtb.walker [system.cpu.dtb.walker] type=ArmTableWalker clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 is_stage2=false num_squash_per_cycle=2 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sys=system port=system.cpu.toL2Bus.slave[3] @@ -167,12 +197,17 @@ addr_ranges=0:18446744073709551615 assoc=2 clk_domain=system.cpu_clk_domain clusivity=mostly_incl +default_p_state=UNDEFINED demand_mshr_reserve=1 eventq_index=0 hit_latency=2 is_read_only=true max_miss_count=0 mshrs=4 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null prefetch_on_access=false prefetcher=Null response_latency=2 @@ -191,8 +226,13 @@ type=LRU assoc=2 block_size=64 clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 hit_latency=2 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sequential_access=false size=131072 @@ -250,9 +290,14 @@ walker=system.cpu.istage2_mmu.stage2_tlb.walker [system.cpu.istage2_mmu.stage2_tlb.walker] type=ArmTableWalker clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 is_stage2=true num_squash_per_cycle=2 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sys=system [system.cpu.itb] @@ -266,9 +311,14 @@ walker=system.cpu.itb.walker [system.cpu.itb.walker] type=ArmTableWalker clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 is_stage2=false num_squash_per_cycle=2 +p_state_clk_gate_bins=20 
+p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sys=system port=system.cpu.toL2Bus.slave[2] @@ -279,12 +329,17 @@ addr_ranges=0:18446744073709551615 assoc=8 clk_domain=system.cpu_clk_domain clusivity=mostly_incl +default_p_state=UNDEFINED demand_mshr_reserve=1 eventq_index=0 hit_latency=20 is_read_only=false max_miss_count=0 mshrs=20 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null prefetch_on_access=false prefetcher=Null response_latency=20 @@ -303,8 +358,13 @@ type=LRU assoc=8 block_size=64 clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 hit_latency=20 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sequential_access=false size=2097152 @@ -312,10 +372,15 @@ size=2097152 type=CoherentXBar children=snoop_filter clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 forward_latency=0 frontend_latency=1 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 point_of_coherency=false +power_model=Null response_latency=1 snoop_filter=system.cpu.toL2Bus.snoop_filter snoop_response_latency=1 @@ -346,7 +411,7 @@ env= errout=cerr euid=100 eventq_index=0 -executable=/dist/m5/cpu2000/binaries/arm/linux/eon +executable=/arm/projectscratch/randd/systems/dist/cpu2000/binaries/arm/linux/eon gid=100 input=cin kvmInSE=false @@ -378,10 +443,15 @@ transition_latency=100000000 [system.membus] type=CoherentXBar clk_domain=system.clk_domain +default_p_state=UNDEFINED eventq_index=0 forward_latency=4 frontend_latency=3 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 point_of_coherency=true +power_model=Null response_latency=2 snoop_filter=Null snoop_response_latency=4 @@ -396,11 +466,16 @@ type=SimpleMemory bandwidth=73.000000 clk_domain=system.clk_domain conf_table_reported=true +default_p_state=UNDEFINED eventq_index=0 in_addr_map=true latency=30000 latency_var=0 null=false +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null range=0:134217727 port=system.membus.master[0] diff --git a/tests/long/se/30.eon/ref/arm/linux/simple-timing/simerr b/tests/long/se/30.eon/ref/arm/linux/simple-timing/simerr index a25196116..c881283f7 100755 --- a/tests/long/se/30.eon/ref/arm/linux/simple-timing/simerr +++ b/tests/long/se/30.eon/ref/arm/linux/simple-timing/simerr @@ -1,4 +1,5 @@ warn: Sockets disabled, not accepting gdb connections +warn: ClockedObject: More than one power state change request encountered within the same simulation tick getting pixel output filename pixels_out.cook opening control file chair.control.cook opening camera file chair.camera diff --git a/tests/long/se/30.eon/ref/arm/linux/simple-timing/simout b/tests/long/se/30.eon/ref/arm/linux/simple-timing/simout index f8e2a4c4d..bd192fb8a 100755 --- a/tests/long/se/30.eon/ref/arm/linux/simple-timing/simout +++ b/tests/long/se/30.eon/ref/arm/linux/simple-timing/simout @@ -3,10 +3,10 @@ Redirecting stderr to build/ARM/tests/opt/long/se/30.eon/arm/linux/simple-timing gem5 Simulator System. http://gem5.org gem5 is copyrighted software; use the --copyright option for details. 
-gem5 compiled Mar 16 2016 23:07:21 -gem5 started Mar 16 2016 23:13:40 -gem5 executing on dinar2c11, pid 25474 -command line: build/ARM/gem5.opt -d build/ARM/tests/opt/long/se/30.eon/arm/linux/simple-timing -re /home/stever/gem5-public/tests/run.py build/ARM/tests/opt/long/se/30.eon/arm/linux/simple-timing +gem5 compiled Jul 21 2016 14:37:41 +gem5 started Jul 21 2016 15:00:59 +gem5 executing on e108600-lin, pid 24143 +command line: /work/curdun01/gem5-external.hg/build/ARM/gem5.opt -d build/ARM/tests/opt/long/se/30.eon/arm/linux/simple-timing -re /work/curdun01/gem5-external.hg/tests/testing/../run.py long/se/30.eon/arm/linux/simple-timing Global frequency set at 1000000000000 ticks per second info: Entering event queue @ 0. Starting simulation... diff --git a/tests/long/se/30.eon/ref/arm/linux/simple-timing/stats.txt b/tests/long/se/30.eon/ref/arm/linux/simple-timing/stats.txt index ea2a43ab9..fd046e3e7 100644 --- a/tests/long/se/30.eon/ref/arm/linux/simple-timing/stats.txt +++ b/tests/long/se/30.eon/ref/arm/linux/simple-timing/stats.txt @@ -4,11 +4,11 @@ sim_seconds 0.517291 # Nu sim_ticks 517291025500 # Number of ticks simulated final_tick 517291025500 # Number of ticks from beginning of simulation (restored from checkpoints and never reset) sim_freq 1000000000000 # Frequency of simulated ticks -host_inst_rate 968617 # Simulator instruction rate (inst/s) -host_op_rate 1162861 # Simulator op (including micro ops) rate (op/s) -host_tick_rate 1837127354 # Simulator tick rate (ticks/s) -host_mem_usage 320856 # Number of bytes of host memory used -host_seconds 281.58 # Real time elapsed on the host +host_inst_rate 451771 # Simulator instruction rate (inst/s) +host_op_rate 542368 # Simulator op (including micro ops) rate (op/s) +host_tick_rate 856851233 # Simulator tick rate (ticks/s) +host_mem_usage 273716 # Number of bytes of host memory used +host_seconds 603.71 # Real time elapsed on the host sim_insts 272739286 # Number of instructions simulated sim_ops 327433744 # Number of ops (including micro ops) simulated system.voltage_domain.voltage 1 # Voltage in Volts @@ -620,6 +620,7 @@ system.cpu.toL2Bus.pkt_size_system.cpu.icache.mem_side::system.cpu.l2cache.cpu_s system.cpu.toL2Bus.pkt_size_system.cpu.dcache.mem_side::system.cpu.l2cache.cpu_side 350464 # Cumulative packet size per connected master and slave (bytes) system.cpu.toL2Bus.pkt_size::total 2232000 # Cumulative packet size per connected master and slave (bytes) system.cpu.toL2Bus.snoops 0 # Total snoops (count) +system.cpu.toL2Bus.snoopTraffic 0 # Total snoop traffic (bytes) system.cpu.toL2Bus.snoop_fanout::samples 20081 # Request fanout histogram system.cpu.toL2Bus.snoop_fanout::mean 0.386335 # Request fanout histogram system.cpu.toL2Bus.snoop_fanout::stdev 0.486921 # Request fanout histogram @@ -647,6 +648,7 @@ system.membus.pkt_count::total 13664 # Pa system.membus.pkt_size_system.cpu.l2cache.mem_side::system.physmem.port 437248 # Cumulative packet size per connected master and slave (bytes) system.membus.pkt_size::total 437248 # Cumulative packet size per connected master and slave (bytes) system.membus.snoops 0 # Total snoops (count) +system.membus.snoopTraffic 0 # Total snoop traffic (bytes) system.membus.snoop_fanout::samples 6833 # Request fanout histogram system.membus.snoop_fanout::mean 0 # Request fanout histogram system.membus.snoop_fanout::stdev 0 # Request fanout histogram diff --git a/tests/long/se/40.perlbmk/ref/alpha/tru64/minor-timing/config.ini b/tests/long/se/40.perlbmk/ref/alpha/tru64/minor-timing/config.ini 
index cd33c8a8d..ca9122542 100644 --- a/tests/long/se/40.perlbmk/ref/alpha/tru64/minor-timing/config.ini +++ b/tests/long/se/40.perlbmk/ref/alpha/tru64/minor-timing/config.ini @@ -14,7 +14,9 @@ children=clk_domain cpu cpu_clk_domain dvfs_handler membus physmem voltage_domai boot_osflags=a cache_line_size=64 clk_domain=system.clk_domain +default_p_state=UNDEFINED eventq_index=0 +exit_on_work_items=false init_param=0 kernel= kernel_addr_check=true @@ -24,9 +26,16 @@ mem_mode=timing mem_ranges= memories=system.physmem mmap_using_noreserve=false +multi_thread=false num_work_ids=16 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null readfile= symbolfile= +thermal_components= +thermal_model=Null work_begin_ckpt_count=0 work_begin_cpu_id_exit=-1 work_begin_exit_count=0 @@ -55,6 +64,7 @@ decodeCycleInput=true decodeInputBufferSize=3 decodeInputWidth=2 decodeToExecuteForwardDelay=1 +default_p_state=UNDEFINED do_checkpoint_insts=true do_quiesce=true do_statistics_insts=true @@ -97,12 +107,17 @@ max_insts_any_thread=0 max_loads_all_threads=0 max_loads_any_thread=0 numThreads=1 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null profile=0 progress_interval=0 simpoint_start_insts= socket_id=0 switched_out=false system=system +threadPolicy=RoundRobin tracer=system.cpu.tracer workload=system.cpu.workload dcache_port=system.cpu.dcache.cpu_side @@ -118,11 +133,18 @@ choicePredictorSize=8192 eventq_index=0 globalCtrBits=2 globalPredictorSize=8192 +indirectHashGHR=true +indirectHashTargets=true +indirectPathLength=3 +indirectSets=256 +indirectTagSize=16 +indirectWays=2 instShiftAmt=2 localCtrBits=2 localHistoryTableSize=2048 localPredictorSize=2048 numThreads=1 +useIndirect=true [system.cpu.dcache] type=Cache @@ -130,13 +152,18 @@ children=tags addr_ranges=0:18446744073709551615 assoc=2 clk_domain=system.cpu_clk_domain +clusivity=mostly_incl +default_p_state=UNDEFINED demand_mshr_reserve=1 eventq_index=0 -forward_snoops=true hit_latency=2 is_read_only=false max_miss_count=0 mshrs=4 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null prefetch_on_access=false prefetcher=Null response_latency=2 @@ -146,6 +173,7 @@ system=system tags=system.cpu.dcache.tags tgts_per_mshr=20 write_buffers=8 +writeback_clean=false cpu_side=system.cpu.dcache_port mem_side=system.cpu.toL2Bus.slave[1] @@ -154,8 +182,13 @@ type=LRU assoc=2 block_size=64 clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 hit_latency=2 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sequential_access=false size=262144 @@ -553,13 +586,18 @@ children=tags addr_ranges=0:18446744073709551615 assoc=2 clk_domain=system.cpu_clk_domain +clusivity=mostly_incl +default_p_state=UNDEFINED demand_mshr_reserve=1 eventq_index=0 -forward_snoops=true hit_latency=2 is_read_only=true max_miss_count=0 mshrs=4 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null prefetch_on_access=false prefetcher=Null response_latency=2 @@ -569,6 +607,7 @@ system=system tags=system.cpu.icache.tags tgts_per_mshr=20 write_buffers=8 +writeback_clean=true cpu_side=system.cpu.icache_port mem_side=system.cpu.toL2Bus.slave[0] @@ -577,8 +616,13 @@ type=LRU assoc=2 block_size=64 clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 hit_latency=2 +p_state_clk_gate_bins=20 
+p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sequential_access=false size=131072 @@ -602,13 +646,18 @@ children=tags addr_ranges=0:18446744073709551615 assoc=8 clk_domain=system.cpu_clk_domain +clusivity=mostly_incl +default_p_state=UNDEFINED demand_mshr_reserve=1 eventq_index=0 -forward_snoops=true hit_latency=20 is_read_only=false max_miss_count=0 mshrs=20 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null prefetch_on_access=false prefetcher=Null response_latency=20 @@ -618,6 +667,7 @@ system=system tags=system.cpu.l2cache.tags tgts_per_mshr=12 write_buffers=8 +writeback_clean=false cpu_side=system.cpu.toL2Bus.master[0] mem_side=system.membus.slave[1] @@ -626,19 +676,31 @@ type=LRU assoc=8 block_size=64 clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 hit_latency=20 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sequential_access=false size=2097152 [system.cpu.toL2Bus] type=CoherentXBar +children=snoop_filter clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 forward_latency=0 frontend_latency=1 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +point_of_coherency=false +power_model=Null response_latency=1 -snoop_filter=Null +snoop_filter=system.cpu.toL2Bus.snoop_filter snoop_response_latency=1 system=system use_default_range=false @@ -646,6 +708,13 @@ width=32 master=system.cpu.l2cache.cpu_side slave=system.cpu.icache.mem_side system.cpu.dcache.mem_side +[system.cpu.toL2Bus.snoop_filter] +type=SnoopFilter +eventq_index=0 +lookup_latency=0 +max_capacity=8388608 +system=system + [system.cpu.tracer] type=ExeTracer eventq_index=0 @@ -660,7 +729,7 @@ env= errout=cerr euid=100 eventq_index=0 -executable=/scratch/nilay/GEM5/dist/m5/cpu2000/binaries/alpha/tru64/perlbmk +executable=/arm/projectscratch/randd/systems/dist/cpu2000/binaries/alpha/tru64/perlbmk gid=100 input=cin kvmInSE=false @@ -692,9 +761,15 @@ transition_latency=100000000 [system.membus] type=CoherentXBar clk_domain=system.clk_domain +default_p_state=UNDEFINED eventq_index=0 forward_latency=4 frontend_latency=3 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +point_of_coherency=true +power_model=Null response_latency=2 snoop_filter=Null snoop_response_latency=4 @@ -738,6 +813,7 @@ burst_length=8 channels=1 clk_domain=system.clk_domain conf_table_reported=true +default_p_state=UNDEFINED device_bus_width=8 device_rowbuffer_size=1024 device_size=536870912 @@ -749,7 +825,11 @@ max_accesses_per_row=16 mem_sched_policy=frfcfs min_writes_per_switch=16 null=false +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 page_policy=open_adaptive +power_model=Null range=0:134217727 ranks_per_channel=2 read_buffer_size=32 diff --git a/tests/long/se/40.perlbmk/ref/alpha/tru64/minor-timing/simerr b/tests/long/se/40.perlbmk/ref/alpha/tru64/minor-timing/simerr index 41d370561..8954fa36f 100755 --- a/tests/long/se/40.perlbmk/ref/alpha/tru64/minor-timing/simerr +++ b/tests/long/se/40.perlbmk/ref/alpha/tru64/minor-timing/simerr @@ -1,5 +1,6 @@ warn: DRAM device capacity (8192 Mbytes) does not match the address range assigned (128 Mbytes) warn: Sockets disabled, not accepting gdb connections +warn: ClockedObject: More than one power state change request encountered within the same simulation tick warn: Prefetch instructions in Alpha do not 
do anything warn: Prefetch instructions in Alpha do not do anything warn: Prefetch instructions in Alpha do not do anything diff --git a/tests/long/se/40.perlbmk/ref/alpha/tru64/minor-timing/simout b/tests/long/se/40.perlbmk/ref/alpha/tru64/minor-timing/simout index 0aa9c6519..b5d01fab2 100755 --- a/tests/long/se/40.perlbmk/ref/alpha/tru64/minor-timing/simout +++ b/tests/long/se/40.perlbmk/ref/alpha/tru64/minor-timing/simout @@ -3,10 +3,10 @@ Redirecting stderr to build/ALPHA/tests/opt/long/se/40.perlbmk/alpha/tru64/minor gem5 Simulator System. http://gem5.org gem5 is copyrighted software; use the --copyright option for details. -gem5 compiled Sep 14 2015 20:54:01 -gem5 started Sep 14 2015 21:30:12 -gem5 executing on ribera.cs.wisc.edu -command line: build/ALPHA/gem5.opt -d build/ALPHA/tests/opt/long/se/40.perlbmk/alpha/tru64/minor-timing -re /scratch/nilay/GEM5/gem5/tests/run.py build/ALPHA/tests/opt/long/se/40.perlbmk/alpha/tru64/minor-timing +gem5 compiled Jul 19 2016 12:23:51 +gem5 started Jul 21 2016 14:09:28 +gem5 executing on e108600-lin, pid 4301 +command line: /work/curdun01/gem5-external.hg/build/ALPHA/gem5.opt -d build/ALPHA/tests/opt/long/se/40.perlbmk/alpha/tru64/minor-timing -re /work/curdun01/gem5-external.hg/tests/testing/../run.py long/se/40.perlbmk/alpha/tru64/minor-timing Global frequency set at 1000000000000 ticks per second info: Entering event queue @ 0. Starting simulation... @@ -650,4 +650,4 @@ info: Increasing stack size by one page. 2000: 2845746745 1000: 2068042552 0: 290958364 -Exiting @ tick 560939659000 because target called exit() +Exiting @ tick 508215534000 because target called exit() diff --git a/tests/long/se/40.perlbmk/ref/alpha/tru64/minor-timing/stats.txt b/tests/long/se/40.perlbmk/ref/alpha/tru64/minor-timing/stats.txt index 383495cbc..f21f0115d 100644 --- a/tests/long/se/40.perlbmk/ref/alpha/tru64/minor-timing/stats.txt +++ b/tests/long/se/40.perlbmk/ref/alpha/tru64/minor-timing/stats.txt @@ -1,70 +1,70 @@ ---------- Begin Simulation Statistics ---------- -sim_seconds 0.504258 # Number of seconds simulated -sim_ticks 504258263000 # Number of ticks simulated -final_tick 504258263000 # Number of ticks from beginning of simulation (restored from checkpoints and never reset) +sim_seconds 0.508216 # Number of seconds simulated +sim_ticks 508215534000 # Number of ticks simulated +final_tick 508215534000 # Number of ticks from beginning of simulation (restored from checkpoints and never reset) sim_freq 1000000000000 # Frequency of simulated ticks -host_inst_rate 532728 # Simulator instruction rate (inst/s) -host_op_rate 532728 # Simulator op (including micro ops) rate (op/s) -host_tick_rate 289228716 # Simulator tick rate (ticks/s) -host_mem_usage 306284 # Number of bytes of host memory used -host_seconds 1743.46 # Real time elapsed on the host +host_inst_rate 266071 # Simulator instruction rate (inst/s) +host_op_rate 266071 # Simulator op (including micro ops) rate (op/s) +host_tick_rate 145588775 # Simulator tick rate (ticks/s) +host_mem_usage 258712 # Number of bytes of host memory used +host_seconds 3490.76 # Real time elapsed on the host sim_insts 928789150 # Number of instructions simulated sim_ops 928789150 # Number of ops (including micro ops) simulated system.voltage_domain.voltage 1 # Voltage in Volts system.clk_domain.clock 1000 # Clock period in ticks -system.physmem.pwrStateResidencyTicks::UNDEFINED 504258263000 # Cumulative time (in ticks) in various power states -system.physmem.bytes_read::cpu.inst 185088 # Number of bytes read from this 
memory -system.physmem.bytes_read::cpu.data 18520000 # Number of bytes read from this memory -system.physmem.bytes_read::total 18705088 # Number of bytes read from this memory -system.physmem.bytes_inst_read::cpu.inst 185088 # Number of instructions bytes read from this memory -system.physmem.bytes_inst_read::total 185088 # Number of instructions bytes read from this memory +system.physmem.pwrStateResidencyTicks::UNDEFINED 508215534000 # Cumulative time (in ticks) in various power states +system.physmem.bytes_read::cpu.inst 185920 # Number of bytes read from this memory +system.physmem.bytes_read::cpu.data 18520192 # Number of bytes read from this memory +system.physmem.bytes_read::total 18706112 # Number of bytes read from this memory +system.physmem.bytes_inst_read::cpu.inst 185920 # Number of instructions bytes read from this memory +system.physmem.bytes_inst_read::total 185920 # Number of instructions bytes read from this memory system.physmem.bytes_written::writebacks 4267712 # Number of bytes written to this memory system.physmem.bytes_written::total 4267712 # Number of bytes written to this memory -system.physmem.num_reads::cpu.inst 2892 # Number of read requests responded to by this memory -system.physmem.num_reads::cpu.data 289375 # Number of read requests responded to by this memory -system.physmem.num_reads::total 292267 # Number of read requests responded to by this memory +system.physmem.num_reads::cpu.inst 2905 # Number of read requests responded to by this memory +system.physmem.num_reads::cpu.data 289378 # Number of read requests responded to by this memory +system.physmem.num_reads::total 292283 # Number of read requests responded to by this memory system.physmem.num_writes::writebacks 66683 # Number of write requests responded to by this memory system.physmem.num_writes::total 66683 # Number of write requests responded to by this memory -system.physmem.bw_read::cpu.inst 367050 # Total read bandwidth from this memory (bytes/s) -system.physmem.bw_read::cpu.data 36727212 # Total read bandwidth from this memory (bytes/s) -system.physmem.bw_read::total 37094262 # Total read bandwidth from this memory (bytes/s) -system.physmem.bw_inst_read::cpu.inst 367050 # Instruction read bandwidth from this memory (bytes/s) -system.physmem.bw_inst_read::total 367050 # Instruction read bandwidth from this memory (bytes/s) -system.physmem.bw_write::writebacks 8463346 # Write bandwidth from this memory (bytes/s) -system.physmem.bw_write::total 8463346 # Write bandwidth from this memory (bytes/s) -system.physmem.bw_total::writebacks 8463346 # Total bandwidth to/from this memory (bytes/s) -system.physmem.bw_total::cpu.inst 367050 # Total bandwidth to/from this memory (bytes/s) -system.physmem.bw_total::cpu.data 36727212 # Total bandwidth to/from this memory (bytes/s) -system.physmem.bw_total::total 45557607 # Total bandwidth to/from this memory (bytes/s) -system.physmem.readReqs 292267 # Number of read requests accepted +system.physmem.bw_read::cpu.inst 365829 # Total read bandwidth from this memory (bytes/s) +system.physmem.bw_read::cpu.data 36441609 # Total read bandwidth from this memory (bytes/s) +system.physmem.bw_read::total 36807438 # Total read bandwidth from this memory (bytes/s) +system.physmem.bw_inst_read::cpu.inst 365829 # Instruction read bandwidth from this memory (bytes/s) +system.physmem.bw_inst_read::total 365829 # Instruction read bandwidth from this memory (bytes/s) +system.physmem.bw_write::writebacks 8397445 # Write bandwidth from this memory (bytes/s) 
+system.physmem.bw_write::total 8397445 # Write bandwidth from this memory (bytes/s) +system.physmem.bw_total::writebacks 8397445 # Total bandwidth to/from this memory (bytes/s) +system.physmem.bw_total::cpu.inst 365829 # Total bandwidth to/from this memory (bytes/s) +system.physmem.bw_total::cpu.data 36441609 # Total bandwidth to/from this memory (bytes/s) +system.physmem.bw_total::total 45204883 # Total bandwidth to/from this memory (bytes/s) +system.physmem.readReqs 292283 # Number of read requests accepted system.physmem.writeReqs 66683 # Number of write requests accepted -system.physmem.readBursts 292267 # Number of DRAM read bursts, including those serviced by the write queue +system.physmem.readBursts 292283 # Number of DRAM read bursts, including those serviced by the write queue system.physmem.writeBursts 66683 # Number of DRAM write bursts, including those merged in the write queue -system.physmem.bytesReadDRAM 18685248 # Total number of bytes read from DRAM -system.physmem.bytesReadWrQ 19840 # Total number of bytes read from write queue -system.physmem.bytesWritten 4266176 # Total number of bytes written to DRAM -system.physmem.bytesReadSys 18705088 # Total read bytes from the system interface side +system.physmem.bytesReadDRAM 18687040 # Total number of bytes read from DRAM +system.physmem.bytesReadWrQ 19072 # Total number of bytes read from write queue +system.physmem.bytesWritten 4265984 # Total number of bytes written to DRAM +system.physmem.bytesReadSys 18706112 # Total read bytes from the system interface side system.physmem.bytesWrittenSys 4267712 # Total written bytes from the system interface side -system.physmem.servicedByWrQ 310 # Number of DRAM read bursts serviced by the write queue +system.physmem.servicedByWrQ 298 # Number of DRAM read bursts serviced by the write queue system.physmem.mergedWrBursts 0 # Number of DRAM write bursts merged with an existing one system.physmem.neitherReadNorWriteReqs 0 # Number of requests that are neither read nor write -system.physmem.perBankRdBursts::0 18033 # Per bank write bursts -system.physmem.perBankRdBursts::1 18363 # Per bank write bursts -system.physmem.perBankRdBursts::2 18394 # Per bank write bursts -system.physmem.perBankRdBursts::3 18341 # Per bank write bursts -system.physmem.perBankRdBursts::4 18245 # Per bank write bursts -system.physmem.perBankRdBursts::5 18249 # Per bank write bursts -system.physmem.perBankRdBursts::6 18313 # Per bank write bursts -system.physmem.perBankRdBursts::7 18290 # Per bank write bursts -system.physmem.perBankRdBursts::8 18231 # Per bank write bursts -system.physmem.perBankRdBursts::9 18232 # Per bank write bursts -system.physmem.perBankRdBursts::10 18229 # Per bank write bursts -system.physmem.perBankRdBursts::11 18376 # Per bank write bursts -system.physmem.perBankRdBursts::12 18272 # Per bank write bursts -system.physmem.perBankRdBursts::13 18137 # Per bank write bursts -system.physmem.perBankRdBursts::14 18064 # Per bank write bursts -system.physmem.perBankRdBursts::15 18188 # Per bank write bursts +system.physmem.perBankRdBursts::0 18032 # Per bank write bursts +system.physmem.perBankRdBursts::1 18362 # Per bank write bursts +system.physmem.perBankRdBursts::2 18398 # Per bank write bursts +system.physmem.perBankRdBursts::3 18335 # Per bank write bursts +system.physmem.perBankRdBursts::4 18250 # Per bank write bursts +system.physmem.perBankRdBursts::5 18255 # Per bank write bursts +system.physmem.perBankRdBursts::6 18321 # Per bank write bursts +system.physmem.perBankRdBursts::7 18295 # 
Per bank write bursts +system.physmem.perBankRdBursts::8 18232 # Per bank write bursts +system.physmem.perBankRdBursts::9 18236 # Per bank write bursts +system.physmem.perBankRdBursts::10 18232 # Per bank write bursts +system.physmem.perBankRdBursts::11 18379 # Per bank write bursts +system.physmem.perBankRdBursts::12 18271 # Per bank write bursts +system.physmem.perBankRdBursts::13 18134 # Per bank write bursts +system.physmem.perBankRdBursts::14 18060 # Per bank write bursts +system.physmem.perBankRdBursts::15 18193 # Per bank write bursts system.physmem.perBankWrBursts::0 4125 # Per bank write bursts system.physmem.perBankWrBursts::1 4164 # Per bank write bursts system.physmem.perBankWrBursts::2 4223 # Per bank write bursts @@ -74,7 +74,7 @@ system.physmem.perBankWrBursts::5 4099 # Pe system.physmem.perBankWrBursts::6 4262 # Per bank write bursts system.physmem.perBankWrBursts::7 4226 # Per bank write bursts system.physmem.perBankWrBursts::8 4233 # Per bank write bursts -system.physmem.perBankWrBursts::9 4183 # Per bank write bursts +system.physmem.perBankWrBursts::9 4180 # Per bank write bursts system.physmem.perBankWrBursts::10 4150 # Per bank write bursts system.physmem.perBankWrBursts::11 4241 # Per bank write bursts system.physmem.perBankWrBursts::12 4098 # Per bank write bursts @@ -83,14 +83,14 @@ system.physmem.perBankWrBursts::14 4096 # Pe system.physmem.perBankWrBursts::15 4157 # Per bank write bursts system.physmem.numRdRetry 0 # Number of times read queue was full causing retry system.physmem.numWrRetry 0 # Number of times write queue was full causing retry -system.physmem.totGap 504258181000 # Total gap between requests +system.physmem.totGap 508215452500 # Total gap between requests system.physmem.readPktSize::0 0 # Read request sizes (log2) system.physmem.readPktSize::1 0 # Read request sizes (log2) system.physmem.readPktSize::2 0 # Read request sizes (log2) system.physmem.readPktSize::3 0 # Read request sizes (log2) system.physmem.readPktSize::4 0 # Read request sizes (log2) system.physmem.readPktSize::5 0 # Read request sizes (log2) -system.physmem.readPktSize::6 292267 # Read request sizes (log2) +system.physmem.readPktSize::6 292283 # Read request sizes (log2) system.physmem.writePktSize::0 0 # Write request sizes (log2) system.physmem.writePktSize::1 0 # Write request sizes (log2) system.physmem.writePktSize::2 0 # Write request sizes (log2) @@ -98,9 +98,9 @@ system.physmem.writePktSize::3 0 # Wr system.physmem.writePktSize::4 0 # Write request sizes (log2) system.physmem.writePktSize::5 0 # Write request sizes (log2) system.physmem.writePktSize::6 66683 # Write request sizes (log2) -system.physmem.rdQLenPdf::0 291455 # What read queue length does an incoming req see -system.physmem.rdQLenPdf::1 474 # What read queue length does an incoming req see -system.physmem.rdQLenPdf::2 28 # What read queue length does an incoming req see +system.physmem.rdQLenPdf::0 291508 # What read queue length does an incoming req see +system.physmem.rdQLenPdf::1 465 # What read queue length does an incoming req see +system.physmem.rdQLenPdf::2 12 # What read queue length does an incoming req see system.physmem.rdQLenPdf::3 0 # What read queue length does an incoming req see system.physmem.rdQLenPdf::4 0 # What read queue length does an incoming req see system.physmem.rdQLenPdf::5 0 # What read queue length does an incoming req see @@ -147,23 +147,23 @@ system.physmem.wrQLenPdf::13 1 # Wh system.physmem.wrQLenPdf::14 1 # What write queue length does an incoming req see 
system.physmem.wrQLenPdf::15 936 # What write queue length does an incoming req see system.physmem.wrQLenPdf::16 937 # What write queue length does an incoming req see -system.physmem.wrQLenPdf::17 4045 # What write queue length does an incoming req see +system.physmem.wrQLenPdf::17 4046 # What write queue length does an incoming req see system.physmem.wrQLenPdf::18 4050 # What write queue length does an incoming req see -system.physmem.wrQLenPdf::19 4051 # What write queue length does an incoming req see +system.physmem.wrQLenPdf::19 4050 # What write queue length does an incoming req see system.physmem.wrQLenPdf::20 4050 # What write queue length does an incoming req see system.physmem.wrQLenPdf::21 4050 # What write queue length does an incoming req see system.physmem.wrQLenPdf::22 4050 # What write queue length does an incoming req see system.physmem.wrQLenPdf::23 4050 # What write queue length does an incoming req see system.physmem.wrQLenPdf::24 4050 # What write queue length does an incoming req see -system.physmem.wrQLenPdf::25 4049 # What write queue length does an incoming req see -system.physmem.wrQLenPdf::26 4049 # What write queue length does an incoming req see -system.physmem.wrQLenPdf::27 4052 # What write queue length does an incoming req see +system.physmem.wrQLenPdf::25 4050 # What write queue length does an incoming req see +system.physmem.wrQLenPdf::26 4050 # What write queue length does an incoming req see +system.physmem.wrQLenPdf::27 4050 # What write queue length does an incoming req see system.physmem.wrQLenPdf::28 4049 # What write queue length does an incoming req see system.physmem.wrQLenPdf::29 4051 # What write queue length does an incoming req see -system.physmem.wrQLenPdf::30 4051 # What write queue length does an incoming req see +system.physmem.wrQLenPdf::30 4050 # What write queue length does an incoming req see system.physmem.wrQLenPdf::31 4049 # What write queue length does an incoming req see system.physmem.wrQLenPdf::32 4049 # What write queue length does an incoming req see -system.physmem.wrQLenPdf::33 0 # What write queue length does an incoming req see +system.physmem.wrQLenPdf::33 1 # What write queue length does an incoming req see system.physmem.wrQLenPdf::34 0 # What write queue length does an incoming req see system.physmem.wrQLenPdf::35 0 # What write queue length does an incoming req see system.physmem.wrQLenPdf::36 0 # What write queue length does an incoming req see @@ -194,126 +194,123 @@ system.physmem.wrQLenPdf::60 0 # Wh system.physmem.wrQLenPdf::61 0 # What write queue length does an incoming req see system.physmem.wrQLenPdf::62 0 # What write queue length does an incoming req see system.physmem.wrQLenPdf::63 0 # What write queue length does an incoming req see -system.physmem.bytesPerActivate::samples 103155 # Bytes accessed per row activation -system.physmem.bytesPerActivate::mean 222.473443 # Bytes accessed per row activation -system.physmem.bytesPerActivate::gmean 144.311324 # Bytes accessed per row activation -system.physmem.bytesPerActivate::stdev 268.647767 # Bytes accessed per row activation -system.physmem.bytesPerActivate::0-127 37345 36.20% 36.20% # Bytes accessed per row activation -system.physmem.bytesPerActivate::128-255 43741 42.40% 78.61% # Bytes accessed per row activation -system.physmem.bytesPerActivate::256-383 9241 8.96% 87.56% # Bytes accessed per row activation -system.physmem.bytesPerActivate::384-511 735 0.71% 88.28% # Bytes accessed per row activation -system.physmem.bytesPerActivate::512-639 1396 1.35% 
89.63% # Bytes accessed per row activation -system.physmem.bytesPerActivate::640-767 1157 1.12% 90.75% # Bytes accessed per row activation -system.physmem.bytesPerActivate::768-895 662 0.64% 91.39% # Bytes accessed per row activation -system.physmem.bytesPerActivate::896-1023 564 0.55% 91.94% # Bytes accessed per row activation -system.physmem.bytesPerActivate::1024-1151 8314 8.06% 100.00% # Bytes accessed per row activation -system.physmem.bytesPerActivate::total 103155 # Bytes accessed per row activation +system.physmem.bytesPerActivate::samples 103603 # Bytes accessed per row activation +system.physmem.bytesPerActivate::mean 221.521925 # Bytes accessed per row activation +system.physmem.bytesPerActivate::gmean 143.541969 # Bytes accessed per row activation +system.physmem.bytesPerActivate::stdev 268.372247 # Bytes accessed per row activation +system.physmem.bytesPerActivate::0-127 37864 36.55% 36.55% # Bytes accessed per row activation +system.physmem.bytesPerActivate::128-255 43808 42.28% 78.83% # Bytes accessed per row activation +system.physmem.bytesPerActivate::256-383 9097 8.78% 87.61% # Bytes accessed per row activation +system.physmem.bytesPerActivate::384-511 745 0.72% 88.33% # Bytes accessed per row activation +system.physmem.bytesPerActivate::512-639 1395 1.35% 89.68% # Bytes accessed per row activation +system.physmem.bytesPerActivate::640-767 1153 1.11% 90.79% # Bytes accessed per row activation +system.physmem.bytesPerActivate::768-895 627 0.61% 91.40% # Bytes accessed per row activation +system.physmem.bytesPerActivate::896-1023 610 0.59% 91.98% # Bytes accessed per row activation +system.physmem.bytesPerActivate::1024-1151 8304 8.02% 100.00% # Bytes accessed per row activation +system.physmem.bytesPerActivate::total 103603 # Bytes accessed per row activation system.physmem.rdPerTurnAround::samples 4049 # Reads before turning the bus around for writes -system.physmem.rdPerTurnAround::mean 69.893801 # Reads before turning the bus around for writes -system.physmem.rdPerTurnAround::gmean 34.549322 # Reads before turning the bus around for writes -system.physmem.rdPerTurnAround::stdev 747.524050 # Reads before turning the bus around for writes +system.physmem.rdPerTurnAround::mean 69.361324 # Reads before turning the bus around for writes +system.physmem.rdPerTurnAround::gmean 34.573478 # Reads before turning the bus around for writes +system.physmem.rdPerTurnAround::stdev 739.455375 # Reads before turning the bus around for writes system.physmem.rdPerTurnAround::0-1023 4041 99.80% 99.80% # Reads before turning the bus around for writes system.physmem.rdPerTurnAround::1024-2047 1 0.02% 99.83% # Reads before turning the bus around for writes -system.physmem.rdPerTurnAround::10240-11263 1 0.02% 99.85% # Reads before turning the bus around for writes -system.physmem.rdPerTurnAround::13312-14335 2 0.05% 99.90% # Reads before turning the bus around for writes -system.physmem.rdPerTurnAround::14336-15359 1 0.02% 99.93% # Reads before turning the bus around for writes -system.physmem.rdPerTurnAround::15360-16383 1 0.02% 99.95% # Reads before turning the bus around for writes -system.physmem.rdPerTurnAround::16384-17407 1 0.02% 99.98% # Reads before turning the bus around for writes +system.physmem.rdPerTurnAround::8192-9215 1 0.02% 99.85% # Reads before turning the bus around for writes +system.physmem.rdPerTurnAround::14336-15359 4 0.10% 99.95% # Reads before turning the bus around for writes +system.physmem.rdPerTurnAround::15360-16383 1 0.02% 99.98% # Reads before turning the bus 
around for writes system.physmem.rdPerTurnAround::30720-31743 1 0.02% 100.00% # Reads before turning the bus around for writes system.physmem.rdPerTurnAround::total 4049 # Reads before turning the bus around for writes system.physmem.wrPerTurnAround::samples 4049 # Writes before turning the bus around for reads -system.physmem.wrPerTurnAround::mean 16.463077 # Writes before turning the bus around for reads -system.physmem.wrPerTurnAround::gmean 16.442287 # Writes before turning the bus around for reads -system.physmem.wrPerTurnAround::stdev 0.845052 # Writes before turning the bus around for reads +system.physmem.wrPerTurnAround::mean 16.462336 # Writes before turning the bus around for reads +system.physmem.wrPerTurnAround::gmean 16.441628 # Writes before turning the bus around for reads +system.physmem.wrPerTurnAround::stdev 0.843264 # Writes before turning the bus around for reads system.physmem.wrPerTurnAround::16 3113 76.88% 76.88% # Writes before turning the bus around for reads -system.physmem.wrPerTurnAround::18 933 23.04% 99.93% # Writes before turning the bus around for reads -system.physmem.wrPerTurnAround::19 3 0.07% 100.00% # Writes before turning the bus around for reads +system.physmem.wrPerTurnAround::18 936 23.12% 100.00% # Writes before turning the bus around for reads system.physmem.wrPerTurnAround::total 4049 # Writes before turning the bus around for reads -system.physmem.totQLat 3567632750 # Total ticks spent queuing -system.physmem.totMemAccLat 9041826500 # Total ticks spent from burst creation until serviced by the DRAM -system.physmem.totBusLat 1459785000 # Total ticks spent in databus transfers -system.physmem.avgQLat 12219.72 # Average queueing delay per DRAM burst +system.physmem.totQLat 2518388500 # Total ticks spent queuing +system.physmem.totMemAccLat 7993107250 # Total ticks spent from burst creation until serviced by the DRAM +system.physmem.totBusLat 1459925000 # Total ticks spent in databus transfers +system.physmem.avgQLat 8625.06 # Average queueing delay per DRAM burst system.physmem.avgBusLat 5000.00 # Average bus latency per DRAM burst -system.physmem.avgMemAccLat 30969.72 # Average memory access latency per DRAM burst -system.physmem.avgRdBW 37.05 # Average DRAM read bandwidth in MiByte/s -system.physmem.avgWrBW 8.46 # Average achieved write bandwidth in MiByte/s -system.physmem.avgRdBWSys 37.09 # Average system read bandwidth in MiByte/s -system.physmem.avgWrBWSys 8.46 # Average system write bandwidth in MiByte/s +system.physmem.avgMemAccLat 27375.06 # Average memory access latency per DRAM burst +system.physmem.avgRdBW 36.77 # Average DRAM read bandwidth in MiByte/s +system.physmem.avgWrBW 8.39 # Average achieved write bandwidth in MiByte/s +system.physmem.avgRdBWSys 36.81 # Average system read bandwidth in MiByte/s +system.physmem.avgWrBWSys 8.40 # Average system write bandwidth in MiByte/s system.physmem.peakBW 12800.00 # Theoretical peak bandwidth in MiByte/s -system.physmem.busUtil 0.36 # Data bus utilization in percentage +system.physmem.busUtil 0.35 # Data bus utilization in percentage system.physmem.busUtilRead 0.29 # Data bus utilization in percentage for reads system.physmem.busUtilWrite 0.07 # Data bus utilization in percentage for writes system.physmem.avgRdQLen 1.00 # Average read queue length when enqueuing -system.physmem.avgWrQLen 24.27 # Average write queue length when enqueuing -system.physmem.readRowHits 203404 # Number of row buffer hits during reads -system.physmem.writeRowHits 52048 # Number of row buffer hits during writes 
-system.physmem.readRowHitRate 69.67 # Row buffer hit rate for reads -system.physmem.writeRowHitRate 78.05 # Row buffer hit rate for writes -system.physmem.avgGap 1404814.55 # Average gap between requests -system.physmem.pageHitRate 71.23 # Row buffer hit rate, read and write combined -system.physmem_0.actEnergy 388939320 # Energy for activate commands per rank (pJ) -system.physmem_0.preEnergy 212218875 # Energy for precharge commands per rank (pJ) -system.physmem_0.readEnergy 1140243000 # Energy for read commands per rank (pJ) +system.physmem.avgWrQLen 24.41 # Average write queue length when enqueuing +system.physmem.readRowHits 203026 # Number of row buffer hits during reads +system.physmem.writeRowHits 52001 # Number of row buffer hits during writes +system.physmem.readRowHitRate 69.53 # Row buffer hit rate for reads +system.physmem.writeRowHitRate 77.98 # Row buffer hit rate for writes +system.physmem.avgGap 1415776.01 # Average gap between requests +system.physmem.pageHitRate 71.10 # Row buffer hit rate, read and write combined +system.physmem_0.actEnergy 390708360 # Energy for activate commands per rank (pJ) +system.physmem_0.preEnergy 213184125 # Energy for precharge commands per rank (pJ) +system.physmem_0.readEnergy 1140250800 # Energy for read commands per rank (pJ) system.physmem_0.writeEnergy 216438480 # Energy for write commands per rank (pJ) -system.physmem_0.refreshEnergy 32935362720 # Energy for refresh commands per rank (pJ) -system.physmem_0.actBackEnergy 104730111945 # Energy for active background per rank (pJ) -system.physmem_0.preBackEnergy 210683510250 # Energy for precharge background per rank (pJ) -system.physmem_0.totalEnergy 350306824590 # Total energy per rank (pJ) -system.physmem_0.averagePower 694.703966 # Core power per rank (mW) -system.physmem_0.memoryStateTime::IDLE 349825620000 # Time in different power states -system.physmem_0.memoryStateTime::REF 16838120000 # Time in different power states +system.physmem_0.refreshEnergy 33193711200 # Energy for refresh commands per rank (pJ) +system.physmem_0.actBackEnergy 103572972045 # Energy for active background per rank (pJ) +system.physmem_0.preBackEnergy 214071794250 # Energy for precharge background per rank (pJ) +system.physmem_0.totalEnergy 352799059260 # Total energy per rank (pJ) +system.physmem_0.averagePower 694.201008 # Core power per rank (mW) +system.physmem_0.memoryStateTime::IDLE 355459552750 # Time in different power states +system.physmem_0.memoryStateTime::REF 16970200000 # Time in different power states system.physmem_0.memoryStateTime::PRE_PDN 0 # Time in different power states -system.physmem_0.memoryStateTime::ACT 137589656250 # Time in different power states +system.physmem_0.memoryStateTime::ACT 135779058500 # Time in different power states system.physmem_0.memoryStateTime::ACT_PDN 0 # Time in different power states -system.physmem_1.actEnergy 390829320 # Energy for activate commands per rank (pJ) -system.physmem_1.preEnergy 213250125 # Energy for precharge commands per rank (pJ) -system.physmem_1.readEnergy 1136538000 # Energy for read commands per rank (pJ) -system.physmem_1.writeEnergy 215511840 # Energy for write commands per rank (pJ) -system.physmem_1.refreshEnergy 32935362720 # Energy for refresh commands per rank (pJ) -system.physmem_1.actBackEnergy 105447215835 # Energy for active background per rank (pJ) -system.physmem_1.preBackEnergy 210054471750 # Energy for precharge background per rank (pJ) -system.physmem_1.totalEnergy 350393179590 # Total energy per rank (pJ) 
-system.physmem_1.averagePower 694.875219 # Core power per rank (mW) -system.physmem_1.memoryStateTime::IDLE 348773258750 # Time in different power states -system.physmem_1.memoryStateTime::REF 16838120000 # Time in different power states +system.physmem_1.actEnergy 392424480 # Energy for activate commands per rank (pJ) +system.physmem_1.preEnergy 214120500 # Energy for precharge commands per rank (pJ) +system.physmem_1.readEnergy 1136545800 # Energy for read commands per rank (pJ) +system.physmem_1.writeEnergy 215492400 # Energy for write commands per rank (pJ) +system.physmem_1.refreshEnergy 33193711200 # Energy for refresh commands per rank (pJ) +system.physmem_1.actBackEnergy 103467236760 # Energy for active background per rank (pJ) +system.physmem_1.preBackEnergy 214164544500 # Energy for precharge background per rank (pJ) +system.physmem_1.totalEnergy 352784075640 # Total energy per rank (pJ) +system.physmem_1.averagePower 694.171524 # Core power per rank (mW) +system.physmem_1.memoryStateTime::IDLE 355611467750 # Time in different power states +system.physmem_1.memoryStateTime::REF 16970200000 # Time in different power states system.physmem_1.memoryStateTime::PRE_PDN 0 # Time in different power states -system.physmem_1.memoryStateTime::ACT 138643034750 # Time in different power states +system.physmem_1.memoryStateTime::ACT 135627775750 # Time in different power states system.physmem_1.memoryStateTime::ACT_PDN 0 # Time in different power states -system.pwrStateResidencyTicks::UNDEFINED 504258263000 # Cumulative time (in ticks) in various power states -system.cpu.branchPred.lookups 123840342 # Number of BP lookups -system.cpu.branchPred.condPredicted 79869322 # Number of conditional branches predicted -system.cpu.branchPred.condIncorrect 685088 # Number of conditional branches incorrect -system.cpu.branchPred.BTBLookups 102061444 # Number of BTB lookups -system.cpu.branchPred.BTBHits 68186680 # Number of BTB hits +system.pwrStateResidencyTicks::UNDEFINED 508215534000 # Cumulative time (in ticks) in various power states +system.cpu.branchPred.lookups 123851653 # Number of BP lookups +system.cpu.branchPred.condPredicted 79872946 # Number of conditional branches predicted +system.cpu.branchPred.condIncorrect 686743 # Number of conditional branches incorrect +system.cpu.branchPred.BTBLookups 102066131 # Number of BTB lookups +system.cpu.branchPred.BTBHits 68190141 # Number of BTB hits system.cpu.branchPred.BTBCorrect 0 # Number of correct BTB predictions (this stat may not work properly. -system.cpu.branchPred.BTBHitPct 66.809441 # BTB Hit Percentage -system.cpu.branchPred.usedRAS 18691358 # Number of times the RAS was used to get a target. -system.cpu.branchPred.RASInCorrect 9446 # Number of incorrect RAS predictions. -system.cpu.branchPred.indirectLookups 14052117 # Number of indirect predictor lookups. -system.cpu.branchPred.indirectHits 14048642 # Number of indirect target hits. -system.cpu.branchPred.indirectMisses 3475 # Number of indirect misses. -system.cpu.branchPredindirectMispredicted 11780 # Number of mispredicted indirect branches. +system.cpu.branchPred.BTBHitPct 66.809764 # BTB Hit Percentage +system.cpu.branchPred.usedRAS 18697400 # Number of times the RAS was used to get a target. +system.cpu.branchPred.RASInCorrect 11224 # Number of incorrect RAS predictions. +system.cpu.branchPred.indirectLookups 14052177 # Number of indirect predictor lookups. +system.cpu.branchPred.indirectHits 14048616 # Number of indirect target hits. 
+system.cpu.branchPred.indirectMisses 3561 # Number of indirect misses. +system.cpu.branchPredindirectMispredicted 11655 # Number of mispredicted indirect branches. system.cpu_clk_domain.clock 500 # Clock period in ticks system.cpu.dtb.fetch_hits 0 # ITB hits system.cpu.dtb.fetch_misses 0 # ITB misses system.cpu.dtb.fetch_acv 0 # ITB acv system.cpu.dtb.fetch_accesses 0 # ITB accesses -system.cpu.dtb.read_hits 237538322 # DTB read hits -system.cpu.dtb.read_misses 198467 # DTB read misses +system.cpu.dtb.read_hits 237539296 # DTB read hits +system.cpu.dtb.read_misses 195211 # DTB read misses system.cpu.dtb.read_acv 0 # DTB read access violations -system.cpu.dtb.read_accesses 237736789 # DTB read accesses -system.cpu.dtb.write_hits 98305180 # DTB write hits -system.cpu.dtb.write_misses 7178 # DTB write misses +system.cpu.dtb.read_accesses 237734507 # DTB read accesses +system.cpu.dtb.write_hits 98305020 # DTB write hits +system.cpu.dtb.write_misses 7170 # DTB write misses system.cpu.dtb.write_acv 0 # DTB write access violations -system.cpu.dtb.write_accesses 98312358 # DTB write accesses -system.cpu.dtb.data_hits 335843502 # DTB hits -system.cpu.dtb.data_misses 205645 # DTB misses +system.cpu.dtb.write_accesses 98312190 # DTB write accesses +system.cpu.dtb.data_hits 335844316 # DTB hits +system.cpu.dtb.data_misses 202381 # DTB misses system.cpu.dtb.data_acv 0 # DTB access violations -system.cpu.dtb.data_accesses 336049147 # DTB accesses -system.cpu.itb.fetch_hits 285763790 # ITB hits +system.cpu.dtb.data_accesses 336046697 # DTB accesses +system.cpu.itb.fetch_hits 286584409 # ITB hits system.cpu.itb.fetch_misses 119 # ITB misses system.cpu.itb.fetch_acv 0 # ITB acv -system.cpu.itb.fetch_accesses 285763909 # ITB accesses +system.cpu.itb.fetch_accesses 286584528 # ITB accesses system.cpu.itb.read_hits 0 # DTB read hits system.cpu.itb.read_misses 0 # DTB read misses system.cpu.itb.read_acv 0 # DTB read access violations @@ -327,16 +324,16 @@ system.cpu.itb.data_misses 0 # DT system.cpu.itb.data_acv 0 # DTB access violations system.cpu.itb.data_accesses 0 # DTB accesses system.cpu.workload.num_syscalls 37 # Number of system calls -system.cpu.pwrStateResidencyTicks::ON 504258263000 # Cumulative time (in ticks) in various power states -system.cpu.numCycles 1008516526 # number of cpu cycles simulated +system.cpu.pwrStateResidencyTicks::ON 508215534000 # Cumulative time (in ticks) in various power states +system.cpu.numCycles 1016431068 # number of cpu cycles simulated system.cpu.numWorkItemsStarted 0 # number of work items this cpu started system.cpu.numWorkItemsCompleted 0 # number of work items this cpu completed system.cpu.committedInsts 928789150 # Number of instructions committed system.cpu.committedOps 928789150 # Number of ops (including micro ops) committed -system.cpu.discardedOps 316849 # Number of ops (including micro ops) which were discarded before commit +system.cpu.discardedOps 319592 # Number of ops (including micro ops) which were discarded before commit system.cpu.numFetchSuspends 0 # Number of times Execute suspended instruction fetching -system.cpu.cpi 1.085840 # CPI: cycles per instruction -system.cpu.ipc 0.920946 # IPC: instructions per cycle +system.cpu.cpi 1.094361 # CPI: cycles per instruction +system.cpu.ipc 0.913775 # IPC: instructions per cycle system.cpu.op_class_0::No_OpClass 86206875 9.28% 9.28% # Class of committed instruction system.cpu.op_class_0::IntAlu 486529511 52.38% 61.66% # Class of committed instruction system.cpu.op_class_0::IntMult 7040 0.00% 61.67% # Class 
of committed instruction @@ -372,316 +369,316 @@ system.cpu.op_class_0::MemWrite 98308071 10.58% 100.00% # Cl system.cpu.op_class_0::IprAccess 0 0.00% 100.00% # Class of committed instruction system.cpu.op_class_0::InstPrefetch 0 0.00% 100.00% # Class of committed instruction system.cpu.op_class_0::total 928789150 # Class of committed instruction -system.cpu.tickCycles 957154131 # Number of cycles that the object actually ticked -system.cpu.idleCycles 51362395 # Total number of cycles that the object has spent stopped -system.cpu.dcache.tags.pwrStateResidencyTicks::UNDEFINED 504258263000 # Cumulative time (in ticks) in various power states -system.cpu.dcache.tags.replacements 776530 # number of replacements -system.cpu.dcache.tags.tagsinuse 4092.342308 # Cycle average of tags in use -system.cpu.dcache.tags.total_refs 321596153 # Total number of references to valid blocks. -system.cpu.dcache.tags.sampled_refs 780626 # Sample count of references to valid blocks. -system.cpu.dcache.tags.avg_refs 411.972126 # Average number of references to valid blocks. -system.cpu.dcache.tags.warmup_cycle 901583500 # Cycle when the warmup percentage was hit. -system.cpu.dcache.tags.occ_blocks::cpu.data 4092.342308 # Average occupied blocks per requestor -system.cpu.dcache.tags.occ_percent::cpu.data 0.999107 # Average percentage of cache occupancy -system.cpu.dcache.tags.occ_percent::total 0.999107 # Average percentage of cache occupancy +system.cpu.tickCycles 962815750 # Number of cycles that the object actually ticked +system.cpu.idleCycles 53615318 # Total number of cycles that the object has spent stopped +system.cpu.dcache.tags.pwrStateResidencyTicks::UNDEFINED 508215534000 # Cumulative time (in ticks) in various power states +system.cpu.dcache.tags.replacements 776559 # number of replacements +system.cpu.dcache.tags.tagsinuse 4092.348104 # Cycle average of tags in use +system.cpu.dcache.tags.total_refs 320318733 # Total number of references to valid blocks. +system.cpu.dcache.tags.sampled_refs 780655 # Sample count of references to valid blocks. +system.cpu.dcache.tags.avg_refs 410.320478 # Average number of references to valid blocks. +system.cpu.dcache.tags.warmup_cycle 905242500 # Cycle when the warmup percentage was hit. 
+system.cpu.dcache.tags.occ_blocks::cpu.data 4092.348104 # Average occupied blocks per requestor +system.cpu.dcache.tags.occ_percent::cpu.data 0.999108 # Average percentage of cache occupancy +system.cpu.dcache.tags.occ_percent::total 0.999108 # Average percentage of cache occupancy system.cpu.dcache.tags.occ_task_id_blocks::1024 4096 # Occupied blocks per task id system.cpu.dcache.tags.age_task_id_blocks_1024::0 56 # Occupied blocks per task id -system.cpu.dcache.tags.age_task_id_blocks_1024::1 214 # Occupied blocks per task id -system.cpu.dcache.tags.age_task_id_blocks_1024::2 956 # Occupied blocks per task id -system.cpu.dcache.tags.age_task_id_blocks_1024::3 1398 # Occupied blocks per task id -system.cpu.dcache.tags.age_task_id_blocks_1024::4 1472 # Occupied blocks per task id +system.cpu.dcache.tags.age_task_id_blocks_1024::1 213 # Occupied blocks per task id +system.cpu.dcache.tags.age_task_id_blocks_1024::2 955 # Occupied blocks per task id +system.cpu.dcache.tags.age_task_id_blocks_1024::3 1381 # Occupied blocks per task id +system.cpu.dcache.tags.age_task_id_blocks_1024::4 1491 # Occupied blocks per task id system.cpu.dcache.tags.occ_task_id_percent::1024 1 # Percentage of cache occupancy per task id -system.cpu.dcache.tags.tag_accesses 645671096 # Number of tag accesses -system.cpu.dcache.tags.data_accesses 645671096 # Number of data accesses -system.cpu.dcache.pwrStateResidencyTicks::UNDEFINED 504258263000 # Cumulative time (in ticks) in various power states -system.cpu.dcache.ReadReq_hits::cpu.data 223432106 # number of ReadReq hits -system.cpu.dcache.ReadReq_hits::total 223432106 # number of ReadReq hits -system.cpu.dcache.WriteReq_hits::cpu.data 98164047 # number of WriteReq hits -system.cpu.dcache.WriteReq_hits::total 98164047 # number of WriteReq hits -system.cpu.dcache.demand_hits::cpu.data 321596153 # number of demand (read+write) hits -system.cpu.dcache.demand_hits::total 321596153 # number of demand (read+write) hits -system.cpu.dcache.overall_hits::cpu.data 321596153 # number of overall hits -system.cpu.dcache.overall_hits::total 321596153 # number of overall hits -system.cpu.dcache.ReadReq_misses::cpu.data 711929 # number of ReadReq misses -system.cpu.dcache.ReadReq_misses::total 711929 # number of ReadReq misses -system.cpu.dcache.WriteReq_misses::cpu.data 137153 # number of WriteReq misses -system.cpu.dcache.WriteReq_misses::total 137153 # number of WriteReq misses -system.cpu.dcache.demand_misses::cpu.data 849082 # number of demand (read+write) misses -system.cpu.dcache.demand_misses::total 849082 # number of demand (read+write) misses -system.cpu.dcache.overall_misses::cpu.data 849082 # number of overall misses -system.cpu.dcache.overall_misses::total 849082 # number of overall misses -system.cpu.dcache.ReadReq_miss_latency::cpu.data 25457059500 # number of ReadReq miss cycles -system.cpu.dcache.ReadReq_miss_latency::total 25457059500 # number of ReadReq miss cycles -system.cpu.dcache.WriteReq_miss_latency::cpu.data 10110916000 # number of WriteReq miss cycles -system.cpu.dcache.WriteReq_miss_latency::total 10110916000 # number of WriteReq miss cycles -system.cpu.dcache.demand_miss_latency::cpu.data 35567975500 # number of demand (read+write) miss cycles -system.cpu.dcache.demand_miss_latency::total 35567975500 # number of demand (read+write) miss cycles -system.cpu.dcache.overall_miss_latency::cpu.data 35567975500 # number of overall miss cycles -system.cpu.dcache.overall_miss_latency::total 35567975500 # number of overall miss cycles 
-system.cpu.dcache.ReadReq_accesses::cpu.data 224144035 # number of ReadReq accesses(hits+misses) -system.cpu.dcache.ReadReq_accesses::total 224144035 # number of ReadReq accesses(hits+misses) +system.cpu.dcache.tags.tag_accesses 643115729 # Number of tag accesses +system.cpu.dcache.tags.data_accesses 643115729 # Number of data accesses +system.cpu.dcache.pwrStateResidencyTicks::UNDEFINED 508215534000 # Cumulative time (in ticks) in various power states +system.cpu.dcache.ReadReq_hits::cpu.data 222154684 # number of ReadReq hits +system.cpu.dcache.ReadReq_hits::total 222154684 # number of ReadReq hits +system.cpu.dcache.WriteReq_hits::cpu.data 98164049 # number of WriteReq hits +system.cpu.dcache.WriteReq_hits::total 98164049 # number of WriteReq hits +system.cpu.dcache.demand_hits::cpu.data 320318733 # number of demand (read+write) hits +system.cpu.dcache.demand_hits::total 320318733 # number of demand (read+write) hits +system.cpu.dcache.overall_hits::cpu.data 320318733 # number of overall hits +system.cpu.dcache.overall_hits::total 320318733 # number of overall hits +system.cpu.dcache.ReadReq_misses::cpu.data 711653 # number of ReadReq misses +system.cpu.dcache.ReadReq_misses::total 711653 # number of ReadReq misses +system.cpu.dcache.WriteReq_misses::cpu.data 137151 # number of WriteReq misses +system.cpu.dcache.WriteReq_misses::total 137151 # number of WriteReq misses +system.cpu.dcache.demand_misses::cpu.data 848804 # number of demand (read+write) misses +system.cpu.dcache.demand_misses::total 848804 # number of demand (read+write) misses +system.cpu.dcache.overall_misses::cpu.data 848804 # number of overall misses +system.cpu.dcache.overall_misses::total 848804 # number of overall misses +system.cpu.dcache.ReadReq_miss_latency::cpu.data 24412597000 # number of ReadReq miss cycles +system.cpu.dcache.ReadReq_miss_latency::total 24412597000 # number of ReadReq miss cycles +system.cpu.dcache.WriteReq_miss_latency::cpu.data 10105115500 # number of WriteReq miss cycles +system.cpu.dcache.WriteReq_miss_latency::total 10105115500 # number of WriteReq miss cycles +system.cpu.dcache.demand_miss_latency::cpu.data 34517712500 # number of demand (read+write) miss cycles +system.cpu.dcache.demand_miss_latency::total 34517712500 # number of demand (read+write) miss cycles +system.cpu.dcache.overall_miss_latency::cpu.data 34517712500 # number of overall miss cycles +system.cpu.dcache.overall_miss_latency::total 34517712500 # number of overall miss cycles +system.cpu.dcache.ReadReq_accesses::cpu.data 222866337 # number of ReadReq accesses(hits+misses) +system.cpu.dcache.ReadReq_accesses::total 222866337 # number of ReadReq accesses(hits+misses) system.cpu.dcache.WriteReq_accesses::cpu.data 98301200 # number of WriteReq accesses(hits+misses) system.cpu.dcache.WriteReq_accesses::total 98301200 # number of WriteReq accesses(hits+misses) -system.cpu.dcache.demand_accesses::cpu.data 322445235 # number of demand (read+write) accesses -system.cpu.dcache.demand_accesses::total 322445235 # number of demand (read+write) accesses -system.cpu.dcache.overall_accesses::cpu.data 322445235 # number of overall (read+write) accesses -system.cpu.dcache.overall_accesses::total 322445235 # number of overall (read+write) accesses -system.cpu.dcache.ReadReq_miss_rate::cpu.data 0.003176 # miss rate for ReadReq accesses -system.cpu.dcache.ReadReq_miss_rate::total 0.003176 # miss rate for ReadReq accesses +system.cpu.dcache.demand_accesses::cpu.data 321167537 # number of demand (read+write) accesses 
+system.cpu.dcache.demand_accesses::total 321167537 # number of demand (read+write) accesses +system.cpu.dcache.overall_accesses::cpu.data 321167537 # number of overall (read+write) accesses +system.cpu.dcache.overall_accesses::total 321167537 # number of overall (read+write) accesses +system.cpu.dcache.ReadReq_miss_rate::cpu.data 0.003193 # miss rate for ReadReq accesses +system.cpu.dcache.ReadReq_miss_rate::total 0.003193 # miss rate for ReadReq accesses system.cpu.dcache.WriteReq_miss_rate::cpu.data 0.001395 # miss rate for WriteReq accesses system.cpu.dcache.WriteReq_miss_rate::total 0.001395 # miss rate for WriteReq accesses -system.cpu.dcache.demand_miss_rate::cpu.data 0.002633 # miss rate for demand accesses -system.cpu.dcache.demand_miss_rate::total 0.002633 # miss rate for demand accesses -system.cpu.dcache.overall_miss_rate::cpu.data 0.002633 # miss rate for overall accesses -system.cpu.dcache.overall_miss_rate::total 0.002633 # miss rate for overall accesses -system.cpu.dcache.ReadReq_avg_miss_latency::cpu.data 35757.862792 # average ReadReq miss latency -system.cpu.dcache.ReadReq_avg_miss_latency::total 35757.862792 # average ReadReq miss latency -system.cpu.dcache.WriteReq_avg_miss_latency::cpu.data 73719.976960 # average WriteReq miss latency -system.cpu.dcache.WriteReq_avg_miss_latency::total 73719.976960 # average WriteReq miss latency -system.cpu.dcache.demand_avg_miss_latency::cpu.data 41889.918170 # average overall miss latency -system.cpu.dcache.demand_avg_miss_latency::total 41889.918170 # average overall miss latency -system.cpu.dcache.overall_avg_miss_latency::cpu.data 41889.918170 # average overall miss latency -system.cpu.dcache.overall_avg_miss_latency::total 41889.918170 # average overall miss latency +system.cpu.dcache.demand_miss_rate::cpu.data 0.002643 # miss rate for demand accesses +system.cpu.dcache.demand_miss_rate::total 0.002643 # miss rate for demand accesses +system.cpu.dcache.overall_miss_rate::cpu.data 0.002643 # miss rate for overall accesses +system.cpu.dcache.overall_miss_rate::total 0.002643 # miss rate for overall accesses +system.cpu.dcache.ReadReq_avg_miss_latency::cpu.data 34304.073755 # average ReadReq miss latency +system.cpu.dcache.ReadReq_avg_miss_latency::total 34304.073755 # average ReadReq miss latency +system.cpu.dcache.WriteReq_avg_miss_latency::cpu.data 73678.759178 # average WriteReq miss latency +system.cpu.dcache.WriteReq_avg_miss_latency::total 73678.759178 # average WriteReq miss latency +system.cpu.dcache.demand_avg_miss_latency::cpu.data 40666.293396 # average overall miss latency +system.cpu.dcache.demand_avg_miss_latency::total 40666.293396 # average overall miss latency +system.cpu.dcache.overall_avg_miss_latency::cpu.data 40666.293396 # average overall miss latency +system.cpu.dcache.overall_avg_miss_latency::total 40666.293396 # average overall miss latency system.cpu.dcache.blocked_cycles::no_mshrs 0 # number of cycles access was blocked system.cpu.dcache.blocked_cycles::no_targets 0 # number of cycles access was blocked system.cpu.dcache.blocked::no_mshrs 0 # number of cycles access was blocked system.cpu.dcache.blocked::no_targets 0 # number of cycles access was blocked system.cpu.dcache.avg_blocked_cycles::no_mshrs nan # average number of cycles each access was blocked system.cpu.dcache.avg_blocked_cycles::no_targets nan # average number of cycles each access was blocked -system.cpu.dcache.writebacks::writebacks 88489 # number of writebacks -system.cpu.dcache.writebacks::total 88489 # number of writebacks 
-system.cpu.dcache.ReadReq_mshr_hits::cpu.data 314 # number of ReadReq MSHR hits -system.cpu.dcache.ReadReq_mshr_hits::total 314 # number of ReadReq MSHR hits -system.cpu.dcache.WriteReq_mshr_hits::cpu.data 68142 # number of WriteReq MSHR hits -system.cpu.dcache.WriteReq_mshr_hits::total 68142 # number of WriteReq MSHR hits -system.cpu.dcache.demand_mshr_hits::cpu.data 68456 # number of demand (read+write) MSHR hits -system.cpu.dcache.demand_mshr_hits::total 68456 # number of demand (read+write) MSHR hits -system.cpu.dcache.overall_mshr_hits::cpu.data 68456 # number of overall MSHR hits -system.cpu.dcache.overall_mshr_hits::total 68456 # number of overall MSHR hits -system.cpu.dcache.ReadReq_mshr_misses::cpu.data 711615 # number of ReadReq MSHR misses -system.cpu.dcache.ReadReq_mshr_misses::total 711615 # number of ReadReq MSHR misses +system.cpu.dcache.writebacks::writebacks 88481 # number of writebacks +system.cpu.dcache.writebacks::total 88481 # number of writebacks +system.cpu.dcache.ReadReq_mshr_hits::cpu.data 9 # number of ReadReq MSHR hits +system.cpu.dcache.ReadReq_mshr_hits::total 9 # number of ReadReq MSHR hits +system.cpu.dcache.WriteReq_mshr_hits::cpu.data 68140 # number of WriteReq MSHR hits +system.cpu.dcache.WriteReq_mshr_hits::total 68140 # number of WriteReq MSHR hits +system.cpu.dcache.demand_mshr_hits::cpu.data 68149 # number of demand (read+write) MSHR hits +system.cpu.dcache.demand_mshr_hits::total 68149 # number of demand (read+write) MSHR hits +system.cpu.dcache.overall_mshr_hits::cpu.data 68149 # number of overall MSHR hits +system.cpu.dcache.overall_mshr_hits::total 68149 # number of overall MSHR hits +system.cpu.dcache.ReadReq_mshr_misses::cpu.data 711644 # number of ReadReq MSHR misses +system.cpu.dcache.ReadReq_mshr_misses::total 711644 # number of ReadReq MSHR misses system.cpu.dcache.WriteReq_mshr_misses::cpu.data 69011 # number of WriteReq MSHR misses system.cpu.dcache.WriteReq_mshr_misses::total 69011 # number of WriteReq MSHR misses -system.cpu.dcache.demand_mshr_misses::cpu.data 780626 # number of demand (read+write) MSHR misses -system.cpu.dcache.demand_mshr_misses::total 780626 # number of demand (read+write) MSHR misses -system.cpu.dcache.overall_mshr_misses::cpu.data 780626 # number of overall MSHR misses -system.cpu.dcache.overall_mshr_misses::total 780626 # number of overall MSHR misses -system.cpu.dcache.ReadReq_mshr_miss_latency::cpu.data 24738054000 # number of ReadReq MSHR miss cycles -system.cpu.dcache.ReadReq_mshr_miss_latency::total 24738054000 # number of ReadReq MSHR miss cycles -system.cpu.dcache.WriteReq_mshr_miss_latency::cpu.data 5071007000 # number of WriteReq MSHR miss cycles -system.cpu.dcache.WriteReq_mshr_miss_latency::total 5071007000 # number of WriteReq MSHR miss cycles -system.cpu.dcache.demand_mshr_miss_latency::cpu.data 29809061000 # number of demand (read+write) MSHR miss cycles -system.cpu.dcache.demand_mshr_miss_latency::total 29809061000 # number of demand (read+write) MSHR miss cycles -system.cpu.dcache.overall_mshr_miss_latency::cpu.data 29809061000 # number of overall MSHR miss cycles -system.cpu.dcache.overall_mshr_miss_latency::total 29809061000 # number of overall MSHR miss cycles -system.cpu.dcache.ReadReq_mshr_miss_rate::cpu.data 0.003175 # mshr miss rate for ReadReq accesses -system.cpu.dcache.ReadReq_mshr_miss_rate::total 0.003175 # mshr miss rate for ReadReq accesses +system.cpu.dcache.demand_mshr_misses::cpu.data 780655 # number of demand (read+write) MSHR misses +system.cpu.dcache.demand_mshr_misses::total 
780655 # number of demand (read+write) MSHR misses +system.cpu.dcache.overall_mshr_misses::cpu.data 780655 # number of overall MSHR misses +system.cpu.dcache.overall_mshr_misses::total 780655 # number of overall MSHR misses +system.cpu.dcache.ReadReq_mshr_miss_latency::cpu.data 23700262500 # number of ReadReq MSHR miss cycles +system.cpu.dcache.ReadReq_mshr_miss_latency::total 23700262500 # number of ReadReq MSHR miss cycles +system.cpu.dcache.WriteReq_mshr_miss_latency::cpu.data 5068010000 # number of WriteReq MSHR miss cycles +system.cpu.dcache.WriteReq_mshr_miss_latency::total 5068010000 # number of WriteReq MSHR miss cycles +system.cpu.dcache.demand_mshr_miss_latency::cpu.data 28768272500 # number of demand (read+write) MSHR miss cycles +system.cpu.dcache.demand_mshr_miss_latency::total 28768272500 # number of demand (read+write) MSHR miss cycles +system.cpu.dcache.overall_mshr_miss_latency::cpu.data 28768272500 # number of overall MSHR miss cycles +system.cpu.dcache.overall_mshr_miss_latency::total 28768272500 # number of overall MSHR miss cycles +system.cpu.dcache.ReadReq_mshr_miss_rate::cpu.data 0.003193 # mshr miss rate for ReadReq accesses +system.cpu.dcache.ReadReq_mshr_miss_rate::total 0.003193 # mshr miss rate for ReadReq accesses system.cpu.dcache.WriteReq_mshr_miss_rate::cpu.data 0.000702 # mshr miss rate for WriteReq accesses system.cpu.dcache.WriteReq_mshr_miss_rate::total 0.000702 # mshr miss rate for WriteReq accesses -system.cpu.dcache.demand_mshr_miss_rate::cpu.data 0.002421 # mshr miss rate for demand accesses -system.cpu.dcache.demand_mshr_miss_rate::total 0.002421 # mshr miss rate for demand accesses -system.cpu.dcache.overall_mshr_miss_rate::cpu.data 0.002421 # mshr miss rate for overall accesses -system.cpu.dcache.overall_mshr_miss_rate::total 0.002421 # mshr miss rate for overall accesses -system.cpu.dcache.ReadReq_avg_mshr_miss_latency::cpu.data 34763.255412 # average ReadReq mshr miss latency -system.cpu.dcache.ReadReq_avg_mshr_miss_latency::total 34763.255412 # average ReadReq mshr miss latency -system.cpu.dcache.WriteReq_avg_mshr_miss_latency::cpu.data 73481.140688 # average WriteReq mshr miss latency -system.cpu.dcache.WriteReq_avg_mshr_miss_latency::total 73481.140688 # average WriteReq mshr miss latency -system.cpu.dcache.demand_avg_mshr_miss_latency::cpu.data 38186.098080 # average overall mshr miss latency -system.cpu.dcache.demand_avg_mshr_miss_latency::total 38186.098080 # average overall mshr miss latency -system.cpu.dcache.overall_avg_mshr_miss_latency::cpu.data 38186.098080 # average overall mshr miss latency -system.cpu.dcache.overall_avg_mshr_miss_latency::total 38186.098080 # average overall mshr miss latency -system.cpu.icache.tags.pwrStateResidencyTicks::UNDEFINED 504258263000 # Cumulative time (in ticks) in various power states -system.cpu.icache.tags.replacements 10567 # number of replacements -system.cpu.icache.tags.tagsinuse 1686.158478 # Cycle average of tags in use -system.cpu.icache.tags.total_refs 285751480 # Total number of references to valid blocks. -system.cpu.icache.tags.sampled_refs 12309 # Sample count of references to valid blocks. -system.cpu.icache.tags.avg_refs 23214.841173 # Average number of references to valid blocks. 
+system.cpu.dcache.demand_mshr_miss_rate::cpu.data 0.002431 # mshr miss rate for demand accesses +system.cpu.dcache.demand_mshr_miss_rate::total 0.002431 # mshr miss rate for demand accesses +system.cpu.dcache.overall_mshr_miss_rate::cpu.data 0.002431 # mshr miss rate for overall accesses +system.cpu.dcache.overall_mshr_miss_rate::total 0.002431 # mshr miss rate for overall accesses +system.cpu.dcache.ReadReq_avg_mshr_miss_latency::cpu.data 33303.537302 # average ReadReq mshr miss latency +system.cpu.dcache.ReadReq_avg_mshr_miss_latency::total 33303.537302 # average ReadReq mshr miss latency +system.cpu.dcache.WriteReq_avg_mshr_miss_latency::cpu.data 73437.712828 # average WriteReq mshr miss latency +system.cpu.dcache.WriteReq_avg_mshr_miss_latency::total 73437.712828 # average WriteReq mshr miss latency +system.cpu.dcache.demand_avg_mshr_miss_latency::cpu.data 36851.454868 # average overall mshr miss latency +system.cpu.dcache.demand_avg_mshr_miss_latency::total 36851.454868 # average overall mshr miss latency +system.cpu.dcache.overall_avg_mshr_miss_latency::cpu.data 36851.454868 # average overall mshr miss latency +system.cpu.dcache.overall_avg_mshr_miss_latency::total 36851.454868 # average overall mshr miss latency +system.cpu.icache.tags.pwrStateResidencyTicks::UNDEFINED 508215534000 # Cumulative time (in ticks) in various power states +system.cpu.icache.tags.replacements 10580 # number of replacements +system.cpu.icache.tags.tagsinuse 1690.197843 # Cycle average of tags in use +system.cpu.icache.tags.total_refs 286572082 # Total number of references to valid blocks. +system.cpu.icache.tags.sampled_refs 12326 # Sample count of references to valid blocks. +system.cpu.icache.tags.avg_refs 23249.398183 # Average number of references to valid blocks. system.cpu.icache.tags.warmup_cycle 0 # Cycle when the warmup percentage was hit. 
-system.cpu.icache.tags.occ_blocks::cpu.inst 1686.158478 # Average occupied blocks per requestor -system.cpu.icache.tags.occ_percent::cpu.inst 0.823320 # Average percentage of cache occupancy -system.cpu.icache.tags.occ_percent::total 0.823320 # Average percentage of cache occupancy -system.cpu.icache.tags.occ_task_id_blocks::1024 1742 # Occupied blocks per task id -system.cpu.icache.tags.age_task_id_blocks_1024::0 62 # Occupied blocks per task id -system.cpu.icache.tags.age_task_id_blocks_1024::1 103 # Occupied blocks per task id +system.cpu.icache.tags.occ_blocks::cpu.inst 1690.197843 # Average occupied blocks per requestor +system.cpu.icache.tags.occ_percent::cpu.inst 0.825292 # Average percentage of cache occupancy +system.cpu.icache.tags.occ_percent::total 0.825292 # Average percentage of cache occupancy +system.cpu.icache.tags.occ_task_id_blocks::1024 1746 # Occupied blocks per task id +system.cpu.icache.tags.age_task_id_blocks_1024::0 61 # Occupied blocks per task id +system.cpu.icache.tags.age_task_id_blocks_1024::1 105 # Occupied blocks per task id system.cpu.icache.tags.age_task_id_blocks_1024::2 2 # Occupied blocks per task id -system.cpu.icache.tags.age_task_id_blocks_1024::3 1 # Occupied blocks per task id -system.cpu.icache.tags.age_task_id_blocks_1024::4 1574 # Occupied blocks per task id -system.cpu.icache.tags.occ_task_id_percent::1024 0.850586 # Percentage of cache occupancy per task id -system.cpu.icache.tags.tag_accesses 571539889 # Number of tag accesses -system.cpu.icache.tags.data_accesses 571539889 # Number of data accesses -system.cpu.icache.pwrStateResidencyTicks::UNDEFINED 504258263000 # Cumulative time (in ticks) in various power states -system.cpu.icache.ReadReq_hits::cpu.inst 285751480 # number of ReadReq hits -system.cpu.icache.ReadReq_hits::total 285751480 # number of ReadReq hits -system.cpu.icache.demand_hits::cpu.inst 285751480 # number of demand (read+write) hits -system.cpu.icache.demand_hits::total 285751480 # number of demand (read+write) hits -system.cpu.icache.overall_hits::cpu.inst 285751480 # number of overall hits -system.cpu.icache.overall_hits::total 285751480 # number of overall hits -system.cpu.icache.ReadReq_misses::cpu.inst 12310 # number of ReadReq misses -system.cpu.icache.ReadReq_misses::total 12310 # number of ReadReq misses -system.cpu.icache.demand_misses::cpu.inst 12310 # number of demand (read+write) misses -system.cpu.icache.demand_misses::total 12310 # number of demand (read+write) misses -system.cpu.icache.overall_misses::cpu.inst 12310 # number of overall misses -system.cpu.icache.overall_misses::total 12310 # number of overall misses -system.cpu.icache.ReadReq_miss_latency::cpu.inst 352350500 # number of ReadReq miss cycles -system.cpu.icache.ReadReq_miss_latency::total 352350500 # number of ReadReq miss cycles -system.cpu.icache.demand_miss_latency::cpu.inst 352350500 # number of demand (read+write) miss cycles -system.cpu.icache.demand_miss_latency::total 352350500 # number of demand (read+write) miss cycles -system.cpu.icache.overall_miss_latency::cpu.inst 352350500 # number of overall miss cycles -system.cpu.icache.overall_miss_latency::total 352350500 # number of overall miss cycles -system.cpu.icache.ReadReq_accesses::cpu.inst 285763790 # number of ReadReq accesses(hits+misses) -system.cpu.icache.ReadReq_accesses::total 285763790 # number of ReadReq accesses(hits+misses) -system.cpu.icache.demand_accesses::cpu.inst 285763790 # number of demand (read+write) accesses -system.cpu.icache.demand_accesses::total 285763790 # 
number of demand (read+write) accesses -system.cpu.icache.overall_accesses::cpu.inst 285763790 # number of overall (read+write) accesses -system.cpu.icache.overall_accesses::total 285763790 # number of overall (read+write) accesses +system.cpu.icache.tags.age_task_id_blocks_1024::3 2 # Occupied blocks per task id +system.cpu.icache.tags.age_task_id_blocks_1024::4 1576 # Occupied blocks per task id +system.cpu.icache.tags.occ_task_id_percent::1024 0.852539 # Percentage of cache occupancy per task id +system.cpu.icache.tags.tag_accesses 573181144 # Number of tag accesses +system.cpu.icache.tags.data_accesses 573181144 # Number of data accesses +system.cpu.icache.pwrStateResidencyTicks::UNDEFINED 508215534000 # Cumulative time (in ticks) in various power states +system.cpu.icache.ReadReq_hits::cpu.inst 286572082 # number of ReadReq hits +system.cpu.icache.ReadReq_hits::total 286572082 # number of ReadReq hits +system.cpu.icache.demand_hits::cpu.inst 286572082 # number of demand (read+write) hits +system.cpu.icache.demand_hits::total 286572082 # number of demand (read+write) hits +system.cpu.icache.overall_hits::cpu.inst 286572082 # number of overall hits +system.cpu.icache.overall_hits::total 286572082 # number of overall hits +system.cpu.icache.ReadReq_misses::cpu.inst 12327 # number of ReadReq misses +system.cpu.icache.ReadReq_misses::total 12327 # number of ReadReq misses +system.cpu.icache.demand_misses::cpu.inst 12327 # number of demand (read+write) misses +system.cpu.icache.demand_misses::total 12327 # number of demand (read+write) misses +system.cpu.icache.overall_misses::cpu.inst 12327 # number of overall misses +system.cpu.icache.overall_misses::total 12327 # number of overall misses +system.cpu.icache.ReadReq_miss_latency::cpu.inst 353123500 # number of ReadReq miss cycles +system.cpu.icache.ReadReq_miss_latency::total 353123500 # number of ReadReq miss cycles +system.cpu.icache.demand_miss_latency::cpu.inst 353123500 # number of demand (read+write) miss cycles +system.cpu.icache.demand_miss_latency::total 353123500 # number of demand (read+write) miss cycles +system.cpu.icache.overall_miss_latency::cpu.inst 353123500 # number of overall miss cycles +system.cpu.icache.overall_miss_latency::total 353123500 # number of overall miss cycles +system.cpu.icache.ReadReq_accesses::cpu.inst 286584409 # number of ReadReq accesses(hits+misses) +system.cpu.icache.ReadReq_accesses::total 286584409 # number of ReadReq accesses(hits+misses) +system.cpu.icache.demand_accesses::cpu.inst 286584409 # number of demand (read+write) accesses +system.cpu.icache.demand_accesses::total 286584409 # number of demand (read+write) accesses +system.cpu.icache.overall_accesses::cpu.inst 286584409 # number of overall (read+write) accesses +system.cpu.icache.overall_accesses::total 286584409 # number of overall (read+write) accesses system.cpu.icache.ReadReq_miss_rate::cpu.inst 0.000043 # miss rate for ReadReq accesses system.cpu.icache.ReadReq_miss_rate::total 0.000043 # miss rate for ReadReq accesses system.cpu.icache.demand_miss_rate::cpu.inst 0.000043 # miss rate for demand accesses system.cpu.icache.demand_miss_rate::total 0.000043 # miss rate for demand accesses system.cpu.icache.overall_miss_rate::cpu.inst 0.000043 # miss rate for overall accesses system.cpu.icache.overall_miss_rate::total 0.000043 # miss rate for overall accesses -system.cpu.icache.ReadReq_avg_miss_latency::cpu.inst 28623.111292 # average ReadReq miss latency -system.cpu.icache.ReadReq_avg_miss_latency::total 28623.111292 # average ReadReq 
miss latency -system.cpu.icache.demand_avg_miss_latency::cpu.inst 28623.111292 # average overall miss latency -system.cpu.icache.demand_avg_miss_latency::total 28623.111292 # average overall miss latency -system.cpu.icache.overall_avg_miss_latency::cpu.inst 28623.111292 # average overall miss latency -system.cpu.icache.overall_avg_miss_latency::total 28623.111292 # average overall miss latency +system.cpu.icache.ReadReq_avg_miss_latency::cpu.inst 28646.345421 # average ReadReq miss latency +system.cpu.icache.ReadReq_avg_miss_latency::total 28646.345421 # average ReadReq miss latency +system.cpu.icache.demand_avg_miss_latency::cpu.inst 28646.345421 # average overall miss latency +system.cpu.icache.demand_avg_miss_latency::total 28646.345421 # average overall miss latency +system.cpu.icache.overall_avg_miss_latency::cpu.inst 28646.345421 # average overall miss latency +system.cpu.icache.overall_avg_miss_latency::total 28646.345421 # average overall miss latency system.cpu.icache.blocked_cycles::no_mshrs 0 # number of cycles access was blocked system.cpu.icache.blocked_cycles::no_targets 0 # number of cycles access was blocked system.cpu.icache.blocked::no_mshrs 0 # number of cycles access was blocked system.cpu.icache.blocked::no_targets 0 # number of cycles access was blocked system.cpu.icache.avg_blocked_cycles::no_mshrs nan # average number of cycles each access was blocked system.cpu.icache.avg_blocked_cycles::no_targets nan # average number of cycles each access was blocked -system.cpu.icache.writebacks::writebacks 10567 # number of writebacks -system.cpu.icache.writebacks::total 10567 # number of writebacks -system.cpu.icache.ReadReq_mshr_misses::cpu.inst 12310 # number of ReadReq MSHR misses -system.cpu.icache.ReadReq_mshr_misses::total 12310 # number of ReadReq MSHR misses -system.cpu.icache.demand_mshr_misses::cpu.inst 12310 # number of demand (read+write) MSHR misses -system.cpu.icache.demand_mshr_misses::total 12310 # number of demand (read+write) MSHR misses -system.cpu.icache.overall_mshr_misses::cpu.inst 12310 # number of overall MSHR misses -system.cpu.icache.overall_mshr_misses::total 12310 # number of overall MSHR misses -system.cpu.icache.ReadReq_mshr_miss_latency::cpu.inst 340041500 # number of ReadReq MSHR miss cycles -system.cpu.icache.ReadReq_mshr_miss_latency::total 340041500 # number of ReadReq MSHR miss cycles -system.cpu.icache.demand_mshr_miss_latency::cpu.inst 340041500 # number of demand (read+write) MSHR miss cycles -system.cpu.icache.demand_mshr_miss_latency::total 340041500 # number of demand (read+write) MSHR miss cycles -system.cpu.icache.overall_mshr_miss_latency::cpu.inst 340041500 # number of overall MSHR miss cycles -system.cpu.icache.overall_mshr_miss_latency::total 340041500 # number of overall MSHR miss cycles +system.cpu.icache.writebacks::writebacks 10580 # number of writebacks +system.cpu.icache.writebacks::total 10580 # number of writebacks +system.cpu.icache.ReadReq_mshr_misses::cpu.inst 12327 # number of ReadReq MSHR misses +system.cpu.icache.ReadReq_mshr_misses::total 12327 # number of ReadReq MSHR misses +system.cpu.icache.demand_mshr_misses::cpu.inst 12327 # number of demand (read+write) MSHR misses +system.cpu.icache.demand_mshr_misses::total 12327 # number of demand (read+write) MSHR misses +system.cpu.icache.overall_mshr_misses::cpu.inst 12327 # number of overall MSHR misses +system.cpu.icache.overall_mshr_misses::total 12327 # number of overall MSHR misses +system.cpu.icache.ReadReq_mshr_miss_latency::cpu.inst 340797500 # number of 
ReadReq MSHR miss cycles +system.cpu.icache.ReadReq_mshr_miss_latency::total 340797500 # number of ReadReq MSHR miss cycles +system.cpu.icache.demand_mshr_miss_latency::cpu.inst 340797500 # number of demand (read+write) MSHR miss cycles +system.cpu.icache.demand_mshr_miss_latency::total 340797500 # number of demand (read+write) MSHR miss cycles +system.cpu.icache.overall_mshr_miss_latency::cpu.inst 340797500 # number of overall MSHR miss cycles +system.cpu.icache.overall_mshr_miss_latency::total 340797500 # number of overall MSHR miss cycles system.cpu.icache.ReadReq_mshr_miss_rate::cpu.inst 0.000043 # mshr miss rate for ReadReq accesses system.cpu.icache.ReadReq_mshr_miss_rate::total 0.000043 # mshr miss rate for ReadReq accesses system.cpu.icache.demand_mshr_miss_rate::cpu.inst 0.000043 # mshr miss rate for demand accesses system.cpu.icache.demand_mshr_miss_rate::total 0.000043 # mshr miss rate for demand accesses system.cpu.icache.overall_mshr_miss_rate::cpu.inst 0.000043 # mshr miss rate for overall accesses system.cpu.icache.overall_mshr_miss_rate::total 0.000043 # mshr miss rate for overall accesses -system.cpu.icache.ReadReq_avg_mshr_miss_latency::cpu.inst 27623.192526 # average ReadReq mshr miss latency -system.cpu.icache.ReadReq_avg_mshr_miss_latency::total 27623.192526 # average ReadReq mshr miss latency -system.cpu.icache.demand_avg_mshr_miss_latency::cpu.inst 27623.192526 # average overall mshr miss latency -system.cpu.icache.demand_avg_mshr_miss_latency::total 27623.192526 # average overall mshr miss latency -system.cpu.icache.overall_avg_mshr_miss_latency::cpu.inst 27623.192526 # average overall mshr miss latency -system.cpu.icache.overall_avg_mshr_miss_latency::total 27623.192526 # average overall mshr miss latency -system.cpu.l2cache.tags.pwrStateResidencyTicks::UNDEFINED 504258263000 # Cumulative time (in ticks) in various power states -system.cpu.l2cache.tags.replacements 259940 # number of replacements -system.cpu.l2cache.tags.tagsinuse 32579.649991 # Cycle average of tags in use -system.cpu.l2cache.tags.total_refs 1218214 # Total number of references to valid blocks. -system.cpu.l2cache.tags.sampled_refs 292676 # Sample count of references to valid blocks. -system.cpu.l2cache.tags.avg_refs 4.162330 # Average number of references to valid blocks. +system.cpu.icache.ReadReq_avg_mshr_miss_latency::cpu.inst 27646.426543 # average ReadReq mshr miss latency +system.cpu.icache.ReadReq_avg_mshr_miss_latency::total 27646.426543 # average ReadReq mshr miss latency +system.cpu.icache.demand_avg_mshr_miss_latency::cpu.inst 27646.426543 # average overall mshr miss latency +system.cpu.icache.demand_avg_mshr_miss_latency::total 27646.426543 # average overall mshr miss latency +system.cpu.icache.overall_avg_mshr_miss_latency::cpu.inst 27646.426543 # average overall mshr miss latency +system.cpu.icache.overall_avg_mshr_miss_latency::total 27646.426543 # average overall mshr miss latency +system.cpu.l2cache.tags.pwrStateResidencyTicks::UNDEFINED 508215534000 # Cumulative time (in ticks) in various power states +system.cpu.l2cache.tags.replacements 259960 # number of replacements +system.cpu.l2cache.tags.tagsinuse 32580.630666 # Cycle average of tags in use +system.cpu.l2cache.tags.total_refs 1218282 # Total number of references to valid blocks. +system.cpu.l2cache.tags.sampled_refs 292696 # Sample count of references to valid blocks. +system.cpu.l2cache.tags.avg_refs 4.162278 # Average number of references to valid blocks. 
system.cpu.l2cache.tags.warmup_cycle 0 # Cycle when the warmup percentage was hit. -system.cpu.l2cache.tags.occ_blocks::writebacks 2630.640415 # Average occupied blocks per requestor -system.cpu.l2cache.tags.occ_blocks::cpu.inst 79.297977 # Average occupied blocks per requestor -system.cpu.l2cache.tags.occ_blocks::cpu.data 29869.711599 # Average occupied blocks per requestor -system.cpu.l2cache.tags.occ_percent::writebacks 0.080281 # Average percentage of cache occupancy -system.cpu.l2cache.tags.occ_percent::cpu.inst 0.002420 # Average percentage of cache occupancy -system.cpu.l2cache.tags.occ_percent::cpu.data 0.911551 # Average percentage of cache occupancy -system.cpu.l2cache.tags.occ_percent::total 0.994252 # Average percentage of cache occupancy +system.cpu.l2cache.tags.occ_blocks::writebacks 2624.989355 # Average occupied blocks per requestor +system.cpu.l2cache.tags.occ_blocks::cpu.inst 79.480782 # Average occupied blocks per requestor +system.cpu.l2cache.tags.occ_blocks::cpu.data 29876.160528 # Average occupied blocks per requestor +system.cpu.l2cache.tags.occ_percent::writebacks 0.080108 # Average percentage of cache occupancy +system.cpu.l2cache.tags.occ_percent::cpu.inst 0.002426 # Average percentage of cache occupancy +system.cpu.l2cache.tags.occ_percent::cpu.data 0.911748 # Average percentage of cache occupancy +system.cpu.l2cache.tags.occ_percent::total 0.994282 # Average percentage of cache occupancy system.cpu.l2cache.tags.occ_task_id_blocks::1024 32736 # Occupied blocks per task id -system.cpu.l2cache.tags.age_task_id_blocks_1024::0 154 # Occupied blocks per task id -system.cpu.l2cache.tags.age_task_id_blocks_1024::1 280 # Occupied blocks per task id -system.cpu.l2cache.tags.age_task_id_blocks_1024::2 305 # Occupied blocks per task id -system.cpu.l2cache.tags.age_task_id_blocks_1024::3 2976 # Occupied blocks per task id -system.cpu.l2cache.tags.age_task_id_blocks_1024::4 29021 # Occupied blocks per task id +system.cpu.l2cache.tags.age_task_id_blocks_1024::0 153 # Occupied blocks per task id +system.cpu.l2cache.tags.age_task_id_blocks_1024::1 282 # Occupied blocks per task id +system.cpu.l2cache.tags.age_task_id_blocks_1024::2 302 # Occupied blocks per task id +system.cpu.l2cache.tags.age_task_id_blocks_1024::3 2944 # Occupied blocks per task id +system.cpu.l2cache.tags.age_task_id_blocks_1024::4 29055 # Occupied blocks per task id system.cpu.l2cache.tags.occ_task_id_percent::1024 0.999023 # Percentage of cache occupancy per task id -system.cpu.l2cache.tags.tag_accesses 13001951 # Number of tag accesses -system.cpu.l2cache.tags.data_accesses 13001951 # Number of data accesses -system.cpu.l2cache.pwrStateResidencyTicks::UNDEFINED 504258263000 # Cumulative time (in ticks) in various power states -system.cpu.l2cache.WritebackDirty_hits::writebacks 88489 # number of WritebackDirty hits -system.cpu.l2cache.WritebackDirty_hits::total 88489 # number of WritebackDirty hits -system.cpu.l2cache.WritebackClean_hits::writebacks 10567 # number of WritebackClean hits -system.cpu.l2cache.WritebackClean_hits::total 10567 # number of WritebackClean hits +system.cpu.l2cache.tags.tag_accesses 13002675 # Number of tag accesses +system.cpu.l2cache.tags.data_accesses 13002675 # Number of data accesses +system.cpu.l2cache.pwrStateResidencyTicks::UNDEFINED 508215534000 # Cumulative time (in ticks) in various power states +system.cpu.l2cache.WritebackDirty_hits::writebacks 88481 # number of WritebackDirty hits +system.cpu.l2cache.WritebackDirty_hits::total 88481 # number of WritebackDirty hits 
+system.cpu.l2cache.WritebackClean_hits::writebacks 10580 # number of WritebackClean hits +system.cpu.l2cache.WritebackClean_hits::total 10580 # number of WritebackClean hits system.cpu.l2cache.ReadExReq_hits::cpu.data 2366 # number of ReadExReq hits system.cpu.l2cache.ReadExReq_hits::total 2366 # number of ReadExReq hits -system.cpu.l2cache.ReadCleanReq_hits::cpu.inst 9417 # number of ReadCleanReq hits -system.cpu.l2cache.ReadCleanReq_hits::total 9417 # number of ReadCleanReq hits -system.cpu.l2cache.ReadSharedReq_hits::cpu.data 488885 # number of ReadSharedReq hits -system.cpu.l2cache.ReadSharedReq_hits::total 488885 # number of ReadSharedReq hits -system.cpu.l2cache.demand_hits::cpu.inst 9417 # number of demand (read+write) hits -system.cpu.l2cache.demand_hits::cpu.data 491251 # number of demand (read+write) hits -system.cpu.l2cache.demand_hits::total 500668 # number of demand (read+write) hits -system.cpu.l2cache.overall_hits::cpu.inst 9417 # number of overall hits -system.cpu.l2cache.overall_hits::cpu.data 491251 # number of overall hits -system.cpu.l2cache.overall_hits::total 500668 # number of overall hits +system.cpu.l2cache.ReadCleanReq_hits::cpu.inst 9421 # number of ReadCleanReq hits +system.cpu.l2cache.ReadCleanReq_hits::total 9421 # number of ReadCleanReq hits +system.cpu.l2cache.ReadSharedReq_hits::cpu.data 488911 # number of ReadSharedReq hits +system.cpu.l2cache.ReadSharedReq_hits::total 488911 # number of ReadSharedReq hits +system.cpu.l2cache.demand_hits::cpu.inst 9421 # number of demand (read+write) hits +system.cpu.l2cache.demand_hits::cpu.data 491277 # number of demand (read+write) hits +system.cpu.l2cache.demand_hits::total 500698 # number of demand (read+write) hits +system.cpu.l2cache.overall_hits::cpu.inst 9421 # number of overall hits +system.cpu.l2cache.overall_hits::cpu.data 491277 # number of overall hits +system.cpu.l2cache.overall_hits::total 500698 # number of overall hits system.cpu.l2cache.ReadExReq_misses::cpu.data 66645 # number of ReadExReq misses system.cpu.l2cache.ReadExReq_misses::total 66645 # number of ReadExReq misses -system.cpu.l2cache.ReadCleanReq_misses::cpu.inst 2893 # number of ReadCleanReq misses -system.cpu.l2cache.ReadCleanReq_misses::total 2893 # number of ReadCleanReq misses -system.cpu.l2cache.ReadSharedReq_misses::cpu.data 222730 # number of ReadSharedReq misses -system.cpu.l2cache.ReadSharedReq_misses::total 222730 # number of ReadSharedReq misses -system.cpu.l2cache.demand_misses::cpu.inst 2893 # number of demand (read+write) misses -system.cpu.l2cache.demand_misses::cpu.data 289375 # number of demand (read+write) misses -system.cpu.l2cache.demand_misses::total 292268 # number of demand (read+write) misses -system.cpu.l2cache.overall_misses::cpu.inst 2893 # number of overall misses -system.cpu.l2cache.overall_misses::cpu.data 289375 # number of overall misses -system.cpu.l2cache.overall_misses::total 292268 # number of overall misses -system.cpu.l2cache.ReadExReq_miss_latency::cpu.data 4942620000 # number of ReadExReq miss cycles -system.cpu.l2cache.ReadExReq_miss_latency::total 4942620000 # number of ReadExReq miss cycles -system.cpu.l2cache.ReadCleanReq_miss_latency::cpu.inst 222699500 # number of ReadCleanReq miss cycles -system.cpu.l2cache.ReadCleanReq_miss_latency::total 222699500 # number of ReadCleanReq miss cycles -system.cpu.l2cache.ReadSharedReq_miss_latency::cpu.data 18537323500 # number of ReadSharedReq miss cycles -system.cpu.l2cache.ReadSharedReq_miss_latency::total 18537323500 # number of ReadSharedReq miss cycles 
-system.cpu.l2cache.demand_miss_latency::cpu.inst 222699500 # number of demand (read+write) miss cycles -system.cpu.l2cache.demand_miss_latency::cpu.data 23479943500 # number of demand (read+write) miss cycles -system.cpu.l2cache.demand_miss_latency::total 23702643000 # number of demand (read+write) miss cycles -system.cpu.l2cache.overall_miss_latency::cpu.inst 222699500 # number of overall miss cycles -system.cpu.l2cache.overall_miss_latency::cpu.data 23479943500 # number of overall miss cycles -system.cpu.l2cache.overall_miss_latency::total 23702643000 # number of overall miss cycles -system.cpu.l2cache.WritebackDirty_accesses::writebacks 88489 # number of WritebackDirty accesses(hits+misses) -system.cpu.l2cache.WritebackDirty_accesses::total 88489 # number of WritebackDirty accesses(hits+misses) -system.cpu.l2cache.WritebackClean_accesses::writebacks 10567 # number of WritebackClean accesses(hits+misses) -system.cpu.l2cache.WritebackClean_accesses::total 10567 # number of WritebackClean accesses(hits+misses) +system.cpu.l2cache.ReadCleanReq_misses::cpu.inst 2906 # number of ReadCleanReq misses +system.cpu.l2cache.ReadCleanReq_misses::total 2906 # number of ReadCleanReq misses +system.cpu.l2cache.ReadSharedReq_misses::cpu.data 222733 # number of ReadSharedReq misses +system.cpu.l2cache.ReadSharedReq_misses::total 222733 # number of ReadSharedReq misses +system.cpu.l2cache.demand_misses::cpu.inst 2906 # number of demand (read+write) misses +system.cpu.l2cache.demand_misses::cpu.data 289378 # number of demand (read+write) misses +system.cpu.l2cache.demand_misses::total 292284 # number of demand (read+write) misses +system.cpu.l2cache.overall_misses::cpu.inst 2906 # number of overall misses +system.cpu.l2cache.overall_misses::cpu.data 289378 # number of overall misses +system.cpu.l2cache.overall_misses::total 292284 # number of overall misses +system.cpu.l2cache.ReadExReq_miss_latency::cpu.data 4939623000 # number of ReadExReq miss cycles +system.cpu.l2cache.ReadExReq_miss_latency::total 4939623000 # number of ReadExReq miss cycles +system.cpu.l2cache.ReadCleanReq_miss_latency::cpu.inst 223388000 # number of ReadCleanReq miss cycles +system.cpu.l2cache.ReadCleanReq_miss_latency::total 223388000 # number of ReadCleanReq miss cycles +system.cpu.l2cache.ReadSharedReq_miss_latency::cpu.data 17499220000 # number of ReadSharedReq miss cycles +system.cpu.l2cache.ReadSharedReq_miss_latency::total 17499220000 # number of ReadSharedReq miss cycles +system.cpu.l2cache.demand_miss_latency::cpu.inst 223388000 # number of demand (read+write) miss cycles +system.cpu.l2cache.demand_miss_latency::cpu.data 22438843000 # number of demand (read+write) miss cycles +system.cpu.l2cache.demand_miss_latency::total 22662231000 # number of demand (read+write) miss cycles +system.cpu.l2cache.overall_miss_latency::cpu.inst 223388000 # number of overall miss cycles +system.cpu.l2cache.overall_miss_latency::cpu.data 22438843000 # number of overall miss cycles +system.cpu.l2cache.overall_miss_latency::total 22662231000 # number of overall miss cycles +system.cpu.l2cache.WritebackDirty_accesses::writebacks 88481 # number of WritebackDirty accesses(hits+misses) +system.cpu.l2cache.WritebackDirty_accesses::total 88481 # number of WritebackDirty accesses(hits+misses) +system.cpu.l2cache.WritebackClean_accesses::writebacks 10580 # number of WritebackClean accesses(hits+misses) +system.cpu.l2cache.WritebackClean_accesses::total 10580 # number of WritebackClean accesses(hits+misses) 
system.cpu.l2cache.ReadExReq_accesses::cpu.data 69011 # number of ReadExReq accesses(hits+misses) system.cpu.l2cache.ReadExReq_accesses::total 69011 # number of ReadExReq accesses(hits+misses) -system.cpu.l2cache.ReadCleanReq_accesses::cpu.inst 12310 # number of ReadCleanReq accesses(hits+misses) -system.cpu.l2cache.ReadCleanReq_accesses::total 12310 # number of ReadCleanReq accesses(hits+misses) -system.cpu.l2cache.ReadSharedReq_accesses::cpu.data 711615 # number of ReadSharedReq accesses(hits+misses) -system.cpu.l2cache.ReadSharedReq_accesses::total 711615 # number of ReadSharedReq accesses(hits+misses) -system.cpu.l2cache.demand_accesses::cpu.inst 12310 # number of demand (read+write) accesses -system.cpu.l2cache.demand_accesses::cpu.data 780626 # number of demand (read+write) accesses -system.cpu.l2cache.demand_accesses::total 792936 # number of demand (read+write) accesses -system.cpu.l2cache.overall_accesses::cpu.inst 12310 # number of overall (read+write) accesses -system.cpu.l2cache.overall_accesses::cpu.data 780626 # number of overall (read+write) accesses -system.cpu.l2cache.overall_accesses::total 792936 # number of overall (read+write) accesses +system.cpu.l2cache.ReadCleanReq_accesses::cpu.inst 12327 # number of ReadCleanReq accesses(hits+misses) +system.cpu.l2cache.ReadCleanReq_accesses::total 12327 # number of ReadCleanReq accesses(hits+misses) +system.cpu.l2cache.ReadSharedReq_accesses::cpu.data 711644 # number of ReadSharedReq accesses(hits+misses) +system.cpu.l2cache.ReadSharedReq_accesses::total 711644 # number of ReadSharedReq accesses(hits+misses) +system.cpu.l2cache.demand_accesses::cpu.inst 12327 # number of demand (read+write) accesses +system.cpu.l2cache.demand_accesses::cpu.data 780655 # number of demand (read+write) accesses +system.cpu.l2cache.demand_accesses::total 792982 # number of demand (read+write) accesses +system.cpu.l2cache.overall_accesses::cpu.inst 12327 # number of overall (read+write) accesses +system.cpu.l2cache.overall_accesses::cpu.data 780655 # number of overall (read+write) accesses +system.cpu.l2cache.overall_accesses::total 792982 # number of overall (read+write) accesses system.cpu.l2cache.ReadExReq_miss_rate::cpu.data 0.965716 # miss rate for ReadExReq accesses system.cpu.l2cache.ReadExReq_miss_rate::total 0.965716 # miss rate for ReadExReq accesses -system.cpu.l2cache.ReadCleanReq_miss_rate::cpu.inst 0.235012 # miss rate for ReadCleanReq accesses -system.cpu.l2cache.ReadCleanReq_miss_rate::total 0.235012 # miss rate for ReadCleanReq accesses -system.cpu.l2cache.ReadSharedReq_miss_rate::cpu.data 0.312992 # miss rate for ReadSharedReq accesses -system.cpu.l2cache.ReadSharedReq_miss_rate::total 0.312992 # miss rate for ReadSharedReq accesses -system.cpu.l2cache.demand_miss_rate::cpu.inst 0.235012 # miss rate for demand accesses -system.cpu.l2cache.demand_miss_rate::cpu.data 0.370696 # miss rate for demand accesses -system.cpu.l2cache.demand_miss_rate::total 0.368590 # miss rate for demand accesses -system.cpu.l2cache.overall_miss_rate::cpu.inst 0.235012 # miss rate for overall accesses -system.cpu.l2cache.overall_miss_rate::cpu.data 0.370696 # miss rate for overall accesses -system.cpu.l2cache.overall_miss_rate::total 0.368590 # miss rate for overall accesses -system.cpu.l2cache.ReadExReq_avg_miss_latency::cpu.data 74163.403106 # average ReadExReq miss latency -system.cpu.l2cache.ReadExReq_avg_miss_latency::total 74163.403106 # average ReadExReq miss latency -system.cpu.l2cache.ReadCleanReq_avg_miss_latency::cpu.inst 76978.741791 # average 
ReadCleanReq miss latency -system.cpu.l2cache.ReadCleanReq_avg_miss_latency::total 76978.741791 # average ReadCleanReq miss latency -system.cpu.l2cache.ReadSharedReq_avg_miss_latency::cpu.data 83227.780272 # average ReadSharedReq miss latency -system.cpu.l2cache.ReadSharedReq_avg_miss_latency::total 83227.780272 # average ReadSharedReq miss latency -system.cpu.l2cache.demand_avg_miss_latency::cpu.inst 76978.741791 # average overall miss latency -system.cpu.l2cache.demand_avg_miss_latency::cpu.data 81140.193521 # average overall miss latency -system.cpu.l2cache.demand_avg_miss_latency::total 81099.001601 # average overall miss latency -system.cpu.l2cache.overall_avg_miss_latency::cpu.inst 76978.741791 # average overall miss latency -system.cpu.l2cache.overall_avg_miss_latency::cpu.data 81140.193521 # average overall miss latency -system.cpu.l2cache.overall_avg_miss_latency::total 81099.001601 # average overall miss latency +system.cpu.l2cache.ReadCleanReq_miss_rate::cpu.inst 0.235743 # miss rate for ReadCleanReq accesses +system.cpu.l2cache.ReadCleanReq_miss_rate::total 0.235743 # miss rate for ReadCleanReq accesses +system.cpu.l2cache.ReadSharedReq_miss_rate::cpu.data 0.312984 # miss rate for ReadSharedReq accesses +system.cpu.l2cache.ReadSharedReq_miss_rate::total 0.312984 # miss rate for ReadSharedReq accesses +system.cpu.l2cache.demand_miss_rate::cpu.inst 0.235743 # miss rate for demand accesses +system.cpu.l2cache.demand_miss_rate::cpu.data 0.370686 # miss rate for demand accesses +system.cpu.l2cache.demand_miss_rate::total 0.368588 # miss rate for demand accesses +system.cpu.l2cache.overall_miss_rate::cpu.inst 0.235743 # miss rate for overall accesses +system.cpu.l2cache.overall_miss_rate::cpu.data 0.370686 # miss rate for overall accesses +system.cpu.l2cache.overall_miss_rate::total 0.368588 # miss rate for overall accesses +system.cpu.l2cache.ReadExReq_avg_miss_latency::cpu.data 74118.433491 # average ReadExReq miss latency +system.cpu.l2cache.ReadExReq_avg_miss_latency::total 74118.433491 # average ReadExReq miss latency +system.cpu.l2cache.ReadCleanReq_avg_miss_latency::cpu.inst 76871.300757 # average ReadCleanReq miss latency +system.cpu.l2cache.ReadCleanReq_avg_miss_latency::total 76871.300757 # average ReadCleanReq miss latency +system.cpu.l2cache.ReadSharedReq_avg_miss_latency::cpu.data 78565.906264 # average ReadSharedReq miss latency +system.cpu.l2cache.ReadSharedReq_avg_miss_latency::total 78565.906264 # average ReadSharedReq miss latency +system.cpu.l2cache.demand_avg_miss_latency::cpu.inst 76871.300757 # average overall miss latency +system.cpu.l2cache.demand_avg_miss_latency::cpu.data 77541.634126 # average overall miss latency +system.cpu.l2cache.demand_avg_miss_latency::total 77534.969413 # average overall miss latency +system.cpu.l2cache.overall_avg_miss_latency::cpu.inst 76871.300757 # average overall miss latency +system.cpu.l2cache.overall_avg_miss_latency::cpu.data 77541.634126 # average overall miss latency +system.cpu.l2cache.overall_avg_miss_latency::total 77534.969413 # average overall miss latency system.cpu.l2cache.blocked_cycles::no_mshrs 0 # number of cycles access was blocked system.cpu.l2cache.blocked_cycles::no_targets 0 # number of cycles access was blocked system.cpu.l2cache.blocked::no_mshrs 0 # number of cycles access was blocked @@ -694,118 +691,120 @@ system.cpu.l2cache.CleanEvict_mshr_misses::writebacks 1 system.cpu.l2cache.CleanEvict_mshr_misses::total 1 # number of CleanEvict MSHR misses system.cpu.l2cache.ReadExReq_mshr_misses::cpu.data 66645 
# number of ReadExReq MSHR misses system.cpu.l2cache.ReadExReq_mshr_misses::total 66645 # number of ReadExReq MSHR misses -system.cpu.l2cache.ReadCleanReq_mshr_misses::cpu.inst 2893 # number of ReadCleanReq MSHR misses -system.cpu.l2cache.ReadCleanReq_mshr_misses::total 2893 # number of ReadCleanReq MSHR misses -system.cpu.l2cache.ReadSharedReq_mshr_misses::cpu.data 222730 # number of ReadSharedReq MSHR misses -system.cpu.l2cache.ReadSharedReq_mshr_misses::total 222730 # number of ReadSharedReq MSHR misses -system.cpu.l2cache.demand_mshr_misses::cpu.inst 2893 # number of demand (read+write) MSHR misses -system.cpu.l2cache.demand_mshr_misses::cpu.data 289375 # number of demand (read+write) MSHR misses -system.cpu.l2cache.demand_mshr_misses::total 292268 # number of demand (read+write) MSHR misses -system.cpu.l2cache.overall_mshr_misses::cpu.inst 2893 # number of overall MSHR misses -system.cpu.l2cache.overall_mshr_misses::cpu.data 289375 # number of overall MSHR misses -system.cpu.l2cache.overall_mshr_misses::total 292268 # number of overall MSHR misses -system.cpu.l2cache.ReadExReq_mshr_miss_latency::cpu.data 4276170000 # number of ReadExReq MSHR miss cycles -system.cpu.l2cache.ReadExReq_mshr_miss_latency::total 4276170000 # number of ReadExReq MSHR miss cycles -system.cpu.l2cache.ReadCleanReq_mshr_miss_latency::cpu.inst 193779500 # number of ReadCleanReq MSHR miss cycles -system.cpu.l2cache.ReadCleanReq_mshr_miss_latency::total 193779500 # number of ReadCleanReq MSHR miss cycles -system.cpu.l2cache.ReadSharedReq_mshr_miss_latency::cpu.data 16310023500 # number of ReadSharedReq MSHR miss cycles -system.cpu.l2cache.ReadSharedReq_mshr_miss_latency::total 16310023500 # number of ReadSharedReq MSHR miss cycles -system.cpu.l2cache.demand_mshr_miss_latency::cpu.inst 193779500 # number of demand (read+write) MSHR miss cycles -system.cpu.l2cache.demand_mshr_miss_latency::cpu.data 20586193500 # number of demand (read+write) MSHR miss cycles -system.cpu.l2cache.demand_mshr_miss_latency::total 20779973000 # number of demand (read+write) MSHR miss cycles -system.cpu.l2cache.overall_mshr_miss_latency::cpu.inst 193779500 # number of overall MSHR miss cycles -system.cpu.l2cache.overall_mshr_miss_latency::cpu.data 20586193500 # number of overall MSHR miss cycles -system.cpu.l2cache.overall_mshr_miss_latency::total 20779973000 # number of overall MSHR miss cycles +system.cpu.l2cache.ReadCleanReq_mshr_misses::cpu.inst 2906 # number of ReadCleanReq MSHR misses +system.cpu.l2cache.ReadCleanReq_mshr_misses::total 2906 # number of ReadCleanReq MSHR misses +system.cpu.l2cache.ReadSharedReq_mshr_misses::cpu.data 222733 # number of ReadSharedReq MSHR misses +system.cpu.l2cache.ReadSharedReq_mshr_misses::total 222733 # number of ReadSharedReq MSHR misses +system.cpu.l2cache.demand_mshr_misses::cpu.inst 2906 # number of demand (read+write) MSHR misses +system.cpu.l2cache.demand_mshr_misses::cpu.data 289378 # number of demand (read+write) MSHR misses +system.cpu.l2cache.demand_mshr_misses::total 292284 # number of demand (read+write) MSHR misses +system.cpu.l2cache.overall_mshr_misses::cpu.inst 2906 # number of overall MSHR misses +system.cpu.l2cache.overall_mshr_misses::cpu.data 289378 # number of overall MSHR misses +system.cpu.l2cache.overall_mshr_misses::total 292284 # number of overall MSHR misses +system.cpu.l2cache.ReadExReq_mshr_miss_latency::cpu.data 4273173000 # number of ReadExReq MSHR miss cycles +system.cpu.l2cache.ReadExReq_mshr_miss_latency::total 4273173000 # number of ReadExReq MSHR miss cycles 
+system.cpu.l2cache.ReadCleanReq_mshr_miss_latency::cpu.inst 194338000 # number of ReadCleanReq MSHR miss cycles +system.cpu.l2cache.ReadCleanReq_mshr_miss_latency::total 194338000 # number of ReadCleanReq MSHR miss cycles +system.cpu.l2cache.ReadSharedReq_mshr_miss_latency::cpu.data 15271890000 # number of ReadSharedReq MSHR miss cycles +system.cpu.l2cache.ReadSharedReq_mshr_miss_latency::total 15271890000 # number of ReadSharedReq MSHR miss cycles +system.cpu.l2cache.demand_mshr_miss_latency::cpu.inst 194338000 # number of demand (read+write) MSHR miss cycles +system.cpu.l2cache.demand_mshr_miss_latency::cpu.data 19545063000 # number of demand (read+write) MSHR miss cycles +system.cpu.l2cache.demand_mshr_miss_latency::total 19739401000 # number of demand (read+write) MSHR miss cycles +system.cpu.l2cache.overall_mshr_miss_latency::cpu.inst 194338000 # number of overall MSHR miss cycles +system.cpu.l2cache.overall_mshr_miss_latency::cpu.data 19545063000 # number of overall MSHR miss cycles +system.cpu.l2cache.overall_mshr_miss_latency::total 19739401000 # number of overall MSHR miss cycles system.cpu.l2cache.CleanEvict_mshr_miss_rate::writebacks inf # mshr miss rate for CleanEvict accesses system.cpu.l2cache.CleanEvict_mshr_miss_rate::total inf # mshr miss rate for CleanEvict accesses system.cpu.l2cache.ReadExReq_mshr_miss_rate::cpu.data 0.965716 # mshr miss rate for ReadExReq accesses system.cpu.l2cache.ReadExReq_mshr_miss_rate::total 0.965716 # mshr miss rate for ReadExReq accesses -system.cpu.l2cache.ReadCleanReq_mshr_miss_rate::cpu.inst 0.235012 # mshr miss rate for ReadCleanReq accesses -system.cpu.l2cache.ReadCleanReq_mshr_miss_rate::total 0.235012 # mshr miss rate for ReadCleanReq accesses -system.cpu.l2cache.ReadSharedReq_mshr_miss_rate::cpu.data 0.312992 # mshr miss rate for ReadSharedReq accesses -system.cpu.l2cache.ReadSharedReq_mshr_miss_rate::total 0.312992 # mshr miss rate for ReadSharedReq accesses -system.cpu.l2cache.demand_mshr_miss_rate::cpu.inst 0.235012 # mshr miss rate for demand accesses -system.cpu.l2cache.demand_mshr_miss_rate::cpu.data 0.370696 # mshr miss rate for demand accesses -system.cpu.l2cache.demand_mshr_miss_rate::total 0.368590 # mshr miss rate for demand accesses -system.cpu.l2cache.overall_mshr_miss_rate::cpu.inst 0.235012 # mshr miss rate for overall accesses -system.cpu.l2cache.overall_mshr_miss_rate::cpu.data 0.370696 # mshr miss rate for overall accesses -system.cpu.l2cache.overall_mshr_miss_rate::total 0.368590 # mshr miss rate for overall accesses -system.cpu.l2cache.ReadExReq_avg_mshr_miss_latency::cpu.data 64163.403106 # average ReadExReq mshr miss latency -system.cpu.l2cache.ReadExReq_avg_mshr_miss_latency::total 64163.403106 # average ReadExReq mshr miss latency -system.cpu.l2cache.ReadCleanReq_avg_mshr_miss_latency::cpu.inst 66982.198410 # average ReadCleanReq mshr miss latency -system.cpu.l2cache.ReadCleanReq_avg_mshr_miss_latency::total 66982.198410 # average ReadCleanReq mshr miss latency -system.cpu.l2cache.ReadSharedReq_avg_mshr_miss_latency::cpu.data 73227.780272 # average ReadSharedReq mshr miss latency -system.cpu.l2cache.ReadSharedReq_avg_mshr_miss_latency::total 73227.780272 # average ReadSharedReq mshr miss latency -system.cpu.l2cache.demand_avg_mshr_miss_latency::cpu.inst 66982.198410 # average overall mshr miss latency -system.cpu.l2cache.demand_avg_mshr_miss_latency::cpu.data 71140.193521 # average overall mshr miss latency -system.cpu.l2cache.demand_avg_mshr_miss_latency::total 71099.035816 # average overall mshr miss latency 
-system.cpu.l2cache.overall_avg_mshr_miss_latency::cpu.inst 66982.198410 # average overall mshr miss latency -system.cpu.l2cache.overall_avg_mshr_miss_latency::cpu.data 71140.193521 # average overall mshr miss latency -system.cpu.l2cache.overall_avg_mshr_miss_latency::total 71099.035816 # average overall mshr miss latency -system.cpu.toL2Bus.snoop_filter.tot_requests 1580033 # Total number of requests made to the snoop filter. -system.cpu.toL2Bus.snoop_filter.hit_single_requests 787097 # Number of requests hitting in the snoop filter with a single holder of the requested data. +system.cpu.l2cache.ReadCleanReq_mshr_miss_rate::cpu.inst 0.235743 # mshr miss rate for ReadCleanReq accesses +system.cpu.l2cache.ReadCleanReq_mshr_miss_rate::total 0.235743 # mshr miss rate for ReadCleanReq accesses +system.cpu.l2cache.ReadSharedReq_mshr_miss_rate::cpu.data 0.312984 # mshr miss rate for ReadSharedReq accesses +system.cpu.l2cache.ReadSharedReq_mshr_miss_rate::total 0.312984 # mshr miss rate for ReadSharedReq accesses +system.cpu.l2cache.demand_mshr_miss_rate::cpu.inst 0.235743 # mshr miss rate for demand accesses +system.cpu.l2cache.demand_mshr_miss_rate::cpu.data 0.370686 # mshr miss rate for demand accesses +system.cpu.l2cache.demand_mshr_miss_rate::total 0.368588 # mshr miss rate for demand accesses +system.cpu.l2cache.overall_mshr_miss_rate::cpu.inst 0.235743 # mshr miss rate for overall accesses +system.cpu.l2cache.overall_mshr_miss_rate::cpu.data 0.370686 # mshr miss rate for overall accesses +system.cpu.l2cache.overall_mshr_miss_rate::total 0.368588 # mshr miss rate for overall accesses +system.cpu.l2cache.ReadExReq_avg_mshr_miss_latency::cpu.data 64118.433491 # average ReadExReq mshr miss latency +system.cpu.l2cache.ReadExReq_avg_mshr_miss_latency::total 64118.433491 # average ReadExReq mshr miss latency +system.cpu.l2cache.ReadCleanReq_avg_mshr_miss_latency::cpu.inst 66874.741913 # average ReadCleanReq mshr miss latency +system.cpu.l2cache.ReadCleanReq_avg_mshr_miss_latency::total 66874.741913 # average ReadCleanReq mshr miss latency +system.cpu.l2cache.ReadSharedReq_avg_mshr_miss_latency::cpu.data 68565.906264 # average ReadSharedReq mshr miss latency +system.cpu.l2cache.ReadSharedReq_avg_mshr_miss_latency::total 68565.906264 # average ReadSharedReq mshr miss latency +system.cpu.l2cache.demand_avg_mshr_miss_latency::cpu.inst 66874.741913 # average overall mshr miss latency +system.cpu.l2cache.demand_avg_mshr_miss_latency::cpu.data 67541.634126 # average overall mshr miss latency +system.cpu.l2cache.demand_avg_mshr_miss_latency::total 67535.003627 # average overall mshr miss latency +system.cpu.l2cache.overall_avg_mshr_miss_latency::cpu.inst 66874.741913 # average overall mshr miss latency +system.cpu.l2cache.overall_avg_mshr_miss_latency::cpu.data 67541.634126 # average overall mshr miss latency +system.cpu.l2cache.overall_avg_mshr_miss_latency::total 67535.003627 # average overall mshr miss latency +system.cpu.toL2Bus.snoop_filter.tot_requests 1580121 # Total number of requests made to the snoop filter. +system.cpu.toL2Bus.snoop_filter.hit_single_requests 787139 # Number of requests hitting in the snoop filter with a single holder of the requested data. system.cpu.toL2Bus.snoop_filter.hit_multi_requests 0 # Number of requests hitting in the snoop filter with multiple (>1) holders of the requested data. -system.cpu.toL2Bus.snoop_filter.tot_snoops 2081 # Total number of snoops made to the snoop filter. 
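The *_avg_mshr_miss_latency values regenerated in this hunk are derived ratios: the accumulated MSHR miss latency listed above divided by the corresponding MSHR miss count. A minimal spot-check, assuming plain Python and values copied from this hunk (variable names are illustrative, not gem5 identifiers):

    # Hypothetical spot-check of two of the new reference averages.
    read_ex_latency = 4_273_173_000          # ReadExReq_mshr_miss_latency::cpu.data
    read_ex_misses = 66_645                  # ReadExReq_mshr_misses::cpu.data
    assert abs(read_ex_latency / read_ex_misses - 64118.433491) < 1e-5

    read_clean_latency = 194_338_000         # ReadCleanReq_mshr_miss_latency::cpu.inst
    read_clean_misses = 2_906                # ReadCleanReq_mshr_misses::cpu.inst
    assert abs(read_clean_latency / read_clean_misses - 66874.741913) < 1e-5

Because these averages are ratios, every entry in this block shifts whenever either the latency totals or the miss counts are regenerated, even when the change in the raw counters is small.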
-system.cpu.toL2Bus.snoop_filter.hit_single_snoops 2081 # Number of snoops hitting in the snoop filter with a single holder of the requested data. +system.cpu.toL2Bus.snoop_filter.tot_snoops 2087 # Total number of snoops made to the snoop filter. +system.cpu.toL2Bus.snoop_filter.hit_single_snoops 2087 # Number of snoops hitting in the snoop filter with a single holder of the requested data. system.cpu.toL2Bus.snoop_filter.hit_multi_snoops 0 # Number of snoops hitting in the snoop filter with multiple (>1) holders of the requested data. -system.cpu.toL2Bus.pwrStateResidencyTicks::UNDEFINED 504258263000 # Cumulative time (in ticks) in various power states -system.cpu.toL2Bus.trans_dist::ReadResp 723924 # Transaction distribution -system.cpu.toL2Bus.trans_dist::WritebackDirty 155172 # Transaction distribution -system.cpu.toL2Bus.trans_dist::WritebackClean 10567 # Transaction distribution -system.cpu.toL2Bus.trans_dist::CleanEvict 881298 # Transaction distribution +system.cpu.toL2Bus.pwrStateResidencyTicks::UNDEFINED 508215534000 # Cumulative time (in ticks) in various power states +system.cpu.toL2Bus.trans_dist::ReadResp 723970 # Transaction distribution +system.cpu.toL2Bus.trans_dist::WritebackDirty 155164 # Transaction distribution +system.cpu.toL2Bus.trans_dist::WritebackClean 10580 # Transaction distribution +system.cpu.toL2Bus.trans_dist::CleanEvict 881355 # Transaction distribution system.cpu.toL2Bus.trans_dist::ReadExReq 69011 # Transaction distribution system.cpu.toL2Bus.trans_dist::ReadExResp 69011 # Transaction distribution -system.cpu.toL2Bus.trans_dist::ReadCleanReq 12310 # Transaction distribution -system.cpu.toL2Bus.trans_dist::ReadSharedReq 711615 # Transaction distribution -system.cpu.toL2Bus.pkt_count_system.cpu.icache.mem_side::system.cpu.l2cache.cpu_side 35186 # Packet count per connected master and slave (bytes) -system.cpu.toL2Bus.pkt_count_system.cpu.dcache.mem_side::system.cpu.l2cache.cpu_side 2337782 # Packet count per connected master and slave (bytes) -system.cpu.toL2Bus.pkt_count::total 2372968 # Packet count per connected master and slave (bytes) -system.cpu.toL2Bus.pkt_size_system.cpu.icache.mem_side::system.cpu.l2cache.cpu_side 1464064 # Cumulative packet size per connected master and slave (bytes) -system.cpu.toL2Bus.pkt_size_system.cpu.dcache.mem_side::system.cpu.l2cache.cpu_side 55623360 # Cumulative packet size per connected master and slave (bytes) -system.cpu.toL2Bus.pkt_size::total 57087424 # Cumulative packet size per connected master and slave (bytes) -system.cpu.toL2Bus.snoops 259940 # Total snoops (count) -system.cpu.toL2Bus.snoop_fanout::samples 1052876 # Request fanout histogram -system.cpu.toL2Bus.snoop_fanout::mean 0.001976 # Request fanout histogram -system.cpu.toL2Bus.snoop_fanout::stdev 0.044414 # Request fanout histogram +system.cpu.toL2Bus.trans_dist::ReadCleanReq 12327 # Transaction distribution +system.cpu.toL2Bus.trans_dist::ReadSharedReq 711644 # Transaction distribution +system.cpu.toL2Bus.pkt_count_system.cpu.icache.mem_side::system.cpu.l2cache.cpu_side 35233 # Packet count per connected master and slave (bytes) +system.cpu.toL2Bus.pkt_count_system.cpu.dcache.mem_side::system.cpu.l2cache.cpu_side 2337869 # Packet count per connected master and slave (bytes) +system.cpu.toL2Bus.pkt_count::total 2373102 # Packet count per connected master and slave (bytes) +system.cpu.toL2Bus.pkt_size_system.cpu.icache.mem_side::system.cpu.l2cache.cpu_side 1465984 # Cumulative packet size per connected master and slave (bytes) 
+system.cpu.toL2Bus.pkt_size_system.cpu.dcache.mem_side::system.cpu.l2cache.cpu_side 55624704 # Cumulative packet size per connected master and slave (bytes) +system.cpu.toL2Bus.pkt_size::total 57090688 # Cumulative packet size per connected master and slave (bytes) +system.cpu.toL2Bus.snoops 259960 # Total snoops (count) +system.cpu.toL2Bus.snoopTraffic 4267712 # Total snoop traffic (bytes) +system.cpu.toL2Bus.snoop_fanout::samples 1052942 # Request fanout histogram +system.cpu.toL2Bus.snoop_fanout::mean 0.001982 # Request fanout histogram +system.cpu.toL2Bus.snoop_fanout::stdev 0.044476 # Request fanout histogram system.cpu.toL2Bus.snoop_fanout::underflows 0 0.00% 0.00% # Request fanout histogram -system.cpu.toL2Bus.snoop_fanout::0 1050795 99.80% 99.80% # Request fanout histogram -system.cpu.toL2Bus.snoop_fanout::1 2081 0.20% 100.00% # Request fanout histogram +system.cpu.toL2Bus.snoop_fanout::0 1050855 99.80% 99.80% # Request fanout histogram +system.cpu.toL2Bus.snoop_fanout::1 2087 0.20% 100.00% # Request fanout histogram system.cpu.toL2Bus.snoop_fanout::2 0 0.00% 100.00% # Request fanout histogram system.cpu.toL2Bus.snoop_fanout::overflows 0 0.00% 100.00% # Request fanout histogram system.cpu.toL2Bus.snoop_fanout::min_value 0 # Request fanout histogram system.cpu.toL2Bus.snoop_fanout::max_value 1 # Request fanout histogram -system.cpu.toL2Bus.snoop_fanout::total 1052876 # Request fanout histogram -system.cpu.toL2Bus.reqLayer0.occupancy 889072500 # Layer occupancy (ticks) +system.cpu.toL2Bus.snoop_fanout::total 1052942 # Request fanout histogram +system.cpu.toL2Bus.reqLayer0.occupancy 889121500 # Layer occupancy (ticks) system.cpu.toL2Bus.reqLayer0.utilization 0.2 # Layer utilization (%) -system.cpu.toL2Bus.respLayer0.occupancy 18463500 # Layer occupancy (ticks) +system.cpu.toL2Bus.respLayer0.occupancy 18489000 # Layer occupancy (ticks) system.cpu.toL2Bus.respLayer0.utilization 0.0 # Layer utilization (%) -system.cpu.toL2Bus.respLayer1.occupancy 1170939499 # Layer occupancy (ticks) +system.cpu.toL2Bus.respLayer1.occupancy 1170982999 # Layer occupancy (ticks) system.cpu.toL2Bus.respLayer1.utilization 0.2 # Layer utilization (%) -system.membus.pwrStateResidencyTicks::UNDEFINED 504258263000 # Cumulative time (in ticks) in various power states -system.membus.trans_dist::ReadResp 225622 # Transaction distribution +system.membus.pwrStateResidencyTicks::UNDEFINED 508215534000 # Cumulative time (in ticks) in various power states +system.membus.trans_dist::ReadResp 225638 # Transaction distribution system.membus.trans_dist::WritebackDirty 66683 # Transaction distribution -system.membus.trans_dist::CleanEvict 191176 # Transaction distribution +system.membus.trans_dist::CleanEvict 191190 # Transaction distribution system.membus.trans_dist::ReadExReq 66645 # Transaction distribution system.membus.trans_dist::ReadExResp 66645 # Transaction distribution -system.membus.trans_dist::ReadSharedReq 225622 # Transaction distribution -system.membus.pkt_count_system.cpu.l2cache.mem_side::system.physmem.port 842393 # Packet count per connected master and slave (bytes) -system.membus.pkt_count::total 842393 # Packet count per connected master and slave (bytes) -system.membus.pkt_size_system.cpu.l2cache.mem_side::system.physmem.port 22972800 # Cumulative packet size per connected master and slave (bytes) -system.membus.pkt_size::total 22972800 # Cumulative packet size per connected master and slave (bytes) +system.membus.trans_dist::ReadSharedReq 225638 # Transaction distribution 
+system.membus.pkt_count_system.cpu.l2cache.mem_side::system.physmem.port 842439 # Packet count per connected master and slave (bytes) +system.membus.pkt_count::total 842439 # Packet count per connected master and slave (bytes) +system.membus.pkt_size_system.cpu.l2cache.mem_side::system.physmem.port 22973824 # Cumulative packet size per connected master and slave (bytes) +system.membus.pkt_size::total 22973824 # Cumulative packet size per connected master and slave (bytes) system.membus.snoops 0 # Total snoops (count) -system.membus.snoop_fanout::samples 550126 # Request fanout histogram +system.membus.snoopTraffic 0 # Total snoop traffic (bytes) +system.membus.snoop_fanout::samples 550156 # Request fanout histogram system.membus.snoop_fanout::mean 0 # Request fanout histogram system.membus.snoop_fanout::stdev 0 # Request fanout histogram system.membus.snoop_fanout::underflows 0 0.00% 0.00% # Request fanout histogram -system.membus.snoop_fanout::0 550126 100.00% 100.00% # Request fanout histogram +system.membus.snoop_fanout::0 550156 100.00% 100.00% # Request fanout histogram system.membus.snoop_fanout::1 0 0.00% 100.00% # Request fanout histogram system.membus.snoop_fanout::overflows 0 0.00% 100.00% # Request fanout histogram system.membus.snoop_fanout::min_value 0 # Request fanout histogram system.membus.snoop_fanout::max_value 0 # Request fanout histogram -system.membus.snoop_fanout::total 550126 # Request fanout histogram -system.membus.reqLayer0.occupancy 918516000 # Layer occupancy (ticks) +system.membus.snoop_fanout::total 550156 # Request fanout histogram +system.membus.reqLayer0.occupancy 925402000 # Layer occupancy (ticks) system.membus.reqLayer0.utilization 0.2 # Layer utilization (%) -system.membus.respLayer1.occupancy 1556053500 # Layer occupancy (ticks) +system.membus.respLayer1.occupancy 1556718500 # Layer occupancy (ticks) system.membus.respLayer1.utilization 0.3 # Layer utilization (%) ---------- End Simulation Statistics ---------- diff --git a/tests/long/se/40.perlbmk/ref/alpha/tru64/o3-timing/config.ini b/tests/long/se/40.perlbmk/ref/alpha/tru64/o3-timing/config.ini index 0cac95bfa..0e87d435d 100644 --- a/tests/long/se/40.perlbmk/ref/alpha/tru64/o3-timing/config.ini +++ b/tests/long/se/40.perlbmk/ref/alpha/tru64/o3-timing/config.ini @@ -14,7 +14,9 @@ children=clk_domain cpu cpu_clk_domain dvfs_handler membus physmem voltage_domai boot_osflags=a cache_line_size=64 clk_domain=system.clk_domain +default_p_state=UNDEFINED eventq_index=0 +exit_on_work_items=false init_param=0 kernel= kernel_addr_check=true @@ -24,9 +26,16 @@ mem_mode=timing mem_ranges= memories=system.physmem mmap_using_noreserve=false +multi_thread=false num_work_ids=16 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null readfile= symbolfile= +thermal_components= +thermal_model=Null work_begin_ckpt_count=0 work_begin_cpu_id_exit=-1 work_begin_exit_count=0 @@ -68,6 +77,7 @@ cpu_id=0 decodeToFetchDelay=1 decodeToRenameDelay=1 decodeWidth=8 +default_p_state=UNDEFINED dispatchWidth=8 do_checkpoint_insts=true do_quiesce=true @@ -104,6 +114,10 @@ numPhysIntRegs=256 numROBEntries=192 numRobs=1 numThreads=1 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null profile=0 progress_interval=0 renameToDecodeDelay=1 @@ -143,11 +157,18 @@ choicePredictorSize=8192 eventq_index=0 globalCtrBits=2 globalPredictorSize=8192 +indirectHashGHR=true +indirectHashTargets=true +indirectPathLength=3 +indirectSets=256 
+indirectTagSize=16 +indirectWays=2 instShiftAmt=2 localCtrBits=2 localHistoryTableSize=2048 localPredictorSize=2048 numThreads=1 +useIndirect=true [system.cpu.dcache] type=Cache @@ -155,13 +176,18 @@ children=tags addr_ranges=0:18446744073709551615 assoc=2 clk_domain=system.cpu_clk_domain +clusivity=mostly_incl +default_p_state=UNDEFINED demand_mshr_reserve=1 eventq_index=0 -forward_snoops=true hit_latency=2 is_read_only=false max_miss_count=0 mshrs=4 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null prefetch_on_access=false prefetcher=Null response_latency=2 @@ -171,6 +197,7 @@ system=system tags=system.cpu.dcache.tags tgts_per_mshr=20 write_buffers=8 +writeback_clean=false cpu_side=system.cpu.dcache_port mem_side=system.cpu.toL2Bus.slave[1] @@ -179,8 +206,13 @@ type=LRU assoc=2 block_size=64 clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 hit_latency=2 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sequential_access=false size=262144 @@ -502,13 +534,18 @@ children=tags addr_ranges=0:18446744073709551615 assoc=2 clk_domain=system.cpu_clk_domain +clusivity=mostly_incl +default_p_state=UNDEFINED demand_mshr_reserve=1 eventq_index=0 -forward_snoops=true hit_latency=2 is_read_only=true max_miss_count=0 mshrs=4 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null prefetch_on_access=false prefetcher=Null response_latency=2 @@ -518,6 +555,7 @@ system=system tags=system.cpu.icache.tags tgts_per_mshr=20 write_buffers=8 +writeback_clean=true cpu_side=system.cpu.icache_port mem_side=system.cpu.toL2Bus.slave[0] @@ -526,8 +564,13 @@ type=LRU assoc=2 block_size=64 clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 hit_latency=2 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sequential_access=false size=131072 @@ -551,13 +594,18 @@ children=tags addr_ranges=0:18446744073709551615 assoc=8 clk_domain=system.cpu_clk_domain +clusivity=mostly_incl +default_p_state=UNDEFINED demand_mshr_reserve=1 eventq_index=0 -forward_snoops=true hit_latency=20 is_read_only=false max_miss_count=0 mshrs=20 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null prefetch_on_access=false prefetcher=Null response_latency=20 @@ -567,6 +615,7 @@ system=system tags=system.cpu.l2cache.tags tgts_per_mshr=12 write_buffers=8 +writeback_clean=false cpu_side=system.cpu.toL2Bus.master[0] mem_side=system.membus.slave[1] @@ -575,19 +624,31 @@ type=LRU assoc=8 block_size=64 clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 hit_latency=20 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sequential_access=false size=2097152 [system.cpu.toL2Bus] type=CoherentXBar +children=snoop_filter clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 forward_latency=0 frontend_latency=1 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +point_of_coherency=false +power_model=Null response_latency=1 -snoop_filter=Null +snoop_filter=system.cpu.toL2Bus.snoop_filter snoop_response_latency=1 system=system use_default_range=false @@ -595,6 +656,13 @@ width=32 master=system.cpu.l2cache.cpu_side slave=system.cpu.icache.mem_side system.cpu.dcache.mem_side +[system.cpu.toL2Bus.snoop_filter] 
+type=SnoopFilter +eventq_index=0 +lookup_latency=0 +max_capacity=8388608 +system=system + [system.cpu.tracer] type=ExeTracer eventq_index=0 @@ -609,7 +677,7 @@ env= errout=cerr euid=100 eventq_index=0 -executable=/scratch/nilay/GEM5/dist/m5/cpu2000/binaries/alpha/tru64/perlbmk +executable=/arm/projectscratch/randd/systems/dist/cpu2000/binaries/alpha/tru64/perlbmk gid=100 input=cin kvmInSE=false @@ -641,9 +709,15 @@ transition_latency=100000000 [system.membus] type=CoherentXBar clk_domain=system.clk_domain +default_p_state=UNDEFINED eventq_index=0 forward_latency=4 frontend_latency=3 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +point_of_coherency=true +power_model=Null response_latency=2 snoop_filter=Null snoop_response_latency=4 @@ -687,6 +761,7 @@ burst_length=8 channels=1 clk_domain=system.clk_domain conf_table_reported=true +default_p_state=UNDEFINED device_bus_width=8 device_rowbuffer_size=1024 device_size=536870912 @@ -698,7 +773,11 @@ max_accesses_per_row=16 mem_sched_policy=frfcfs min_writes_per_switch=16 null=false +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 page_policy=open_adaptive +power_model=Null range=0:134217727 ranks_per_channel=2 read_buffer_size=32 diff --git a/tests/long/se/40.perlbmk/ref/alpha/tru64/o3-timing/simerr b/tests/long/se/40.perlbmk/ref/alpha/tru64/o3-timing/simerr index 41d370561..8954fa36f 100755 --- a/tests/long/se/40.perlbmk/ref/alpha/tru64/o3-timing/simerr +++ b/tests/long/se/40.perlbmk/ref/alpha/tru64/o3-timing/simerr @@ -1,5 +1,6 @@ warn: DRAM device capacity (8192 Mbytes) does not match the address range assigned (128 Mbytes) warn: Sockets disabled, not accepting gdb connections +warn: ClockedObject: More than one power state change request encountered within the same simulation tick warn: Prefetch instructions in Alpha do not do anything warn: Prefetch instructions in Alpha do not do anything warn: Prefetch instructions in Alpha do not do anything diff --git a/tests/long/se/40.perlbmk/ref/alpha/tru64/o3-timing/simout b/tests/long/se/40.perlbmk/ref/alpha/tru64/o3-timing/simout index c3e095b5a..8e7b7a0be 100755 --- a/tests/long/se/40.perlbmk/ref/alpha/tru64/o3-timing/simout +++ b/tests/long/se/40.perlbmk/ref/alpha/tru64/o3-timing/simout @@ -3,10 +3,10 @@ Redirecting stderr to build/ALPHA/tests/opt/long/se/40.perlbmk/alpha/tru64/o3-ti gem5 Simulator System. http://gem5.org gem5 is copyrighted software; use the --copyright option for details. -gem5 compiled Sep 14 2015 20:54:01 -gem5 started Sep 14 2015 20:54:31 -gem5 executing on ribera.cs.wisc.edu -command line: build/ALPHA/gem5.opt -d build/ALPHA/tests/opt/long/se/40.perlbmk/alpha/tru64/o3-timing -re /scratch/nilay/GEM5/gem5/tests/run.py build/ALPHA/tests/opt/long/se/40.perlbmk/alpha/tru64/o3-timing +gem5 compiled Jul 19 2016 12:23:51 +gem5 started Jul 21 2016 14:09:28 +gem5 executing on e108600-lin, pid 4303 +command line: /work/curdun01/gem5-external.hg/build/ALPHA/gem5.opt -d build/ALPHA/tests/opt/long/se/40.perlbmk/alpha/tru64/o3-timing -re /work/curdun01/gem5-external.hg/tests/testing/../run.py long/se/40.perlbmk/alpha/tru64/o3-timing Global frequency set at 1000000000000 ticks per second info: Entering event queue @ 0. Starting simulation... @@ -650,4 +650,4 @@ info: Increasing stack size by one page. 
2000: 2845746745 1000: 2068042552 0: 290958364 -Exiting @ tick 276406029500 because target called exit() +Exiting @ tick 174766258500 because target called exit() diff --git a/tests/long/se/40.perlbmk/ref/alpha/tru64/o3-timing/stats.txt b/tests/long/se/40.perlbmk/ref/alpha/tru64/o3-timing/stats.txt index 82cf197ab..577d97331 100644 --- a/tests/long/se/40.perlbmk/ref/alpha/tru64/o3-timing/stats.txt +++ b/tests/long/se/40.perlbmk/ref/alpha/tru64/o3-timing/stats.txt @@ -4,11 +4,11 @@ sim_seconds 0.174766 # Nu sim_ticks 174766258500 # Number of ticks simulated final_tick 174766258500 # Number of ticks from beginning of simulation (restored from checkpoints and never reset) sim_freq 1000000000000 # Frequency of simulated ticks -host_inst_rate 383088 # Simulator instruction rate (inst/s) -host_op_rate 383088 # Simulator op (including micro ops) rate (op/s) -host_tick_rate 79477968 # Simulator tick rate (ticks/s) -host_mem_usage 307308 # Number of bytes of host memory used -host_seconds 2198.93 # Real time elapsed on the host +host_inst_rate 215097 # Simulator instruction rate (inst/s) +host_op_rate 215097 # Simulator op (including micro ops) rate (op/s) +host_tick_rate 44625570 # Simulator tick rate (ticks/s) +host_mem_usage 260248 # Number of bytes of host memory used +host_seconds 3916.28 # Real time elapsed on the host sim_insts 842382029 # Number of instructions simulated sim_ops 842382029 # Number of ops (including micro ops) simulated system.voltage_domain.voltage 1 # Voltage in Volts @@ -1019,6 +1019,7 @@ system.cpu.toL2Bus.pkt_size_system.cpu.icache.mem_side::system.cpu.l2cache.cpu_s system.cpu.toL2Bus.pkt_size_system.cpu.dcache.mem_side::system.cpu.l2cache.cpu_side 55639552 # Cumulative packet size per connected master and slave (bytes) system.cpu.toL2Bus.pkt_size::total 56339648 # Cumulative packet size per connected master and slave (bytes) system.cpu.toL2Bus.snoops 259794 # Total snoops (count) +system.cpu.toL2Bus.snoopTraffic 4267648 # Total snoop traffic (bytes) system.cpu.toL2Bus.snoop_fanout::samples 1046881 # Request fanout histogram system.cpu.toL2Bus.snoop_fanout::mean 0.001913 # Request fanout histogram system.cpu.toL2Bus.snoop_fanout::stdev 0.043699 # Request fanout histogram @@ -1048,6 +1049,7 @@ system.membus.pkt_count::total 842124 # Pa system.membus.pkt_size_system.cpu.l2cache.mem_side::system.physmem.port 22966272 # Cumulative packet size per connected master and slave (bytes) system.membus.pkt_size::total 22966272 # Cumulative packet size per connected master and slave (bytes) system.membus.snoops 0 # Total snoops (count) +system.membus.snoopTraffic 0 # Total snoop traffic (bytes) system.membus.snoop_fanout::samples 549958 # Request fanout histogram system.membus.snoop_fanout::mean 0 # Request fanout histogram system.membus.snoop_fanout::stdev 0 # Request fanout histogram diff --git a/tests/long/se/40.perlbmk/ref/alpha/tru64/simple-atomic/config.ini b/tests/long/se/40.perlbmk/ref/alpha/tru64/simple-atomic/config.ini index bcb4e48fb..0452b264c 100644 --- a/tests/long/se/40.perlbmk/ref/alpha/tru64/simple-atomic/config.ini +++ b/tests/long/se/40.perlbmk/ref/alpha/tru64/simple-atomic/config.ini @@ -14,7 +14,9 @@ children=clk_domain cpu cpu_clk_domain dvfs_handler membus physmem voltage_domai boot_osflags=a cache_line_size=64 clk_domain=system.clk_domain +default_p_state=UNDEFINED eventq_index=0 +exit_on_work_items=false init_param=0 kernel= kernel_addr_check=true @@ -24,9 +26,16 @@ mem_mode=atomic mem_ranges= memories=system.physmem mmap_using_noreserve=false 
+multi_thread=false num_work_ids=16 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null readfile= symbolfile= +thermal_components= +thermal_model=Null work_begin_ckpt_count=0 work_begin_cpu_id_exit=-1 work_begin_exit_count=0 @@ -51,6 +60,7 @@ branchPred=Null checker=Null clk_domain=system.cpu_clk_domain cpu_id=0 +default_p_state=UNDEFINED do_checkpoint_insts=true do_quiesce=true do_statistics_insts=true @@ -67,6 +77,10 @@ max_insts_any_thread=0 max_loads_all_threads=0 max_loads_any_thread=0 numThreads=1 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null profile=0 progress_interval=0 simpoint_start_insts= @@ -114,7 +128,7 @@ env= errout=cerr euid=100 eventq_index=0 -executable=/scratch/nilay/GEM5/dist/m5/cpu2000/binaries/alpha/tru64/perlbmk +executable=/arm/projectscratch/randd/systems/dist/cpu2000/binaries/alpha/tru64/perlbmk gid=100 input=cin kvmInSE=false @@ -146,9 +160,15 @@ transition_latency=100000000 [system.membus] type=CoherentXBar clk_domain=system.clk_domain +default_p_state=UNDEFINED eventq_index=0 forward_latency=4 frontend_latency=3 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +point_of_coherency=true +power_model=Null response_latency=2 snoop_filter=Null snoop_response_latency=4 @@ -163,11 +183,16 @@ type=SimpleMemory bandwidth=73.000000 clk_domain=system.clk_domain conf_table_reported=true +default_p_state=UNDEFINED eventq_index=0 in_addr_map=true latency=30000 latency_var=0 null=false +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null range=0:134217727 port=system.membus.master[0] diff --git a/tests/long/se/40.perlbmk/ref/alpha/tru64/simple-atomic/simerr b/tests/long/se/40.perlbmk/ref/alpha/tru64/simple-atomic/simerr index cf5d2b5cc..54e31201a 100755 --- a/tests/long/se/40.perlbmk/ref/alpha/tru64/simple-atomic/simerr +++ b/tests/long/se/40.perlbmk/ref/alpha/tru64/simple-atomic/simerr @@ -1,4 +1,5 @@ warn: Sockets disabled, not accepting gdb connections +warn: ClockedObject: More than one power state change request encountered within the same simulation tick warn: Prefetch instructions in Alpha do not do anything warn: Prefetch instructions in Alpha do not do anything warn: Prefetch instructions in Alpha do not do anything diff --git a/tests/long/se/40.perlbmk/ref/alpha/tru64/simple-atomic/simout b/tests/long/se/40.perlbmk/ref/alpha/tru64/simple-atomic/simout index 0dd51a4d4..3f843823b 100755 --- a/tests/long/se/40.perlbmk/ref/alpha/tru64/simple-atomic/simout +++ b/tests/long/se/40.perlbmk/ref/alpha/tru64/simple-atomic/simout @@ -3,10 +3,10 @@ Redirecting stderr to build/ALPHA/tests/opt/long/se/40.perlbmk/alpha/tru64/simpl gem5 Simulator System. http://gem5.org gem5 is copyrighted software; use the --copyright option for details. 
-gem5 compiled Jul 3 2015 14:54:12 -gem5 started Jul 3 2015 15:11:16 -gem5 executing on ribera.cs.wisc.edu -command line: build/ALPHA/gem5.opt -d build/ALPHA/tests/opt/long/se/40.perlbmk/alpha/tru64/simple-atomic -re /scratch/nilay/GEM5/gem5/tests/run.py build/ALPHA/tests/opt/long/se/40.perlbmk/alpha/tru64/simple-atomic +gem5 compiled Jul 19 2016 12:23:51 +gem5 started Jul 21 2016 14:09:28 +gem5 executing on e108600-lin, pid 4304 +command line: /work/curdun01/gem5-external.hg/build/ALPHA/gem5.opt -d build/ALPHA/tests/opt/long/se/40.perlbmk/alpha/tru64/simple-atomic -re /work/curdun01/gem5-external.hg/tests/testing/../run.py long/se/40.perlbmk/alpha/tru64/simple-atomic Global frequency set at 1000000000000 ticks per second info: Entering event queue @ 0. Starting simulation... diff --git a/tests/long/se/40.perlbmk/ref/alpha/tru64/simple-atomic/stats.txt b/tests/long/se/40.perlbmk/ref/alpha/tru64/simple-atomic/stats.txt index 31542f021..b6b81e33b 100644 --- a/tests/long/se/40.perlbmk/ref/alpha/tru64/simple-atomic/stats.txt +++ b/tests/long/se/40.perlbmk/ref/alpha/tru64/simple-atomic/stats.txt @@ -4,11 +4,11 @@ sim_seconds 0.464395 # Nu sim_ticks 464394627000 # Number of ticks simulated final_tick 464394627000 # Number of ticks from beginning of simulation (restored from checkpoints and never reset) sim_freq 1000000000000 # Frequency of simulated ticks -host_inst_rate 3142131 # Simulator instruction rate (inst/s) -host_op_rate 3142131 # Simulator op (including micro ops) rate (op/s) -host_tick_rate 1571406745 # Simulator tick rate (ticks/s) -host_mem_usage 294224 # Number of bytes of host memory used -host_seconds 295.53 # Real time elapsed on the host +host_inst_rate 2033284 # Simulator instruction rate (inst/s) +host_op_rate 2033284 # Simulator op (including micro ops) rate (op/s) +host_tick_rate 1016862727 # Simulator tick rate (ticks/s) +host_mem_usage 248468 # Number of bytes of host memory used +host_seconds 456.69 # Real time elapsed on the host sim_insts 928587629 # Number of instructions simulated sim_ops 928587629 # Number of ops (including micro ops) simulated system.voltage_domain.voltage 1 # Voltage in Volts @@ -142,6 +142,7 @@ system.membus.pkt_size_system.cpu.icache_port::system.physmem.port 3715156600 system.membus.pkt_size_system.cpu.dcache_port::system.physmem.port 2394805239 # Cumulative packet size per connected master and slave (bytes) system.membus.pkt_size::total 6109961839 # Cumulative packet size per connected master and slave (bytes) system.membus.snoops 0 # Total snoops (count) +system.membus.snoopTraffic 0 # Total snoop traffic (bytes) system.membus.snoop_fanout::samples 1264600947 # Request fanout histogram system.membus.snoop_fanout::mean 0.734452 # Request fanout histogram system.membus.snoop_fanout::stdev 0.441624 # Request fanout histogram diff --git a/tests/long/se/40.perlbmk/ref/alpha/tru64/simple-timing/config.ini b/tests/long/se/40.perlbmk/ref/alpha/tru64/simple-timing/config.ini index 82e107e36..b6ac9fa01 100644 --- a/tests/long/se/40.perlbmk/ref/alpha/tru64/simple-timing/config.ini +++ b/tests/long/se/40.perlbmk/ref/alpha/tru64/simple-timing/config.ini @@ -14,7 +14,9 @@ children=clk_domain cpu cpu_clk_domain dvfs_handler membus physmem voltage_domai boot_osflags=a cache_line_size=64 clk_domain=system.clk_domain +default_p_state=UNDEFINED eventq_index=0 +exit_on_work_items=false init_param=0 kernel= kernel_addr_check=true @@ -24,9 +26,16 @@ mem_mode=timing mem_ranges= memories=system.physmem mmap_using_noreserve=false +multi_thread=false num_work_ids=16 
+p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null readfile= symbolfile= +thermal_components= +thermal_model=Null work_begin_ckpt_count=0 work_begin_cpu_id_exit=-1 work_begin_exit_count=0 @@ -51,6 +60,7 @@ branchPred=Null checker=Null clk_domain=system.cpu_clk_domain cpu_id=0 +default_p_state=UNDEFINED do_checkpoint_insts=true do_quiesce=true do_statistics_insts=true @@ -66,6 +76,10 @@ max_insts_any_thread=0 max_loads_all_threads=0 max_loads_any_thread=0 numThreads=1 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null profile=0 progress_interval=0 simpoint_start_insts= @@ -83,13 +97,18 @@ children=tags addr_ranges=0:18446744073709551615 assoc=2 clk_domain=system.cpu_clk_domain +clusivity=mostly_incl +default_p_state=UNDEFINED demand_mshr_reserve=1 eventq_index=0 -forward_snoops=true hit_latency=2 is_read_only=false max_miss_count=0 mshrs=4 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null prefetch_on_access=false prefetcher=Null response_latency=2 @@ -99,6 +118,7 @@ system=system tags=system.cpu.dcache.tags tgts_per_mshr=20 write_buffers=8 +writeback_clean=false cpu_side=system.cpu.dcache_port mem_side=system.cpu.toL2Bus.slave[1] @@ -107,8 +127,13 @@ type=LRU assoc=2 block_size=64 clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 hit_latency=2 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sequential_access=false size=262144 @@ -123,13 +148,18 @@ children=tags addr_ranges=0:18446744073709551615 assoc=2 clk_domain=system.cpu_clk_domain +clusivity=mostly_incl +default_p_state=UNDEFINED demand_mshr_reserve=1 eventq_index=0 -forward_snoops=true hit_latency=2 is_read_only=true max_miss_count=0 mshrs=4 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null prefetch_on_access=false prefetcher=Null response_latency=2 @@ -139,6 +169,7 @@ system=system tags=system.cpu.icache.tags tgts_per_mshr=20 write_buffers=8 +writeback_clean=true cpu_side=system.cpu.icache_port mem_side=system.cpu.toL2Bus.slave[0] @@ -147,8 +178,13 @@ type=LRU assoc=2 block_size=64 clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 hit_latency=2 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sequential_access=false size=131072 @@ -172,13 +208,18 @@ children=tags addr_ranges=0:18446744073709551615 assoc=8 clk_domain=system.cpu_clk_domain +clusivity=mostly_incl +default_p_state=UNDEFINED demand_mshr_reserve=1 eventq_index=0 -forward_snoops=true hit_latency=20 is_read_only=false max_miss_count=0 mshrs=20 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null prefetch_on_access=false prefetcher=Null response_latency=20 @@ -188,6 +229,7 @@ system=system tags=system.cpu.l2cache.tags tgts_per_mshr=12 write_buffers=8 +writeback_clean=false cpu_side=system.cpu.toL2Bus.master[0] mem_side=system.membus.slave[1] @@ -196,19 +238,31 @@ type=LRU assoc=8 block_size=64 clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 hit_latency=20 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sequential_access=false size=2097152 [system.cpu.toL2Bus] type=CoherentXBar +children=snoop_filter clk_domain=system.cpu_clk_domain 
+default_p_state=UNDEFINED eventq_index=0 forward_latency=0 frontend_latency=1 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +point_of_coherency=false +power_model=Null response_latency=1 -snoop_filter=Null +snoop_filter=system.cpu.toL2Bus.snoop_filter snoop_response_latency=1 system=system use_default_range=false @@ -216,6 +270,13 @@ width=32 master=system.cpu.l2cache.cpu_side slave=system.cpu.icache.mem_side system.cpu.dcache.mem_side +[system.cpu.toL2Bus.snoop_filter] +type=SnoopFilter +eventq_index=0 +lookup_latency=0 +max_capacity=8388608 +system=system + [system.cpu.tracer] type=ExeTracer eventq_index=0 @@ -230,7 +291,7 @@ env= errout=cerr euid=100 eventq_index=0 -executable=/scratch/nilay/GEM5/dist/m5/cpu2000/binaries/alpha/tru64/perlbmk +executable=/arm/projectscratch/randd/systems/dist/cpu2000/binaries/alpha/tru64/perlbmk gid=100 input=cin kvmInSE=false @@ -262,9 +323,15 @@ transition_latency=100000000 [system.membus] type=CoherentXBar clk_domain=system.clk_domain +default_p_state=UNDEFINED eventq_index=0 forward_latency=4 frontend_latency=3 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +point_of_coherency=true +power_model=Null response_latency=2 snoop_filter=Null snoop_response_latency=4 @@ -279,11 +346,16 @@ type=SimpleMemory bandwidth=73.000000 clk_domain=system.clk_domain conf_table_reported=true +default_p_state=UNDEFINED eventq_index=0 in_addr_map=true latency=30000 latency_var=0 null=false +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null range=0:134217727 port=system.membus.master[0] diff --git a/tests/long/se/40.perlbmk/ref/alpha/tru64/simple-timing/simerr b/tests/long/se/40.perlbmk/ref/alpha/tru64/simple-timing/simerr index cf5d2b5cc..54e31201a 100755 --- a/tests/long/se/40.perlbmk/ref/alpha/tru64/simple-timing/simerr +++ b/tests/long/se/40.perlbmk/ref/alpha/tru64/simple-timing/simerr @@ -1,4 +1,5 @@ warn: Sockets disabled, not accepting gdb connections +warn: ClockedObject: More than one power state change request encountered within the same simulation tick warn: Prefetch instructions in Alpha do not do anything warn: Prefetch instructions in Alpha do not do anything warn: Prefetch instructions in Alpha do not do anything diff --git a/tests/long/se/40.perlbmk/ref/alpha/tru64/simple-timing/simout b/tests/long/se/40.perlbmk/ref/alpha/tru64/simple-timing/simout index 9bc789b35..6564d4aeb 100755 --- a/tests/long/se/40.perlbmk/ref/alpha/tru64/simple-timing/simout +++ b/tests/long/se/40.perlbmk/ref/alpha/tru64/simple-timing/simout @@ -3,10 +3,10 @@ Redirecting stderr to build/ALPHA/tests/opt/long/se/40.perlbmk/alpha/tru64/simpl gem5 Simulator System. http://gem5.org gem5 is copyrighted software; use the --copyright option for details. 
-gem5 compiled Jul 3 2015 14:54:12 -gem5 started Jul 3 2015 15:04:10 -gem5 executing on ribera.cs.wisc.edu -command line: build/ALPHA/gem5.opt -d build/ALPHA/tests/opt/long/se/40.perlbmk/alpha/tru64/simple-timing -re /scratch/nilay/GEM5/gem5/tests/run.py build/ALPHA/tests/opt/long/se/40.perlbmk/alpha/tru64/simple-timing +gem5 compiled Jul 19 2016 12:23:51 +gem5 started Jul 21 2016 14:09:29 +gem5 executing on e108600-lin, pid 4305 +command line: /work/curdun01/gem5-external.hg/build/ALPHA/gem5.opt -d build/ALPHA/tests/opt/long/se/40.perlbmk/alpha/tru64/simple-timing -re /work/curdun01/gem5-external.hg/tests/testing/../run.py long/se/40.perlbmk/alpha/tru64/simple-timing Global frequency set at 1000000000000 ticks per second info: Entering event queue @ 0. Starting simulation... @@ -650,4 +650,4 @@ info: Increasing stack size by one page. 2000: 2845746745 1000: 2068042552 0: 290958364 -Exiting @ tick 1286278511500 because target called exit() +Exiting @ tick 1288319411500 because target called exit() diff --git a/tests/long/se/40.perlbmk/ref/alpha/tru64/simple-timing/stats.txt b/tests/long/se/40.perlbmk/ref/alpha/tru64/simple-timing/stats.txt index ba8d8610f..f13a4ce2b 100644 --- a/tests/long/se/40.perlbmk/ref/alpha/tru64/simple-timing/stats.txt +++ b/tests/long/se/40.perlbmk/ref/alpha/tru64/simple-timing/stats.txt @@ -4,11 +4,11 @@ sim_seconds 1.288319 # Nu sim_ticks 1288319411500 # Number of ticks simulated final_tick 1288319411500 # Number of ticks from beginning of simulation (restored from checkpoints and never reset) sim_freq 1000000000000 # Frequency of simulated ticks -host_inst_rate 1791468 # Simulator instruction rate (inst/s) -host_op_rate 1791468 # Simulator op (including micro ops) rate (op/s) -host_tick_rate 2485477121 # Simulator tick rate (ticks/s) -host_mem_usage 303212 # Number of bytes of host memory used -host_seconds 518.34 # Real time elapsed on the host +host_inst_rate 1112167 # Simulator instruction rate (inst/s) +host_op_rate 1112167 # Simulator op (including micro ops) rate (op/s) +host_tick_rate 1543016447 # Simulator tick rate (ticks/s) +host_mem_usage 257436 # Number of bytes of host memory used +host_seconds 834.94 # Real time elapsed on the host sim_insts 928587629 # Number of instructions simulated sim_ops 928587629 # Number of ops (including micro ops) simulated system.voltage_domain.voltage 1 # Voltage in Volts @@ -512,6 +512,7 @@ system.cpu.toL2Bus.pkt_size_system.cpu.icache.mem_side::system.cpu.l2cache.cpu_s system.cpu.toL2Bus.pkt_size_system.cpu.dcache.mem_side::system.cpu.l2cache.cpu_side 55641216 # Cumulative packet size per connected master and slave (bytes) system.cpu.toL2Bus.pkt_size::total 56331520 # Cumulative packet size per connected master and slave (bytes) system.cpu.toL2Bus.snoops 258847 # Total snoops (count) +system.cpu.toL2Bus.snoopTraffic 4267712 # Total snoop traffic (bytes) system.cpu.toL2Bus.snoop_fanout::samples 1045543 # Request fanout histogram system.cpu.toL2Bus.snoop_fanout::mean 0.001643 # Request fanout histogram system.cpu.toL2Bus.snoop_fanout::stdev 0.040503 # Request fanout histogram @@ -541,6 +542,7 @@ system.membus.pkt_count::total 839908 # Pa system.membus.pkt_size_system.cpu.l2cache.mem_side::system.physmem.port 22916608 # Cumulative packet size per connected master and slave (bytes) system.membus.pkt_size::total 22916608 # Cumulative packet size per connected master and slave (bytes) system.membus.snoops 0 # Total snoops (count) +system.membus.snoopTraffic 0 # Total snoop traffic (bytes) system.membus.snoop_fanout::samples 
548519 # Request fanout histogram system.membus.snoop_fanout::mean 0 # Request fanout histogram system.membus.snoop_fanout::stdev 0 # Request fanout histogram diff --git a/tests/long/se/40.perlbmk/ref/arm/linux/minor-timing/config.ini b/tests/long/se/40.perlbmk/ref/arm/linux/minor-timing/config.ini index bd7f67190..4149684ba 100644 --- a/tests/long/se/40.perlbmk/ref/arm/linux/minor-timing/config.ini +++ b/tests/long/se/40.perlbmk/ref/arm/linux/minor-timing/config.ini @@ -14,7 +14,9 @@ children=clk_domain cpu cpu_clk_domain dvfs_handler membus physmem voltage_domai boot_osflags=a cache_line_size=64 clk_domain=system.clk_domain +default_p_state=UNDEFINED eventq_index=0 +exit_on_work_items=false init_param=0 kernel= kernel_addr_check=true @@ -24,9 +26,16 @@ mem_mode=timing mem_ranges= memories=system.physmem mmap_using_noreserve=false +multi_thread=false num_work_ids=16 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null readfile= symbolfile= +thermal_components= +thermal_model=Null work_begin_ckpt_count=0 work_begin_cpu_id_exit=-1 work_begin_exit_count=0 @@ -55,6 +64,7 @@ decodeCycleInput=true decodeInputBufferSize=3 decodeInputWidth=2 decodeToExecuteForwardDelay=1 +default_p_state=UNDEFINED do_checkpoint_insts=true do_quiesce=true do_statistics_insts=true @@ -99,12 +109,17 @@ max_insts_any_thread=0 max_loads_all_threads=0 max_loads_any_thread=0 numThreads=1 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null profile=0 progress_interval=0 simpoint_start_insts= socket_id=0 switched_out=false system=system +threadPolicy=RoundRobin tracer=system.cpu.tracer workload=system.cpu.workload dcache_port=system.cpu.dcache.cpu_side @@ -120,11 +135,18 @@ choicePredictorSize=8192 eventq_index=0 globalCtrBits=2 globalPredictorSize=8192 +indirectHashGHR=true +indirectHashTargets=true +indirectPathLength=3 +indirectSets=256 +indirectTagSize=16 +indirectWays=2 instShiftAmt=2 localCtrBits=2 localHistoryTableSize=2048 localPredictorSize=2048 numThreads=1 +useIndirect=true [system.cpu.dcache] type=Cache @@ -132,13 +154,18 @@ children=tags addr_ranges=0:18446744073709551615 assoc=2 clk_domain=system.cpu_clk_domain +clusivity=mostly_incl +default_p_state=UNDEFINED demand_mshr_reserve=1 eventq_index=0 -forward_snoops=true hit_latency=2 is_read_only=false max_miss_count=0 mshrs=4 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null prefetch_on_access=false prefetcher=Null response_latency=2 @@ -148,6 +175,7 @@ system=system tags=system.cpu.dcache.tags tgts_per_mshr=20 write_buffers=8 +writeback_clean=false cpu_side=system.cpu.dcache_port mem_side=system.cpu.toL2Bus.slave[1] @@ -156,8 +184,13 @@ type=LRU assoc=2 block_size=64 clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 hit_latency=2 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sequential_access=false size=262144 @@ -180,9 +213,14 @@ walker=system.cpu.dstage2_mmu.stage2_tlb.walker [system.cpu.dstage2_mmu.stage2_tlb.walker] type=ArmTableWalker clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 is_stage2=true num_squash_per_cycle=2 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sys=system [system.cpu.dtb] @@ -196,9 +234,14 @@ walker=system.cpu.dtb.walker [system.cpu.dtb.walker] type=ArmTableWalker clk_domain=system.cpu_clk_domain 
+default_p_state=UNDEFINED eventq_index=0 is_stage2=false num_squash_per_cycle=2 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sys=system port=system.cpu.toL2Bus.slave[3] @@ -591,13 +634,18 @@ children=tags addr_ranges=0:18446744073709551615 assoc=2 clk_domain=system.cpu_clk_domain +clusivity=mostly_incl +default_p_state=UNDEFINED demand_mshr_reserve=1 eventq_index=0 -forward_snoops=true hit_latency=2 is_read_only=true max_miss_count=0 mshrs=4 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null prefetch_on_access=false prefetcher=Null response_latency=2 @@ -607,6 +655,7 @@ system=system tags=system.cpu.icache.tags tgts_per_mshr=20 write_buffers=8 +writeback_clean=true cpu_side=system.cpu.icache_port mem_side=system.cpu.toL2Bus.slave[0] @@ -615,8 +664,13 @@ type=LRU assoc=2 block_size=64 clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 hit_latency=2 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sequential_access=false size=131072 @@ -626,6 +680,7 @@ eventq_index=0 [system.cpu.isa] type=ArmISA +decoderFlavour=Generic eventq_index=0 fpsid=1090793632 id_aa64afr0_el1=0 @@ -673,9 +728,14 @@ walker=system.cpu.istage2_mmu.stage2_tlb.walker [system.cpu.istage2_mmu.stage2_tlb.walker] type=ArmTableWalker clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 is_stage2=true num_squash_per_cycle=2 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sys=system [system.cpu.itb] @@ -689,9 +749,14 @@ walker=system.cpu.itb.walker [system.cpu.itb.walker] type=ArmTableWalker clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 is_stage2=false num_squash_per_cycle=2 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sys=system port=system.cpu.toL2Bus.slave[2] @@ -701,13 +766,18 @@ children=tags addr_ranges=0:18446744073709551615 assoc=8 clk_domain=system.cpu_clk_domain +clusivity=mostly_incl +default_p_state=UNDEFINED demand_mshr_reserve=1 eventq_index=0 -forward_snoops=true hit_latency=20 is_read_only=false max_miss_count=0 mshrs=20 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null prefetch_on_access=false prefetcher=Null response_latency=20 @@ -717,6 +787,7 @@ system=system tags=system.cpu.l2cache.tags tgts_per_mshr=12 write_buffers=8 +writeback_clean=false cpu_side=system.cpu.toL2Bus.master[0] mem_side=system.membus.slave[1] @@ -725,19 +796,31 @@ type=LRU assoc=8 block_size=64 clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 hit_latency=20 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sequential_access=false size=2097152 [system.cpu.toL2Bus] type=CoherentXBar +children=snoop_filter clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 forward_latency=0 frontend_latency=1 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +point_of_coherency=false +power_model=Null response_latency=1 -snoop_filter=Null +snoop_filter=system.cpu.toL2Bus.snoop_filter snoop_response_latency=1 system=system use_default_range=false @@ -745,6 +828,13 @@ width=32 master=system.cpu.l2cache.cpu_side slave=system.cpu.icache.mem_side system.cpu.dcache.mem_side system.cpu.itb.walker.port 
system.cpu.dtb.walker.port +[system.cpu.toL2Bus.snoop_filter] +type=SnoopFilter +eventq_index=0 +lookup_latency=0 +max_capacity=8388608 +system=system + [system.cpu.tracer] type=ExeTracer eventq_index=0 @@ -759,7 +849,7 @@ env= errout=cerr euid=100 eventq_index=0 -executable=/scratch/nilay/GEM5/dist/m5/cpu2000/binaries/arm/linux/perlbmk +executable=/arm/projectscratch/randd/systems/dist/cpu2000/binaries/arm/linux/perlbmk gid=100 input=cin kvmInSE=false @@ -791,9 +881,15 @@ transition_latency=100000000 [system.membus] type=CoherentXBar clk_domain=system.clk_domain +default_p_state=UNDEFINED eventq_index=0 forward_latency=4 frontend_latency=3 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +point_of_coherency=true +power_model=Null response_latency=2 snoop_filter=Null snoop_response_latency=4 @@ -837,6 +933,7 @@ burst_length=8 channels=1 clk_domain=system.clk_domain conf_table_reported=true +default_p_state=UNDEFINED device_bus_width=8 device_rowbuffer_size=1024 device_size=536870912 @@ -848,7 +945,11 @@ max_accesses_per_row=16 mem_sched_policy=frfcfs min_writes_per_switch=16 null=false +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 page_policy=open_adaptive +power_model=Null range=0:134217727 ranks_per_channel=2 read_buffer_size=32 diff --git a/tests/long/se/40.perlbmk/ref/arm/linux/minor-timing/simerr b/tests/long/se/40.perlbmk/ref/arm/linux/minor-timing/simerr index 2e6ab1e7e..c1f3592f9 100755 --- a/tests/long/se/40.perlbmk/ref/arm/linux/minor-timing/simerr +++ b/tests/long/se/40.perlbmk/ref/arm/linux/minor-timing/simerr @@ -1,3 +1,4 @@ warn: DRAM device capacity (8192 Mbytes) does not match the address range assigned (128 Mbytes) warn: Sockets disabled, not accepting gdb connections +warn: ClockedObject: More than one power state change request encountered within the same simulation tick warn: fcntl64(3, 2) passed through to host diff --git a/tests/long/se/40.perlbmk/ref/arm/linux/minor-timing/simout b/tests/long/se/40.perlbmk/ref/arm/linux/minor-timing/simout index d77f0dbd5..99e686564 100755 --- a/tests/long/se/40.perlbmk/ref/arm/linux/minor-timing/simout +++ b/tests/long/se/40.perlbmk/ref/arm/linux/minor-timing/simout @@ -3,10 +3,10 @@ Redirecting stderr to build/ARM/tests/opt/long/se/40.perlbmk/arm/linux/minor-tim gem5 Simulator System. http://gem5.org gem5 is copyrighted software; use the --copyright option for details. -gem5 compiled Sep 14 2015 23:29:19 -gem5 started Sep 15 2015 03:24:21 -gem5 executing on ribera.cs.wisc.edu -command line: build/ARM/gem5.opt -d build/ARM/tests/opt/long/se/40.perlbmk/arm/linux/minor-timing -re /scratch/nilay/GEM5/gem5/tests/run.py build/ARM/tests/opt/long/se/40.perlbmk/arm/linux/minor-timing +gem5 compiled Jul 21 2016 14:37:41 +gem5 started Jul 21 2016 14:40:10 +gem5 executing on e108600-lin, pid 23109 +command line: /work/curdun01/gem5-external.hg/build/ARM/gem5.opt -d build/ARM/tests/opt/long/se/40.perlbmk/arm/linux/minor-timing -re /work/curdun01/gem5-external.hg/tests/testing/../run.py long/se/40.perlbmk/arm/linux/minor-timing Global frequency set at 1000000000000 ticks per second info: Entering event queue @ 0. Starting simulation... @@ -650,4 +650,4 @@ info: Increasing stack size by one page. 
2000: 2845746745 1000: 2068042552 0: 290958364 -Exiting @ tick 542257602500 because target called exit() +Exiting @ tick 512588680500 because target called exit() diff --git a/tests/long/se/40.perlbmk/ref/arm/linux/minor-timing/stats.txt b/tests/long/se/40.perlbmk/ref/arm/linux/minor-timing/stats.txt index eb3e6af6a..031a11fd6 100644 --- a/tests/long/se/40.perlbmk/ref/arm/linux/minor-timing/stats.txt +++ b/tests/long/se/40.perlbmk/ref/arm/linux/minor-timing/stats.txt @@ -1,96 +1,96 @@ ---------- Begin Simulation Statistics ---------- -sim_seconds 0.489946 # Number of seconds simulated -sim_ticks 489945697500 # Number of ticks simulated -final_tick 489945697500 # Number of ticks from beginning of simulation (restored from checkpoints and never reset) +sim_seconds 0.512589 # Number of seconds simulated +sim_ticks 512588680500 # Number of ticks simulated +final_tick 512588680500 # Number of ticks from beginning of simulation (restored from checkpoints and never reset) sim_freq 1000000000000 # Frequency of simulated ticks -host_inst_rate 287135 # Simulator instruction rate (inst/s) -host_op_rate 353501 # Simulator op (including micro ops) rate (op/s) -host_tick_rate 219588415 # Simulator tick rate (ticks/s) -host_mem_usage 322476 # Number of bytes of host memory used -host_seconds 2231.20 # Real time elapsed on the host +host_inst_rate 180394 # Simulator instruction rate (inst/s) +host_op_rate 222088 # Simulator op (including micro ops) rate (op/s) +host_tick_rate 144333179 # Simulator tick rate (ticks/s) +host_mem_usage 275860 # Number of bytes of host memory used +host_seconds 3551.43 # Real time elapsed on the host sim_insts 640655085 # Number of instructions simulated sim_ops 788730744 # Number of ops (including micro ops) simulated system.voltage_domain.voltage 1 # Voltage in Volts system.clk_domain.clock 1000 # Clock period in ticks -system.physmem.pwrStateResidencyTicks::UNDEFINED 489945697500 # Cumulative time (in ticks) in various power states -system.physmem.bytes_read::cpu.inst 163712 # Number of bytes read from this memory -system.physmem.bytes_read::cpu.data 18473856 # Number of bytes read from this memory -system.physmem.bytes_read::total 18637568 # Number of bytes read from this memory -system.physmem.bytes_inst_read::cpu.inst 163712 # Number of instructions bytes read from this memory -system.physmem.bytes_inst_read::total 163712 # Number of instructions bytes read from this memory +system.physmem.pwrStateResidencyTicks::UNDEFINED 512588680500 # Cumulative time (in ticks) in various power states +system.physmem.bytes_read::cpu.inst 164160 # Number of bytes read from this memory +system.physmem.bytes_read::cpu.data 18474048 # Number of bytes read from this memory +system.physmem.bytes_read::total 18638208 # Number of bytes read from this memory +system.physmem.bytes_inst_read::cpu.inst 164160 # Number of instructions bytes read from this memory +system.physmem.bytes_inst_read::total 164160 # Number of instructions bytes read from this memory system.physmem.bytes_written::writebacks 4230272 # Number of bytes written to this memory system.physmem.bytes_written::total 4230272 # Number of bytes written to this memory -system.physmem.num_reads::cpu.inst 2558 # Number of read requests responded to by this memory -system.physmem.num_reads::cpu.data 288654 # Number of read requests responded to by this memory -system.physmem.num_reads::total 291212 # Number of read requests responded to by this memory +system.physmem.num_reads::cpu.inst 2565 # Number of read requests responded to by 
this memory +system.physmem.num_reads::cpu.data 288657 # Number of read requests responded to by this memory +system.physmem.num_reads::total 291222 # Number of read requests responded to by this memory system.physmem.num_writes::writebacks 66098 # Number of write requests responded to by this memory system.physmem.num_writes::total 66098 # Number of write requests responded to by this memory -system.physmem.bw_read::cpu.inst 334143 # Total read bandwidth from this memory (bytes/s) -system.physmem.bw_read::cpu.data 37705926 # Total read bandwidth from this memory (bytes/s) -system.physmem.bw_read::total 38040069 # Total read bandwidth from this memory (bytes/s) -system.physmem.bw_inst_read::cpu.inst 334143 # Instruction read bandwidth from this memory (bytes/s) -system.physmem.bw_inst_read::total 334143 # Instruction read bandwidth from this memory (bytes/s) -system.physmem.bw_write::writebacks 8634165 # Write bandwidth from this memory (bytes/s) -system.physmem.bw_write::total 8634165 # Write bandwidth from this memory (bytes/s) -system.physmem.bw_total::writebacks 8634165 # Total bandwidth to/from this memory (bytes/s) -system.physmem.bw_total::cpu.inst 334143 # Total bandwidth to/from this memory (bytes/s) -system.physmem.bw_total::cpu.data 37705926 # Total bandwidth to/from this memory (bytes/s) -system.physmem.bw_total::total 46674234 # Total bandwidth to/from this memory (bytes/s) -system.physmem.readReqs 291212 # Number of read requests accepted +system.physmem.bw_read::cpu.inst 320257 # Total read bandwidth from this memory (bytes/s) +system.physmem.bw_read::cpu.data 36040687 # Total read bandwidth from this memory (bytes/s) +system.physmem.bw_read::total 36360943 # Total read bandwidth from this memory (bytes/s) +system.physmem.bw_inst_read::cpu.inst 320257 # Instruction read bandwidth from this memory (bytes/s) +system.physmem.bw_inst_read::total 320257 # Instruction read bandwidth from this memory (bytes/s) +system.physmem.bw_write::writebacks 8252761 # Write bandwidth from this memory (bytes/s) +system.physmem.bw_write::total 8252761 # Write bandwidth from this memory (bytes/s) +system.physmem.bw_total::writebacks 8252761 # Total bandwidth to/from this memory (bytes/s) +system.physmem.bw_total::cpu.inst 320257 # Total bandwidth to/from this memory (bytes/s) +system.physmem.bw_total::cpu.data 36040687 # Total bandwidth to/from this memory (bytes/s) +system.physmem.bw_total::total 44613705 # Total bandwidth to/from this memory (bytes/s) +system.physmem.readReqs 291222 # Number of read requests accepted system.physmem.writeReqs 66098 # Number of write requests accepted -system.physmem.readBursts 291212 # Number of DRAM read bursts, including those serviced by the write queue +system.physmem.readBursts 291222 # Number of DRAM read bursts, including those serviced by the write queue system.physmem.writeBursts 66098 # Number of DRAM write bursts, including those merged in the write queue -system.physmem.bytesReadDRAM 18617024 # Total number of bytes read from DRAM -system.physmem.bytesReadWrQ 20544 # Total number of bytes read from write queue +system.physmem.bytesReadDRAM 18617600 # Total number of bytes read from DRAM +system.physmem.bytesReadWrQ 20608 # Total number of bytes read from write queue system.physmem.bytesWritten 4228864 # Total number of bytes written to DRAM -system.physmem.bytesReadSys 18637568 # Total read bytes from the system interface side +system.physmem.bytesReadSys 18638208 # Total read bytes from the system interface side system.physmem.bytesWrittenSys 
4230272 # Total written bytes from the system interface side -system.physmem.servicedByWrQ 321 # Number of DRAM read bursts serviced by the write queue +system.physmem.servicedByWrQ 322 # Number of DRAM read bursts serviced by the write queue system.physmem.mergedWrBursts 0 # Number of DRAM write bursts merged with an existing one system.physmem.neitherReadNorWriteReqs 0 # Number of requests that are neither read nor write -system.physmem.perBankRdBursts::0 18282 # Per bank write bursts -system.physmem.perBankRdBursts::1 18130 # Per bank write bursts -system.physmem.perBankRdBursts::2 18217 # Per bank write bursts +system.physmem.perBankRdBursts::0 18288 # Per bank write bursts +system.physmem.perBankRdBursts::1 18133 # Per bank write bursts +system.physmem.perBankRdBursts::2 18220 # Per bank write bursts system.physmem.perBankRdBursts::3 18178 # Per bank write bursts -system.physmem.perBankRdBursts::4 18288 # Per bank write bursts -system.physmem.perBankRdBursts::5 18411 # Per bank write bursts -system.physmem.perBankRdBursts::6 18177 # Per bank write bursts -system.physmem.perBankRdBursts::7 17990 # Per bank write bursts -system.physmem.perBankRdBursts::8 18028 # Per bank write bursts -system.physmem.perBankRdBursts::9 18056 # Per bank write bursts -system.physmem.perBankRdBursts::10 18107 # Per bank write bursts -system.physmem.perBankRdBursts::11 18202 # Per bank write bursts -system.physmem.perBankRdBursts::12 18216 # Per bank write bursts -system.physmem.perBankRdBursts::13 18274 # Per bank write bursts +system.physmem.perBankRdBursts::4 18281 # Per bank write bursts +system.physmem.perBankRdBursts::5 18410 # Per bank write bursts +system.physmem.perBankRdBursts::6 18174 # Per bank write bursts +system.physmem.perBankRdBursts::7 17993 # Per bank write bursts +system.physmem.perBankRdBursts::8 18029 # Per bank write bursts +system.physmem.perBankRdBursts::9 18057 # Per bank write bursts +system.physmem.perBankRdBursts::10 18103 # Per bank write bursts +system.physmem.perBankRdBursts::11 18205 # Per bank write bursts +system.physmem.perBankRdBursts::12 18223 # Per bank write bursts +system.physmem.perBankRdBursts::13 18272 # Per bank write bursts system.physmem.perBankRdBursts::14 18077 # Per bank write bursts -system.physmem.perBankRdBursts::15 18258 # Per bank write bursts +system.physmem.perBankRdBursts::15 18257 # Per bank write bursts system.physmem.perBankWrBursts::0 4171 # Per bank write bursts system.physmem.perBankWrBursts::1 4099 # Per bank write bursts -system.physmem.perBankWrBursts::2 4134 # Per bank write bursts +system.physmem.perBankWrBursts::2 4135 # Per bank write bursts system.physmem.perBankWrBursts::3 4146 # Per bank write bursts -system.physmem.perBankWrBursts::4 4225 # Per bank write bursts -system.physmem.perBankWrBursts::5 4224 # Per bank write bursts +system.physmem.perBankWrBursts::4 4223 # Per bank write bursts +system.physmem.perBankWrBursts::5 4222 # Per bank write bursts system.physmem.perBankWrBursts::6 4173 # Per bank write bursts system.physmem.perBankWrBursts::7 4094 # Per bank write bursts system.physmem.perBankWrBursts::8 4096 # Per bank write bursts system.physmem.perBankWrBursts::9 4096 # Per bank write bursts system.physmem.perBankWrBursts::10 4096 # Per bank write bursts system.physmem.perBankWrBursts::11 4097 # Per bank write bursts -system.physmem.perBankWrBursts::12 4095 # Per bank write bursts +system.physmem.perBankWrBursts::12 4098 # Per bank write bursts system.physmem.perBankWrBursts::13 4096 # Per bank write bursts 
system.physmem.perBankWrBursts::14 4096 # Per bank write bursts system.physmem.perBankWrBursts::15 4138 # Per bank write bursts system.physmem.numRdRetry 0 # Number of times read queue was full causing retry system.physmem.numWrRetry 0 # Number of times write queue was full causing retry -system.physmem.totGap 489945603000 # Total gap between requests +system.physmem.totGap 512588586500 # Total gap between requests system.physmem.readPktSize::0 0 # Read request sizes (log2) system.physmem.readPktSize::1 0 # Read request sizes (log2) system.physmem.readPktSize::2 0 # Read request sizes (log2) system.physmem.readPktSize::3 0 # Read request sizes (log2) system.physmem.readPktSize::4 0 # Read request sizes (log2) system.physmem.readPktSize::5 0 # Read request sizes (log2) -system.physmem.readPktSize::6 291212 # Read request sizes (log2) +system.physmem.readPktSize::6 291222 # Read request sizes (log2) system.physmem.writePktSize::0 0 # Write request sizes (log2) system.physmem.writePktSize::1 0 # Write request sizes (log2) system.physmem.writePktSize::2 0 # Write request sizes (log2) @@ -98,9 +98,9 @@ system.physmem.writePktSize::3 0 # Wr system.physmem.writePktSize::4 0 # Write request sizes (log2) system.physmem.writePktSize::5 0 # Write request sizes (log2) system.physmem.writePktSize::6 66098 # Write request sizes (log2) -system.physmem.rdQLenPdf::0 290509 # What read queue length does an incoming req see -system.physmem.rdQLenPdf::1 369 # What read queue length does an incoming req see -system.physmem.rdQLenPdf::2 13 # What read queue length does an incoming req see +system.physmem.rdQLenPdf::0 290535 # What read queue length does an incoming req see +system.physmem.rdQLenPdf::1 355 # What read queue length does an incoming req see +system.physmem.rdQLenPdf::2 10 # What read queue length does an incoming req see system.physmem.rdQLenPdf::3 0 # What read queue length does an incoming req see system.physmem.rdQLenPdf::4 0 # What read queue length does an incoming req see system.physmem.rdQLenPdf::5 0 # What read queue length does an incoming req see @@ -145,24 +145,24 @@ system.physmem.wrQLenPdf::11 1 # Wh system.physmem.wrQLenPdf::12 1 # What write queue length does an incoming req see system.physmem.wrQLenPdf::13 1 # What write queue length does an incoming req see system.physmem.wrQLenPdf::14 1 # What write queue length does an incoming req see -system.physmem.wrQLenPdf::15 903 # What write queue length does an incoming req see -system.physmem.wrQLenPdf::16 903 # What write queue length does an incoming req see -system.physmem.wrQLenPdf::17 4014 # What write queue length does an incoming req see -system.physmem.wrQLenPdf::18 4018 # What write queue length does an incoming req see -system.physmem.wrQLenPdf::19 4018 # What write queue length does an incoming req see -system.physmem.wrQLenPdf::20 4018 # What write queue length does an incoming req see -system.physmem.wrQLenPdf::21 4018 # What write queue length does an incoming req see -system.physmem.wrQLenPdf::22 4017 # What write queue length does an incoming req see -system.physmem.wrQLenPdf::23 4017 # What write queue length does an incoming req see +system.physmem.wrQLenPdf::15 910 # What write queue length does an incoming req see +system.physmem.wrQLenPdf::16 910 # What write queue length does an incoming req see +system.physmem.wrQLenPdf::17 4009 # What write queue length does an incoming req see +system.physmem.wrQLenPdf::18 4017 # What write queue length does an incoming req see +system.physmem.wrQLenPdf::19 4017 # What write 
queue length does an incoming req see +system.physmem.wrQLenPdf::20 4017 # What write queue length does an incoming req see +system.physmem.wrQLenPdf::21 4017 # What write queue length does an incoming req see +system.physmem.wrQLenPdf::22 4016 # What write queue length does an incoming req see +system.physmem.wrQLenPdf::23 4016 # What write queue length does an incoming req see system.physmem.wrQLenPdf::24 4017 # What write queue length does an incoming req see system.physmem.wrQLenPdf::25 4017 # What write queue length does an incoming req see system.physmem.wrQLenPdf::26 4017 # What write queue length does an incoming req see -system.physmem.wrQLenPdf::27 4017 # What write queue length does an incoming req see -system.physmem.wrQLenPdf::28 4017 # What write queue length does an incoming req see -system.physmem.wrQLenPdf::29 4019 # What write queue length does an incoming req see +system.physmem.wrQLenPdf::27 4018 # What write queue length does an incoming req see +system.physmem.wrQLenPdf::28 4016 # What write queue length does an incoming req see +system.physmem.wrQLenPdf::29 4018 # What write queue length does an incoming req see system.physmem.wrQLenPdf::30 4019 # What write queue length does an incoming req see -system.physmem.wrQLenPdf::31 4017 # What write queue length does an incoming req see -system.physmem.wrQLenPdf::32 4017 # What write queue length does an incoming req see +system.physmem.wrQLenPdf::31 4016 # What write queue length does an incoming req see +system.physmem.wrQLenPdf::32 4016 # What write queue length does an incoming req see system.physmem.wrQLenPdf::33 0 # What write queue length does an incoming req see system.physmem.wrQLenPdf::34 0 # What write queue length does an incoming req see system.physmem.wrQLenPdf::35 0 # What write queue length does an incoming req see @@ -194,101 +194,102 @@ system.physmem.wrQLenPdf::60 0 # Wh system.physmem.wrQLenPdf::61 0 # What write queue length does an incoming req see system.physmem.wrQLenPdf::62 0 # What write queue length does an incoming req see system.physmem.wrQLenPdf::63 0 # What write queue length does an incoming req see -system.physmem.bytesPerActivate::samples 110179 # Bytes accessed per row activation -system.physmem.bytesPerActivate::mean 207.337369 # Bytes accessed per row activation -system.physmem.bytesPerActivate::gmean 135.107709 # Bytes accessed per row activation -system.physmem.bytesPerActivate::stdev 257.005441 # Bytes accessed per row activation -system.physmem.bytesPerActivate::0-127 44928 40.78% 40.78% # Bytes accessed per row activation -system.physmem.bytesPerActivate::128-255 43473 39.46% 80.23% # Bytes accessed per row activation -system.physmem.bytesPerActivate::256-383 9308 8.45% 88.68% # Bytes accessed per row activation -system.physmem.bytesPerActivate::384-511 1919 1.74% 90.42% # Bytes accessed per row activation -system.physmem.bytesPerActivate::512-639 694 0.63% 91.05% # Bytes accessed per row activation -system.physmem.bytesPerActivate::640-767 753 0.68% 91.74% # Bytes accessed per row activation -system.physmem.bytesPerActivate::768-895 467 0.42% 92.16% # Bytes accessed per row activation -system.physmem.bytesPerActivate::896-1023 575 0.52% 92.68% # Bytes accessed per row activation -system.physmem.bytesPerActivate::1024-1151 8062 7.32% 100.00% # Bytes accessed per row activation -system.physmem.bytesPerActivate::total 110179 # Bytes accessed per row activation -system.physmem.rdPerTurnAround::samples 4017 # Reads before turning the bus around for writes 
-system.physmem.rdPerTurnAround::mean 48.520538 # Reads before turning the bus around for writes -system.physmem.rdPerTurnAround::gmean 34.272045 # Reads before turning the bus around for writes -system.physmem.rdPerTurnAround::stdev 506.481387 # Reads before turning the bus around for writes -system.physmem.rdPerTurnAround::0-1023 4015 99.95% 99.95% # Reads before turning the bus around for writes +system.physmem.bytesPerActivate::samples 110334 # Bytes accessed per row activation +system.physmem.bytesPerActivate::mean 207.049577 # Bytes accessed per row activation +system.physmem.bytesPerActivate::gmean 134.865332 # Bytes accessed per row activation +system.physmem.bytesPerActivate::stdev 256.872236 # Bytes accessed per row activation +system.physmem.bytesPerActivate::0-127 45104 40.88% 40.88% # Bytes accessed per row activation +system.physmem.bytesPerActivate::128-255 43590 39.51% 80.39% # Bytes accessed per row activation +system.physmem.bytesPerActivate::256-383 9238 8.37% 88.76% # Bytes accessed per row activation +system.physmem.bytesPerActivate::384-511 1655 1.50% 90.26% # Bytes accessed per row activation +system.physmem.bytesPerActivate::512-639 896 0.81% 91.07% # Bytes accessed per row activation +system.physmem.bytesPerActivate::640-767 605 0.55% 91.62% # Bytes accessed per row activation +system.physmem.bytesPerActivate::768-895 780 0.71% 92.33% # Bytes accessed per row activation +system.physmem.bytesPerActivate::896-1023 416 0.38% 92.70% # Bytes accessed per row activation +system.physmem.bytesPerActivate::1024-1151 8050 7.30% 100.00% # Bytes accessed per row activation +system.physmem.bytesPerActivate::total 110334 # Bytes accessed per row activation +system.physmem.rdPerTurnAround::samples 4016 # Reads before turning the bus around for writes +system.physmem.rdPerTurnAround::mean 48.533367 # Reads before turning the bus around for writes +system.physmem.rdPerTurnAround::gmean 34.247557 # Reads before turning the bus around for writes +system.physmem.rdPerTurnAround::stdev 506.662918 # Reads before turning the bus around for writes +system.physmem.rdPerTurnAround::0-1023 4014 99.95% 99.95% # Reads before turning the bus around for writes system.physmem.rdPerTurnAround::2048-3071 1 0.02% 99.98% # Reads before turning the bus around for writes system.physmem.rdPerTurnAround::31744-32767 1 0.02% 100.00% # Reads before turning the bus around for writes -system.physmem.rdPerTurnAround::total 4017 # Reads before turning the bus around for writes -system.physmem.wrPerTurnAround::samples 4017 # Writes before turning the bus around for reads -system.physmem.wrPerTurnAround::mean 16.449091 # Writes before turning the bus around for reads -system.physmem.wrPerTurnAround::gmean 16.428808 # Writes before turning the bus around for reads -system.physmem.wrPerTurnAround::stdev 0.834669 # Writes before turning the bus around for reads -system.physmem.wrPerTurnAround::16 3115 77.55% 77.55% # Writes before turning the bus around for reads -system.physmem.wrPerTurnAround::18 902 22.45% 100.00% # Writes before turning the bus around for reads -system.physmem.wrPerTurnAround::total 4017 # Writes before turning the bus around for reads -system.physmem.totQLat 3297540750 # Total ticks spent queuing -system.physmem.totMemAccLat 8751747000 # Total ticks spent from burst creation until serviced by the DRAM -system.physmem.totBusLat 1454455000 # Total ticks spent in databus transfers -system.physmem.avgQLat 11336.00 # Average queueing delay per DRAM burst +system.physmem.rdPerTurnAround::total 4016 
# Reads before turning the bus around for writes +system.physmem.wrPerTurnAround::samples 4016 # Writes before turning the bus around for reads +system.physmem.wrPerTurnAround::mean 16.453187 # Writes before turning the bus around for reads +system.physmem.wrPerTurnAround::gmean 16.432732 # Writes before turning the bus around for reads +system.physmem.wrPerTurnAround::stdev 0.838251 # Writes before turning the bus around for reads +system.physmem.wrPerTurnAround::16 3107 77.37% 77.37% # Writes before turning the bus around for reads +system.physmem.wrPerTurnAround::18 907 22.58% 99.95% # Writes before turning the bus around for reads +system.physmem.wrPerTurnAround::19 2 0.05% 100.00% # Writes before turning the bus around for reads +system.physmem.wrPerTurnAround::total 4016 # Writes before turning the bus around for reads +system.physmem.totQLat 2758807250 # Total ticks spent queuing +system.physmem.totMemAccLat 8213182250 # Total ticks spent from burst creation until serviced by the DRAM +system.physmem.totBusLat 1454500000 # Total ticks spent in databus transfers +system.physmem.avgQLat 9483.70 # Average queueing delay per DRAM burst system.physmem.avgBusLat 5000.00 # Average bus latency per DRAM burst -system.physmem.avgMemAccLat 30086.00 # Average memory access latency per DRAM burst -system.physmem.avgRdBW 38.00 # Average DRAM read bandwidth in MiByte/s -system.physmem.avgWrBW 8.63 # Average achieved write bandwidth in MiByte/s -system.physmem.avgRdBWSys 38.04 # Average system read bandwidth in MiByte/s -system.physmem.avgWrBWSys 8.63 # Average system write bandwidth in MiByte/s +system.physmem.avgMemAccLat 28233.70 # Average memory access latency per DRAM burst +system.physmem.avgRdBW 36.32 # Average DRAM read bandwidth in MiByte/s +system.physmem.avgWrBW 8.25 # Average achieved write bandwidth in MiByte/s +system.physmem.avgRdBWSys 36.36 # Average system read bandwidth in MiByte/s +system.physmem.avgWrBWSys 8.25 # Average system write bandwidth in MiByte/s system.physmem.peakBW 12800.00 # Theoretical peak bandwidth in MiByte/s -system.physmem.busUtil 0.36 # Data bus utilization in percentage -system.physmem.busUtilRead 0.30 # Data bus utilization in percentage for reads -system.physmem.busUtilWrite 0.07 # Data bus utilization in percentage for writes +system.physmem.busUtil 0.35 # Data bus utilization in percentage +system.physmem.busUtilRead 0.28 # Data bus utilization in percentage for reads +system.physmem.busUtilWrite 0.06 # Data bus utilization in percentage for writes system.physmem.avgRdQLen 1.00 # Average read queue length when enqueuing -system.physmem.avgWrQLen 22.85 # Average write queue length when enqueuing -system.physmem.readRowHits 195161 # Number of row buffer hits during reads -system.physmem.writeRowHits 51618 # Number of row buffer hits during writes -system.physmem.readRowHitRate 67.09 # Row buffer hit rate for reads -system.physmem.writeRowHitRate 78.09 # Row buffer hit rate for writes -system.physmem.avgGap 1371205.96 # Average gap between requests -system.physmem.pageHitRate 69.13 # Row buffer hit rate, read and write combined -system.physmem_0.actEnergy 417417840 # Energy for activate commands per rank (pJ) -system.physmem_0.preEnergy 227757750 # Energy for precharge commands per rank (pJ) -system.physmem_0.readEnergy 1136210400 # Energy for read commands per rank (pJ) -system.physmem_0.writeEnergy 215563680 # Energy for write commands per rank (pJ) -system.physmem_0.refreshEnergy 32000629440 # Energy for refresh commands per rank (pJ) 
-system.physmem_0.actBackEnergy 104435392590 # Energy for active background per rank (pJ) -system.physmem_0.preBackEnergy 202355359500 # Energy for precharge background per rank (pJ) -system.physmem_0.totalEnergy 340788331200 # Total energy per rank (pJ) -system.physmem_0.averagePower 695.568361 # Core power per rank (mW) -system.physmem_0.memoryStateTime::IDLE 335944764000 # Time in different power states -system.physmem_0.memoryStateTime::REF 16360240000 # Time in different power states +system.physmem.avgWrQLen 22.93 # Average write queue length when enqueuing +system.physmem.readRowHits 195021 # Number of row buffer hits during reads +system.physmem.writeRowHits 51610 # Number of row buffer hits during writes +system.physmem.readRowHitRate 67.04 # Row buffer hit rate for reads +system.physmem.writeRowHitRate 78.08 # Row buffer hit rate for writes +system.physmem.avgGap 1434536.51 # Average gap between requests +system.physmem.pageHitRate 69.08 # Row buffer hit rate, read and write combined +system.physmem_0.actEnergy 417312000 # Energy for activate commands per rank (pJ) +system.physmem_0.preEnergy 227700000 # Energy for precharge commands per rank (pJ) +system.physmem_0.readEnergy 1136202600 # Energy for read commands per rank (pJ) +system.physmem_0.writeEnergy 215544240 # Energy for write commands per rank (pJ) +system.physmem_0.refreshEnergy 33479521920 # Energy for refresh commands per rank (pJ) +system.physmem_0.actBackEnergy 103911193800 # Energy for active background per rank (pJ) +system.physmem_0.preBackEnergy 216400632000 # Energy for precharge background per rank (pJ) +system.physmem_0.totalEnergy 355788106560 # Total energy per rank (pJ) +system.physmem_0.averagePower 694.106023 # Core power per rank (mW) +system.physmem_0.memoryStateTime::IDLE 359300376000 # Time in different power states +system.physmem_0.memoryStateTime::REF 17116320000 # Time in different power states system.physmem_0.memoryStateTime::PRE_PDN 0 # Time in different power states -system.physmem_0.memoryStateTime::ACT 137638069000 # Time in different power states +system.physmem_0.memoryStateTime::ACT 136167987750 # Time in different power states system.physmem_0.memoryStateTime::ACT_PDN 0 # Time in different power states -system.physmem_1.actEnergy 415474920 # Energy for activate commands per rank (pJ) -system.physmem_1.preEnergy 226697625 # Energy for precharge commands per rank (pJ) -system.physmem_1.readEnergy 1132396200 # Energy for read commands per rank (pJ) -system.physmem_1.writeEnergy 212608800 # Energy for write commands per rank (pJ) -system.physmem_1.refreshEnergy 32000629440 # Energy for refresh commands per rank (pJ) -system.physmem_1.actBackEnergy 104010891930 # Energy for active background per rank (pJ) -system.physmem_1.preBackEnergy 202727728500 # Energy for precharge background per rank (pJ) -system.physmem_1.totalEnergy 340726427415 # Total energy per rank (pJ) -system.physmem_1.averagePower 695.442012 # Core power per rank (mW) -system.physmem_1.memoryStateTime::IDLE 336564996750 # Time in different power states -system.physmem_1.memoryStateTime::REF 16360240000 # Time in different power states +system.physmem_1.actEnergy 416737440 # Energy for activate commands per rank (pJ) +system.physmem_1.preEnergy 227386500 # Energy for precharge commands per rank (pJ) +system.physmem_1.readEnergy 1132435200 # Energy for read commands per rank (pJ) +system.physmem_1.writeEnergy 212628240 # Energy for write commands per rank (pJ) +system.physmem_1.refreshEnergy 33479521920 # Energy for refresh 
commands per rank (pJ) +system.physmem_1.actBackEnergy 103626578835 # Energy for active background per rank (pJ) +system.physmem_1.preBackEnergy 216650294250 # Energy for precharge background per rank (pJ) +system.physmem_1.totalEnergy 355745582385 # Total energy per rank (pJ) +system.physmem_1.averagePower 694.023062 # Core power per rank (mW) +system.physmem_1.memoryStateTime::IDLE 359717078250 # Time in different power states +system.physmem_1.memoryStateTime::REF 17116320000 # Time in different power states system.physmem_1.memoryStateTime::PRE_PDN 0 # Time in different power states -system.physmem_1.memoryStateTime::ACT 137017032000 # Time in different power states +system.physmem_1.memoryStateTime::ACT 135751825750 # Time in different power states system.physmem_1.memoryStateTime::ACT_PDN 0 # Time in different power states -system.pwrStateResidencyTicks::UNDEFINED 489945697500 # Cumulative time (in ticks) in various power states -system.cpu.branchPred.lookups 144591747 # Number of BP lookups -system.cpu.branchPred.condPredicted 96197702 # Number of conditional branches predicted -system.cpu.branchPred.condIncorrect 97552 # Number of conditional branches incorrect -system.cpu.branchPred.BTBLookups 81370677 # Number of BTB lookups -system.cpu.branchPred.BTBHits 61978792 # Number of BTB hits +system.pwrStateResidencyTicks::UNDEFINED 512588680500 # Cumulative time (in ticks) in various power states +system.cpu.branchPred.lookups 147261658 # Number of BP lookups +system.cpu.branchPred.condPredicted 98231058 # Number of conditional branches predicted +system.cpu.branchPred.condIncorrect 1384734 # Number of conditional branches incorrect +system.cpu.branchPred.BTBLookups 89949366 # Number of BTB lookups +system.cpu.branchPred.BTBHits 63294628 # Number of BTB hits system.cpu.branchPred.BTBCorrect 0 # Number of correct BTB predictions (this stat may not work properly. -system.cpu.branchPred.BTBHitPct 76.168461 # BTB Hit Percentage -system.cpu.branchPred.usedRAS 19276085 # Number of times the RAS was used to get a target. -system.cpu.branchPred.RASInCorrect 1317 # Number of incorrect RAS predictions. -system.cpu.branchPred.indirectLookups 15994685 # Number of indirect predictor lookups. -system.cpu.branchPred.indirectHits 15989167 # Number of indirect target hits. -system.cpu.branchPred.indirectMisses 5518 # Number of indirect misses. -system.cpu.branchPredindirectMispredicted 8032 # Number of mispredicted indirect branches. +system.cpu.branchPred.BTBHitPct 70.366953 # BTB Hit Percentage +system.cpu.branchPred.usedRAS 19276105 # Number of times the RAS was used to get a target. +system.cpu.branchPred.RASInCorrect 1312 # Number of incorrect RAS predictions. +system.cpu.branchPred.indirectLookups 15995155 # Number of indirect predictor lookups. +system.cpu.branchPred.indirectHits 15988941 # Number of indirect target hits. +system.cpu.branchPred.indirectMisses 6214 # Number of indirect misses. +system.cpu.branchPredindirectMispredicted 1280093 # Number of mispredicted indirect branches. 
system.cpu_clk_domain.clock 500 # Clock period in ticks -system.cpu.dstage2_mmu.stage2_tlb.walker.pwrStateResidencyTicks::UNDEFINED 489945697500 # Cumulative time (in ticks) in various power states +system.cpu.dstage2_mmu.stage2_tlb.walker.pwrStateResidencyTicks::UNDEFINED 512588680500 # Cumulative time (in ticks) in various power states system.cpu.dstage2_mmu.stage2_tlb.walker.walks 0 # Table walker walks requested system.cpu.dstage2_mmu.stage2_tlb.walker.walkRequestOrigin_Requested::Data 0 # Table walker requests started/completed, data/inst system.cpu.dstage2_mmu.stage2_tlb.walker.walkRequestOrigin_Requested::Inst 0 # Table walker requests started/completed, data/inst @@ -318,7 +319,7 @@ system.cpu.dstage2_mmu.stage2_tlb.inst_accesses 0 system.cpu.dstage2_mmu.stage2_tlb.hits 0 # DTB hits system.cpu.dstage2_mmu.stage2_tlb.misses 0 # DTB misses system.cpu.dstage2_mmu.stage2_tlb.accesses 0 # DTB accesses -system.cpu.dtb.walker.pwrStateResidencyTicks::UNDEFINED 489945697500 # Cumulative time (in ticks) in various power states +system.cpu.dtb.walker.pwrStateResidencyTicks::UNDEFINED 512588680500 # Cumulative time (in ticks) in various power states system.cpu.dtb.walker.walks 0 # Table walker walks requested system.cpu.dtb.walker.walkRequestOrigin_Requested::Data 0 # Table walker requests started/completed, data/inst system.cpu.dtb.walker.walkRequestOrigin_Requested::Inst 0 # Table walker requests started/completed, data/inst @@ -348,7 +349,7 @@ system.cpu.dtb.inst_accesses 0 # IT system.cpu.dtb.hits 0 # DTB hits system.cpu.dtb.misses 0 # DTB misses system.cpu.dtb.accesses 0 # DTB accesses -system.cpu.istage2_mmu.stage2_tlb.walker.pwrStateResidencyTicks::UNDEFINED 489945697500 # Cumulative time (in ticks) in various power states +system.cpu.istage2_mmu.stage2_tlb.walker.pwrStateResidencyTicks::UNDEFINED 512588680500 # Cumulative time (in ticks) in various power states system.cpu.istage2_mmu.stage2_tlb.walker.walks 0 # Table walker walks requested system.cpu.istage2_mmu.stage2_tlb.walker.walkRequestOrigin_Requested::Data 0 # Table walker requests started/completed, data/inst system.cpu.istage2_mmu.stage2_tlb.walker.walkRequestOrigin_Requested::Inst 0 # Table walker requests started/completed, data/inst @@ -378,7 +379,7 @@ system.cpu.istage2_mmu.stage2_tlb.inst_accesses 0 system.cpu.istage2_mmu.stage2_tlb.hits 0 # DTB hits system.cpu.istage2_mmu.stage2_tlb.misses 0 # DTB misses system.cpu.istage2_mmu.stage2_tlb.accesses 0 # DTB accesses -system.cpu.itb.walker.pwrStateResidencyTicks::UNDEFINED 489945697500 # Cumulative time (in ticks) in various power states +system.cpu.itb.walker.pwrStateResidencyTicks::UNDEFINED 512588680500 # Cumulative time (in ticks) in various power states system.cpu.itb.walker.walks 0 # Table walker walks requested system.cpu.itb.walker.walkRequestOrigin_Requested::Data 0 # Table walker requests started/completed, data/inst system.cpu.itb.walker.walkRequestOrigin_Requested::Inst 0 # Table walker requests started/completed, data/inst @@ -409,16 +410,16 @@ system.cpu.itb.hits 0 # DT system.cpu.itb.misses 0 # DTB misses system.cpu.itb.accesses 0 # DTB accesses system.cpu.workload.num_syscalls 673 # Number of system calls -system.cpu.pwrStateResidencyTicks::ON 489945697500 # Cumulative time (in ticks) in various power states -system.cpu.numCycles 979891395 # number of cpu cycles simulated +system.cpu.pwrStateResidencyTicks::ON 512588680500 # Cumulative time (in ticks) in various power states +system.cpu.numCycles 1025177361 # number of cpu cycles simulated 
system.cpu.numWorkItemsStarted 0 # number of work items this cpu started system.cpu.numWorkItemsCompleted 0 # number of work items this cpu completed system.cpu.committedInsts 640655085 # Number of instructions committed system.cpu.committedOps 788730744 # Number of ops (including micro ops) committed -system.cpu.discardedOps 6653282 # Number of ops (including micro ops) which were discarded before commit +system.cpu.discardedOps 8621768 # Number of ops (including micro ops) which were discarded before commit system.cpu.numFetchSuspends 0 # Number of times Execute suspended instruction fetching -system.cpu.cpi 1.529515 # CPI: cycles per instruction -system.cpu.ipc 0.653802 # IPC: instructions per cycle +system.cpu.cpi 1.600202 # CPI: cycles per instruction +system.cpu.ipc 0.624921 # IPC: instructions per cycle system.cpu.op_class_0::No_OpClass 0 0.00% 0.00% # Class of committed instruction system.cpu.op_class_0::IntAlu 385757467 48.91% 48.91% # Class of committed instruction system.cpu.op_class_0::IntMult 5173441 0.66% 49.56% # Class of committed instruction @@ -454,62 +455,62 @@ system.cpu.op_class_0::MemWrite 128980497 16.35% 100.00% # Cl system.cpu.op_class_0::IprAccess 0 0.00% 100.00% # Class of committed instruction system.cpu.op_class_0::InstPrefetch 0 0.00% 100.00% # Class of committed instruction system.cpu.op_class_0::total 788730744 # Class of committed instruction -system.cpu.tickCycles 924243701 # Number of cycles that the object actually ticked -system.cpu.idleCycles 55647694 # Total number of cycles that the object has spent stopped -system.cpu.dcache.tags.pwrStateResidencyTicks::UNDEFINED 489945697500 # Cumulative time (in ticks) in various power states -system.cpu.dcache.tags.replacements 778302 # number of replacements -system.cpu.dcache.tags.tagsinuse 4092.104499 # Cycle average of tags in use -system.cpu.dcache.tags.total_refs 378448234 # Total number of references to valid blocks. -system.cpu.dcache.tags.sampled_refs 782398 # Sample count of references to valid blocks. -system.cpu.dcache.tags.avg_refs 483.702967 # Average number of references to valid blocks. -system.cpu.dcache.tags.warmup_cycle 792959500 # Cycle when the warmup percentage was hit. -system.cpu.dcache.tags.occ_blocks::cpu.data 4092.104499 # Average occupied blocks per requestor -system.cpu.dcache.tags.occ_percent::cpu.data 0.999049 # Average percentage of cache occupancy -system.cpu.dcache.tags.occ_percent::total 0.999049 # Average percentage of cache occupancy +system.cpu.tickCycles 955908039 # Number of cycles that the object actually ticked +system.cpu.idleCycles 69269322 # Total number of cycles that the object has spent stopped +system.cpu.dcache.tags.pwrStateResidencyTicks::UNDEFINED 512588680500 # Cumulative time (in ticks) in various power states +system.cpu.dcache.tags.replacements 778100 # number of replacements +system.cpu.dcache.tags.tagsinuse 4092.241926 # Cycle average of tags in use +system.cpu.dcache.tags.total_refs 378449407 # Total number of references to valid blocks. +system.cpu.dcache.tags.sampled_refs 782196 # Sample count of references to valid blocks. +system.cpu.dcache.tags.avg_refs 483.829382 # Average number of references to valid blocks. +system.cpu.dcache.tags.warmup_cycle 798177500 # Cycle when the warmup percentage was hit. 
+system.cpu.dcache.tags.occ_blocks::cpu.data 4092.241926 # Average occupied blocks per requestor +system.cpu.dcache.tags.occ_percent::cpu.data 0.999083 # Average percentage of cache occupancy +system.cpu.dcache.tags.occ_percent::total 0.999083 # Average percentage of cache occupancy system.cpu.dcache.tags.occ_task_id_blocks::1024 4096 # Occupied blocks per task id -system.cpu.dcache.tags.age_task_id_blocks_1024::0 31 # Occupied blocks per task id -system.cpu.dcache.tags.age_task_id_blocks_1024::1 182 # Occupied blocks per task id -system.cpu.dcache.tags.age_task_id_blocks_1024::2 971 # Occupied blocks per task id -system.cpu.dcache.tags.age_task_id_blocks_1024::3 1499 # Occupied blocks per task id -system.cpu.dcache.tags.age_task_id_blocks_1024::4 1413 # Occupied blocks per task id +system.cpu.dcache.tags.age_task_id_blocks_1024::0 30 # Occupied blocks per task id +system.cpu.dcache.tags.age_task_id_blocks_1024::1 177 # Occupied blocks per task id +system.cpu.dcache.tags.age_task_id_blocks_1024::2 968 # Occupied blocks per task id +system.cpu.dcache.tags.age_task_id_blocks_1024::3 1420 # Occupied blocks per task id +system.cpu.dcache.tags.age_task_id_blocks_1024::4 1501 # Occupied blocks per task id system.cpu.dcache.tags.occ_task_id_percent::1024 1 # Percentage of cache occupancy per task id -system.cpu.dcache.tags.tag_accesses 759382252 # Number of tag accesses -system.cpu.dcache.tags.data_accesses 759382252 # Number of data accesses -system.cpu.dcache.pwrStateResidencyTicks::UNDEFINED 489945697500 # Cumulative time (in ticks) in various power states -system.cpu.dcache.ReadReq_hits::cpu.data 249619506 # number of ReadReq hits -system.cpu.dcache.ReadReq_hits::total 249619506 # number of ReadReq hits -system.cpu.dcache.WriteReq_hits::cpu.data 128813766 # number of WriteReq hits -system.cpu.dcache.WriteReq_hits::total 128813766 # number of WriteReq hits +system.cpu.dcache.tags.tag_accesses 759383100 # Number of tag accesses +system.cpu.dcache.tags.data_accesses 759383100 # Number of data accesses +system.cpu.dcache.pwrStateResidencyTicks::UNDEFINED 512588680500 # Cumulative time (in ticks) in various power states +system.cpu.dcache.ReadReq_hits::cpu.data 249620680 # number of ReadReq hits +system.cpu.dcache.ReadReq_hits::total 249620680 # number of ReadReq hits +system.cpu.dcache.WriteReq_hits::cpu.data 128813765 # number of WriteReq hits +system.cpu.dcache.WriteReq_hits::total 128813765 # number of WriteReq hits system.cpu.dcache.SoftPFReq_hits::cpu.data 3484 # number of SoftPFReq hits system.cpu.dcache.SoftPFReq_hits::total 3484 # number of SoftPFReq hits system.cpu.dcache.LoadLockedReq_hits::cpu.data 5739 # number of LoadLockedReq hits system.cpu.dcache.LoadLockedReq_hits::total 5739 # number of LoadLockedReq hits system.cpu.dcache.StoreCondReq_hits::cpu.data 5739 # number of StoreCondReq hits system.cpu.dcache.StoreCondReq_hits::total 5739 # number of StoreCondReq hits -system.cpu.dcache.demand_hits::cpu.data 378433272 # number of demand (read+write) hits -system.cpu.dcache.demand_hits::total 378433272 # number of demand (read+write) hits -system.cpu.dcache.overall_hits::cpu.data 378436756 # number of overall hits -system.cpu.dcache.overall_hits::total 378436756 # number of overall hits -system.cpu.dcache.ReadReq_misses::cpu.data 713841 # number of ReadReq misses -system.cpu.dcache.ReadReq_misses::total 713841 # number of ReadReq misses -system.cpu.dcache.WriteReq_misses::cpu.data 137711 # number of WriteReq misses -system.cpu.dcache.WriteReq_misses::total 137711 # number of WriteReq 
misses +system.cpu.dcache.demand_hits::cpu.data 378434445 # number of demand (read+write) hits +system.cpu.dcache.demand_hits::total 378434445 # number of demand (read+write) hits +system.cpu.dcache.overall_hits::cpu.data 378437929 # number of overall hits +system.cpu.dcache.overall_hits::total 378437929 # number of overall hits +system.cpu.dcache.ReadReq_misses::cpu.data 713192 # number of ReadReq misses +system.cpu.dcache.ReadReq_misses::total 713192 # number of ReadReq misses +system.cpu.dcache.WriteReq_misses::cpu.data 137712 # number of WriteReq misses +system.cpu.dcache.WriteReq_misses::total 137712 # number of WriteReq misses system.cpu.dcache.SoftPFReq_misses::cpu.data 141 # number of SoftPFReq misses system.cpu.dcache.SoftPFReq_misses::total 141 # number of SoftPFReq misses -system.cpu.dcache.demand_misses::cpu.data 851552 # number of demand (read+write) misses -system.cpu.dcache.demand_misses::total 851552 # number of demand (read+write) misses -system.cpu.dcache.overall_misses::cpu.data 851693 # number of overall misses -system.cpu.dcache.overall_misses::total 851693 # number of overall misses -system.cpu.dcache.ReadReq_miss_latency::cpu.data 25188260500 # number of ReadReq miss cycles -system.cpu.dcache.ReadReq_miss_latency::total 25188260500 # number of ReadReq miss cycles -system.cpu.dcache.WriteReq_miss_latency::cpu.data 10109820000 # number of WriteReq miss cycles -system.cpu.dcache.WriteReq_miss_latency::total 10109820000 # number of WriteReq miss cycles -system.cpu.dcache.demand_miss_latency::cpu.data 35298080500 # number of demand (read+write) miss cycles -system.cpu.dcache.demand_miss_latency::total 35298080500 # number of demand (read+write) miss cycles -system.cpu.dcache.overall_miss_latency::cpu.data 35298080500 # number of overall miss cycles -system.cpu.dcache.overall_miss_latency::total 35298080500 # number of overall miss cycles -system.cpu.dcache.ReadReq_accesses::cpu.data 250333347 # number of ReadReq accesses(hits+misses) -system.cpu.dcache.ReadReq_accesses::total 250333347 # number of ReadReq accesses(hits+misses) +system.cpu.dcache.demand_misses::cpu.data 850904 # number of demand (read+write) misses +system.cpu.dcache.demand_misses::total 850904 # number of demand (read+write) misses +system.cpu.dcache.overall_misses::cpu.data 851045 # number of overall misses +system.cpu.dcache.overall_misses::total 851045 # number of overall misses +system.cpu.dcache.ReadReq_miss_latency::cpu.data 24628452500 # number of ReadReq miss cycles +system.cpu.dcache.ReadReq_miss_latency::total 24628452500 # number of ReadReq miss cycles +system.cpu.dcache.WriteReq_miss_latency::cpu.data 10137526000 # number of WriteReq miss cycles +system.cpu.dcache.WriteReq_miss_latency::total 10137526000 # number of WriteReq miss cycles +system.cpu.dcache.demand_miss_latency::cpu.data 34765978500 # number of demand (read+write) miss cycles +system.cpu.dcache.demand_miss_latency::total 34765978500 # number of demand (read+write) miss cycles +system.cpu.dcache.overall_miss_latency::cpu.data 34765978500 # number of overall miss cycles +system.cpu.dcache.overall_miss_latency::total 34765978500 # number of overall miss cycles +system.cpu.dcache.ReadReq_accesses::cpu.data 250333872 # number of ReadReq accesses(hits+misses) +system.cpu.dcache.ReadReq_accesses::total 250333872 # number of ReadReq accesses(hits+misses) system.cpu.dcache.WriteReq_accesses::cpu.data 128951477 # number of WriteReq accesses(hits+misses) system.cpu.dcache.WriteReq_accesses::total 128951477 # number of WriteReq 
accesses(hits+misses) system.cpu.dcache.SoftPFReq_accesses::cpu.data 3625 # number of SoftPFReq accesses(hits+misses) @@ -518,274 +519,274 @@ system.cpu.dcache.LoadLockedReq_accesses::cpu.data 5739 system.cpu.dcache.LoadLockedReq_accesses::total 5739 # number of LoadLockedReq accesses(hits+misses) system.cpu.dcache.StoreCondReq_accesses::cpu.data 5739 # number of StoreCondReq accesses(hits+misses) system.cpu.dcache.StoreCondReq_accesses::total 5739 # number of StoreCondReq accesses(hits+misses) -system.cpu.dcache.demand_accesses::cpu.data 379284824 # number of demand (read+write) accesses -system.cpu.dcache.demand_accesses::total 379284824 # number of demand (read+write) accesses -system.cpu.dcache.overall_accesses::cpu.data 379288449 # number of overall (read+write) accesses -system.cpu.dcache.overall_accesses::total 379288449 # number of overall (read+write) accesses -system.cpu.dcache.ReadReq_miss_rate::cpu.data 0.002852 # miss rate for ReadReq accesses -system.cpu.dcache.ReadReq_miss_rate::total 0.002852 # miss rate for ReadReq accesses +system.cpu.dcache.demand_accesses::cpu.data 379285349 # number of demand (read+write) accesses +system.cpu.dcache.demand_accesses::total 379285349 # number of demand (read+write) accesses +system.cpu.dcache.overall_accesses::cpu.data 379288974 # number of overall (read+write) accesses +system.cpu.dcache.overall_accesses::total 379288974 # number of overall (read+write) accesses +system.cpu.dcache.ReadReq_miss_rate::cpu.data 0.002849 # miss rate for ReadReq accesses +system.cpu.dcache.ReadReq_miss_rate::total 0.002849 # miss rate for ReadReq accesses system.cpu.dcache.WriteReq_miss_rate::cpu.data 0.001068 # miss rate for WriteReq accesses system.cpu.dcache.WriteReq_miss_rate::total 0.001068 # miss rate for WriteReq accesses system.cpu.dcache.SoftPFReq_miss_rate::cpu.data 0.038897 # miss rate for SoftPFReq accesses system.cpu.dcache.SoftPFReq_miss_rate::total 0.038897 # miss rate for SoftPFReq accesses -system.cpu.dcache.demand_miss_rate::cpu.data 0.002245 # miss rate for demand accesses -system.cpu.dcache.demand_miss_rate::total 0.002245 # miss rate for demand accesses -system.cpu.dcache.overall_miss_rate::cpu.data 0.002246 # miss rate for overall accesses -system.cpu.dcache.overall_miss_rate::total 0.002246 # miss rate for overall accesses -system.cpu.dcache.ReadReq_avg_miss_latency::cpu.data 35285.533473 # average ReadReq miss latency -system.cpu.dcache.ReadReq_avg_miss_latency::total 35285.533473 # average ReadReq miss latency -system.cpu.dcache.WriteReq_avg_miss_latency::cpu.data 73413.307579 # average WriteReq miss latency -system.cpu.dcache.WriteReq_avg_miss_latency::total 73413.307579 # average WriteReq miss latency -system.cpu.dcache.demand_avg_miss_latency::cpu.data 41451.468025 # average overall miss latency -system.cpu.dcache.demand_avg_miss_latency::total 41451.468025 # average overall miss latency -system.cpu.dcache.overall_avg_miss_latency::cpu.data 41444.605627 # average overall miss latency -system.cpu.dcache.overall_avg_miss_latency::total 41444.605627 # average overall miss latency +system.cpu.dcache.demand_miss_rate::cpu.data 0.002243 # miss rate for demand accesses +system.cpu.dcache.demand_miss_rate::total 0.002243 # miss rate for demand accesses +system.cpu.dcache.overall_miss_rate::cpu.data 0.002244 # miss rate for overall accesses +system.cpu.dcache.overall_miss_rate::total 0.002244 # miss rate for overall accesses +system.cpu.dcache.ReadReq_avg_miss_latency::cpu.data 34532.709986 # average ReadReq miss latency 
+system.cpu.dcache.ReadReq_avg_miss_latency::total 34532.709986 # average ReadReq miss latency +system.cpu.dcache.WriteReq_avg_miss_latency::cpu.data 73613.962472 # average WriteReq miss latency +system.cpu.dcache.WriteReq_avg_miss_latency::total 73613.962472 # average WriteReq miss latency +system.cpu.dcache.demand_avg_miss_latency::cpu.data 40857.697813 # average overall miss latency +system.cpu.dcache.demand_avg_miss_latency::total 40857.697813 # average overall miss latency +system.cpu.dcache.overall_avg_miss_latency::cpu.data 40850.928564 # average overall miss latency +system.cpu.dcache.overall_avg_miss_latency::total 40850.928564 # average overall miss latency system.cpu.dcache.blocked_cycles::no_mshrs 0 # number of cycles access was blocked system.cpu.dcache.blocked_cycles::no_targets 0 # number of cycles access was blocked system.cpu.dcache.blocked::no_mshrs 0 # number of cycles access was blocked system.cpu.dcache.blocked::no_targets 0 # number of cycles access was blocked system.cpu.dcache.avg_blocked_cycles::no_mshrs nan # average number of cycles each access was blocked system.cpu.dcache.avg_blocked_cycles::no_targets nan # average number of cycles each access was blocked -system.cpu.dcache.writebacks::writebacks 88712 # number of writebacks -system.cpu.dcache.writebacks::total 88712 # number of writebacks -system.cpu.dcache.ReadReq_mshr_hits::cpu.data 904 # number of ReadReq MSHR hits -system.cpu.dcache.ReadReq_mshr_hits::total 904 # number of ReadReq MSHR hits -system.cpu.dcache.WriteReq_mshr_hits::cpu.data 68389 # number of WriteReq MSHR hits -system.cpu.dcache.WriteReq_mshr_hits::total 68389 # number of WriteReq MSHR hits -system.cpu.dcache.demand_mshr_hits::cpu.data 69293 # number of demand (read+write) MSHR hits -system.cpu.dcache.demand_mshr_hits::total 69293 # number of demand (read+write) MSHR hits -system.cpu.dcache.overall_mshr_hits::cpu.data 69293 # number of overall MSHR hits -system.cpu.dcache.overall_mshr_hits::total 69293 # number of overall MSHR hits -system.cpu.dcache.ReadReq_mshr_misses::cpu.data 712937 # number of ReadReq MSHR misses -system.cpu.dcache.ReadReq_mshr_misses::total 712937 # number of ReadReq MSHR misses +system.cpu.dcache.writebacks::writebacks 88716 # number of writebacks +system.cpu.dcache.writebacks::total 88716 # number of writebacks +system.cpu.dcache.ReadReq_mshr_hits::cpu.data 457 # number of ReadReq MSHR hits +system.cpu.dcache.ReadReq_mshr_hits::total 457 # number of ReadReq MSHR hits +system.cpu.dcache.WriteReq_mshr_hits::cpu.data 68390 # number of WriteReq MSHR hits +system.cpu.dcache.WriteReq_mshr_hits::total 68390 # number of WriteReq MSHR hits +system.cpu.dcache.demand_mshr_hits::cpu.data 68847 # number of demand (read+write) MSHR hits +system.cpu.dcache.demand_mshr_hits::total 68847 # number of demand (read+write) MSHR hits +system.cpu.dcache.overall_mshr_hits::cpu.data 68847 # number of overall MSHR hits +system.cpu.dcache.overall_mshr_hits::total 68847 # number of overall MSHR hits +system.cpu.dcache.ReadReq_mshr_misses::cpu.data 712735 # number of ReadReq MSHR misses +system.cpu.dcache.ReadReq_mshr_misses::total 712735 # number of ReadReq MSHR misses system.cpu.dcache.WriteReq_mshr_misses::cpu.data 69322 # number of WriteReq MSHR misses system.cpu.dcache.WriteReq_mshr_misses::total 69322 # number of WriteReq MSHR misses system.cpu.dcache.SoftPFReq_mshr_misses::cpu.data 139 # number of SoftPFReq MSHR misses system.cpu.dcache.SoftPFReq_mshr_misses::total 139 # number of SoftPFReq MSHR misses 
-system.cpu.dcache.demand_mshr_misses::cpu.data 782259 # number of demand (read+write) MSHR misses -system.cpu.dcache.demand_mshr_misses::total 782259 # number of demand (read+write) MSHR misses -system.cpu.dcache.overall_mshr_misses::cpu.data 782398 # number of overall MSHR misses -system.cpu.dcache.overall_mshr_misses::total 782398 # number of overall MSHR misses -system.cpu.dcache.ReadReq_mshr_miss_latency::cpu.data 24459771500 # number of ReadReq MSHR miss cycles -system.cpu.dcache.ReadReq_mshr_miss_latency::total 24459771500 # number of ReadReq MSHR miss cycles -system.cpu.dcache.WriteReq_mshr_miss_latency::cpu.data 5070040000 # number of WriteReq MSHR miss cycles -system.cpu.dcache.WriteReq_mshr_miss_latency::total 5070040000 # number of WriteReq MSHR miss cycles +system.cpu.dcache.demand_mshr_misses::cpu.data 782057 # number of demand (read+write) MSHR misses +system.cpu.dcache.demand_mshr_misses::total 782057 # number of demand (read+write) MSHR misses +system.cpu.dcache.overall_mshr_misses::cpu.data 782196 # number of overall MSHR misses +system.cpu.dcache.overall_mshr_misses::total 782196 # number of overall MSHR misses +system.cpu.dcache.ReadReq_mshr_miss_latency::cpu.data 23907337500 # number of ReadReq MSHR miss cycles +system.cpu.dcache.ReadReq_mshr_miss_latency::total 23907337500 # number of ReadReq MSHR miss cycles +system.cpu.dcache.WriteReq_mshr_miss_latency::cpu.data 5084282000 # number of WriteReq MSHR miss cycles +system.cpu.dcache.WriteReq_mshr_miss_latency::total 5084282000 # number of WriteReq MSHR miss cycles system.cpu.dcache.SoftPFReq_mshr_miss_latency::cpu.data 1788000 # number of SoftPFReq MSHR miss cycles system.cpu.dcache.SoftPFReq_mshr_miss_latency::total 1788000 # number of SoftPFReq MSHR miss cycles -system.cpu.dcache.demand_mshr_miss_latency::cpu.data 29529811500 # number of demand (read+write) MSHR miss cycles -system.cpu.dcache.demand_mshr_miss_latency::total 29529811500 # number of demand (read+write) MSHR miss cycles -system.cpu.dcache.overall_mshr_miss_latency::cpu.data 29531599500 # number of overall MSHR miss cycles -system.cpu.dcache.overall_mshr_miss_latency::total 29531599500 # number of overall MSHR miss cycles -system.cpu.dcache.ReadReq_mshr_miss_rate::cpu.data 0.002848 # mshr miss rate for ReadReq accesses -system.cpu.dcache.ReadReq_mshr_miss_rate::total 0.002848 # mshr miss rate for ReadReq accesses +system.cpu.dcache.demand_mshr_miss_latency::cpu.data 28991619500 # number of demand (read+write) MSHR miss cycles +system.cpu.dcache.demand_mshr_miss_latency::total 28991619500 # number of demand (read+write) MSHR miss cycles +system.cpu.dcache.overall_mshr_miss_latency::cpu.data 28993407500 # number of overall MSHR miss cycles +system.cpu.dcache.overall_mshr_miss_latency::total 28993407500 # number of overall MSHR miss cycles +system.cpu.dcache.ReadReq_mshr_miss_rate::cpu.data 0.002847 # mshr miss rate for ReadReq accesses +system.cpu.dcache.ReadReq_mshr_miss_rate::total 0.002847 # mshr miss rate for ReadReq accesses system.cpu.dcache.WriteReq_mshr_miss_rate::cpu.data 0.000538 # mshr miss rate for WriteReq accesses system.cpu.dcache.WriteReq_mshr_miss_rate::total 0.000538 # mshr miss rate for WriteReq accesses system.cpu.dcache.SoftPFReq_mshr_miss_rate::cpu.data 0.038345 # mshr miss rate for SoftPFReq accesses system.cpu.dcache.SoftPFReq_mshr_miss_rate::total 0.038345 # mshr miss rate for SoftPFReq accesses system.cpu.dcache.demand_mshr_miss_rate::cpu.data 0.002062 # mshr miss rate for demand accesses 
system.cpu.dcache.demand_mshr_miss_rate::total 0.002062 # mshr miss rate for demand accesses -system.cpu.dcache.overall_mshr_miss_rate::cpu.data 0.002063 # mshr miss rate for overall accesses -system.cpu.dcache.overall_mshr_miss_rate::total 0.002063 # mshr miss rate for overall accesses -system.cpu.dcache.ReadReq_avg_mshr_miss_latency::cpu.data 34308.461337 # average ReadReq mshr miss latency -system.cpu.dcache.ReadReq_avg_mshr_miss_latency::total 34308.461337 # average ReadReq mshr miss latency -system.cpu.dcache.WriteReq_avg_mshr_miss_latency::cpu.data 73137.532097 # average WriteReq mshr miss latency -system.cpu.dcache.WriteReq_avg_mshr_miss_latency::total 73137.532097 # average WriteReq mshr miss latency +system.cpu.dcache.overall_mshr_miss_rate::cpu.data 0.002062 # mshr miss rate for overall accesses +system.cpu.dcache.overall_mshr_miss_rate::total 0.002062 # mshr miss rate for overall accesses +system.cpu.dcache.ReadReq_avg_mshr_miss_latency::cpu.data 33543.094558 # average ReadReq mshr miss latency +system.cpu.dcache.ReadReq_avg_mshr_miss_latency::total 33543.094558 # average ReadReq mshr miss latency +system.cpu.dcache.WriteReq_avg_mshr_miss_latency::cpu.data 73342.979141 # average WriteReq mshr miss latency +system.cpu.dcache.WriteReq_avg_mshr_miss_latency::total 73342.979141 # average WriteReq mshr miss latency system.cpu.dcache.SoftPFReq_avg_mshr_miss_latency::cpu.data 12863.309353 # average SoftPFReq mshr miss latency system.cpu.dcache.SoftPFReq_avg_mshr_miss_latency::total 12863.309353 # average SoftPFReq mshr miss latency -system.cpu.dcache.demand_avg_mshr_miss_latency::cpu.data 37749.404609 # average overall mshr miss latency -system.cpu.dcache.demand_avg_mshr_miss_latency::total 37749.404609 # average overall mshr miss latency -system.cpu.dcache.overall_avg_mshr_miss_latency::cpu.data 37744.983372 # average overall mshr miss latency -system.cpu.dcache.overall_avg_mshr_miss_latency::total 37744.983372 # average overall mshr miss latency -system.cpu.icache.tags.pwrStateResidencyTicks::UNDEFINED 489945697500 # Cumulative time (in ticks) in various power states -system.cpu.icache.tags.replacements 24859 # number of replacements -system.cpu.icache.tags.tagsinuse 1712.892625 # Cycle average of tags in use -system.cpu.icache.tags.total_refs 252585994 # Total number of references to valid blocks. -system.cpu.icache.tags.sampled_refs 26612 # Sample count of references to valid blocks. -system.cpu.icache.tags.avg_refs 9491.432211 # Average number of references to valid blocks. +system.cpu.dcache.demand_avg_mshr_miss_latency::cpu.data 37070.980120 # average overall mshr miss latency +system.cpu.dcache.demand_avg_mshr_miss_latency::total 37070.980120 # average overall mshr miss latency +system.cpu.dcache.overall_avg_mshr_miss_latency::cpu.data 37066.678301 # average overall mshr miss latency +system.cpu.dcache.overall_avg_mshr_miss_latency::total 37066.678301 # average overall mshr miss latency +system.cpu.icache.tags.pwrStateResidencyTicks::UNDEFINED 512588680500 # Cumulative time (in ticks) in various power states +system.cpu.icache.tags.replacements 24885 # number of replacements +system.cpu.icache.tags.tagsinuse 1711.979735 # Cycle average of tags in use +system.cpu.icache.tags.total_refs 257789647 # Total number of references to valid blocks. +system.cpu.icache.tags.sampled_refs 26636 # Sample count of references to valid blocks. +system.cpu.icache.tags.avg_refs 9678.241741 # Average number of references to valid blocks. 
system.cpu.icache.tags.warmup_cycle 0 # Cycle when the warmup percentage was hit. -system.cpu.icache.tags.occ_blocks::cpu.inst 1712.892625 # Average occupied blocks per requestor -system.cpu.icache.tags.occ_percent::cpu.inst 0.836373 # Average percentage of cache occupancy -system.cpu.icache.tags.occ_percent::total 0.836373 # Average percentage of cache occupancy -system.cpu.icache.tags.occ_task_id_blocks::1024 1753 # Occupied blocks per task id -system.cpu.icache.tags.age_task_id_blocks_1024::0 54 # Occupied blocks per task id -system.cpu.icache.tags.age_task_id_blocks_1024::1 100 # Occupied blocks per task id -system.cpu.icache.tags.age_task_id_blocks_1024::4 1599 # Occupied blocks per task id -system.cpu.icache.tags.occ_task_id_percent::1024 0.855957 # Percentage of cache occupancy per task id -system.cpu.icache.tags.tag_accesses 505251826 # Number of tag accesses -system.cpu.icache.tags.data_accesses 505251826 # Number of data accesses -system.cpu.icache.pwrStateResidencyTicks::UNDEFINED 489945697500 # Cumulative time (in ticks) in various power states -system.cpu.icache.ReadReq_hits::cpu.inst 252585994 # number of ReadReq hits -system.cpu.icache.ReadReq_hits::total 252585994 # number of ReadReq hits -system.cpu.icache.demand_hits::cpu.inst 252585994 # number of demand (read+write) hits -system.cpu.icache.demand_hits::total 252585994 # number of demand (read+write) hits -system.cpu.icache.overall_hits::cpu.inst 252585994 # number of overall hits -system.cpu.icache.overall_hits::total 252585994 # number of overall hits -system.cpu.icache.ReadReq_misses::cpu.inst 26613 # number of ReadReq misses -system.cpu.icache.ReadReq_misses::total 26613 # number of ReadReq misses -system.cpu.icache.demand_misses::cpu.inst 26613 # number of demand (read+write) misses -system.cpu.icache.demand_misses::total 26613 # number of demand (read+write) misses -system.cpu.icache.overall_misses::cpu.inst 26613 # number of overall misses -system.cpu.icache.overall_misses::total 26613 # number of overall misses -system.cpu.icache.ReadReq_miss_latency::cpu.inst 516729500 # number of ReadReq miss cycles -system.cpu.icache.ReadReq_miss_latency::total 516729500 # number of ReadReq miss cycles -system.cpu.icache.demand_miss_latency::cpu.inst 516729500 # number of demand (read+write) miss cycles -system.cpu.icache.demand_miss_latency::total 516729500 # number of demand (read+write) miss cycles -system.cpu.icache.overall_miss_latency::cpu.inst 516729500 # number of overall miss cycles -system.cpu.icache.overall_miss_latency::total 516729500 # number of overall miss cycles -system.cpu.icache.ReadReq_accesses::cpu.inst 252612607 # number of ReadReq accesses(hits+misses) -system.cpu.icache.ReadReq_accesses::total 252612607 # number of ReadReq accesses(hits+misses) -system.cpu.icache.demand_accesses::cpu.inst 252612607 # number of demand (read+write) accesses -system.cpu.icache.demand_accesses::total 252612607 # number of demand (read+write) accesses -system.cpu.icache.overall_accesses::cpu.inst 252612607 # number of overall (read+write) accesses -system.cpu.icache.overall_accesses::total 252612607 # number of overall (read+write) accesses -system.cpu.icache.ReadReq_miss_rate::cpu.inst 0.000105 # miss rate for ReadReq accesses -system.cpu.icache.ReadReq_miss_rate::total 0.000105 # miss rate for ReadReq accesses -system.cpu.icache.demand_miss_rate::cpu.inst 0.000105 # miss rate for demand accesses -system.cpu.icache.demand_miss_rate::total 0.000105 # miss rate for demand accesses 
-system.cpu.icache.overall_miss_rate::cpu.inst 0.000105 # miss rate for overall accesses -system.cpu.icache.overall_miss_rate::total 0.000105 # miss rate for overall accesses -system.cpu.icache.ReadReq_avg_miss_latency::cpu.inst 19416.431819 # average ReadReq miss latency -system.cpu.icache.ReadReq_avg_miss_latency::total 19416.431819 # average ReadReq miss latency -system.cpu.icache.demand_avg_miss_latency::cpu.inst 19416.431819 # average overall miss latency -system.cpu.icache.demand_avg_miss_latency::total 19416.431819 # average overall miss latency -system.cpu.icache.overall_avg_miss_latency::cpu.inst 19416.431819 # average overall miss latency -system.cpu.icache.overall_avg_miss_latency::total 19416.431819 # average overall miss latency +system.cpu.icache.tags.occ_blocks::cpu.inst 1711.979735 # Average occupied blocks per requestor +system.cpu.icache.tags.occ_percent::cpu.inst 0.835928 # Average percentage of cache occupancy +system.cpu.icache.tags.occ_percent::total 0.835928 # Average percentage of cache occupancy +system.cpu.icache.tags.occ_task_id_blocks::1024 1751 # Occupied blocks per task id +system.cpu.icache.tags.age_task_id_blocks_1024::0 57 # Occupied blocks per task id +system.cpu.icache.tags.age_task_id_blocks_1024::1 98 # Occupied blocks per task id +system.cpu.icache.tags.age_task_id_blocks_1024::4 1596 # Occupied blocks per task id +system.cpu.icache.tags.occ_task_id_percent::1024 0.854980 # Percentage of cache occupancy per task id +system.cpu.icache.tags.tag_accesses 515659204 # Number of tag accesses +system.cpu.icache.tags.data_accesses 515659204 # Number of data accesses +system.cpu.icache.pwrStateResidencyTicks::UNDEFINED 512588680500 # Cumulative time (in ticks) in various power states +system.cpu.icache.ReadReq_hits::cpu.inst 257789647 # number of ReadReq hits +system.cpu.icache.ReadReq_hits::total 257789647 # number of ReadReq hits +system.cpu.icache.demand_hits::cpu.inst 257789647 # number of demand (read+write) hits +system.cpu.icache.demand_hits::total 257789647 # number of demand (read+write) hits +system.cpu.icache.overall_hits::cpu.inst 257789647 # number of overall hits +system.cpu.icache.overall_hits::total 257789647 # number of overall hits +system.cpu.icache.ReadReq_misses::cpu.inst 26637 # number of ReadReq misses +system.cpu.icache.ReadReq_misses::total 26637 # number of ReadReq misses +system.cpu.icache.demand_misses::cpu.inst 26637 # number of demand (read+write) misses +system.cpu.icache.demand_misses::total 26637 # number of demand (read+write) misses +system.cpu.icache.overall_misses::cpu.inst 26637 # number of overall misses +system.cpu.icache.overall_misses::total 26637 # number of overall misses +system.cpu.icache.ReadReq_miss_latency::cpu.inst 515552500 # number of ReadReq miss cycles +system.cpu.icache.ReadReq_miss_latency::total 515552500 # number of ReadReq miss cycles +system.cpu.icache.demand_miss_latency::cpu.inst 515552500 # number of demand (read+write) miss cycles +system.cpu.icache.demand_miss_latency::total 515552500 # number of demand (read+write) miss cycles +system.cpu.icache.overall_miss_latency::cpu.inst 515552500 # number of overall miss cycles +system.cpu.icache.overall_miss_latency::total 515552500 # number of overall miss cycles +system.cpu.icache.ReadReq_accesses::cpu.inst 257816284 # number of ReadReq accesses(hits+misses) +system.cpu.icache.ReadReq_accesses::total 257816284 # number of ReadReq accesses(hits+misses) +system.cpu.icache.demand_accesses::cpu.inst 257816284 # number of demand (read+write) accesses 
+system.cpu.icache.demand_accesses::total 257816284 # number of demand (read+write) accesses +system.cpu.icache.overall_accesses::cpu.inst 257816284 # number of overall (read+write) accesses +system.cpu.icache.overall_accesses::total 257816284 # number of overall (read+write) accesses +system.cpu.icache.ReadReq_miss_rate::cpu.inst 0.000103 # miss rate for ReadReq accesses +system.cpu.icache.ReadReq_miss_rate::total 0.000103 # miss rate for ReadReq accesses +system.cpu.icache.demand_miss_rate::cpu.inst 0.000103 # miss rate for demand accesses +system.cpu.icache.demand_miss_rate::total 0.000103 # miss rate for demand accesses +system.cpu.icache.overall_miss_rate::cpu.inst 0.000103 # miss rate for overall accesses +system.cpu.icache.overall_miss_rate::total 0.000103 # miss rate for overall accesses +system.cpu.icache.ReadReq_avg_miss_latency::cpu.inst 19354.750910 # average ReadReq miss latency +system.cpu.icache.ReadReq_avg_miss_latency::total 19354.750910 # average ReadReq miss latency +system.cpu.icache.demand_avg_miss_latency::cpu.inst 19354.750910 # average overall miss latency +system.cpu.icache.demand_avg_miss_latency::total 19354.750910 # average overall miss latency +system.cpu.icache.overall_avg_miss_latency::cpu.inst 19354.750910 # average overall miss latency +system.cpu.icache.overall_avg_miss_latency::total 19354.750910 # average overall miss latency system.cpu.icache.blocked_cycles::no_mshrs 0 # number of cycles access was blocked system.cpu.icache.blocked_cycles::no_targets 0 # number of cycles access was blocked system.cpu.icache.blocked::no_mshrs 0 # number of cycles access was blocked system.cpu.icache.blocked::no_targets 0 # number of cycles access was blocked system.cpu.icache.avg_blocked_cycles::no_mshrs nan # average number of cycles each access was blocked system.cpu.icache.avg_blocked_cycles::no_targets nan # average number of cycles each access was blocked -system.cpu.icache.writebacks::writebacks 24859 # number of writebacks -system.cpu.icache.writebacks::total 24859 # number of writebacks -system.cpu.icache.ReadReq_mshr_misses::cpu.inst 26613 # number of ReadReq MSHR misses -system.cpu.icache.ReadReq_mshr_misses::total 26613 # number of ReadReq MSHR misses -system.cpu.icache.demand_mshr_misses::cpu.inst 26613 # number of demand (read+write) MSHR misses -system.cpu.icache.demand_mshr_misses::total 26613 # number of demand (read+write) MSHR misses -system.cpu.icache.overall_mshr_misses::cpu.inst 26613 # number of overall MSHR misses -system.cpu.icache.overall_mshr_misses::total 26613 # number of overall MSHR misses -system.cpu.icache.ReadReq_mshr_miss_latency::cpu.inst 490117500 # number of ReadReq MSHR miss cycles -system.cpu.icache.ReadReq_mshr_miss_latency::total 490117500 # number of ReadReq MSHR miss cycles -system.cpu.icache.demand_mshr_miss_latency::cpu.inst 490117500 # number of demand (read+write) MSHR miss cycles -system.cpu.icache.demand_mshr_miss_latency::total 490117500 # number of demand (read+write) MSHR miss cycles -system.cpu.icache.overall_mshr_miss_latency::cpu.inst 490117500 # number of overall MSHR miss cycles -system.cpu.icache.overall_mshr_miss_latency::total 490117500 # number of overall MSHR miss cycles -system.cpu.icache.ReadReq_mshr_miss_rate::cpu.inst 0.000105 # mshr miss rate for ReadReq accesses -system.cpu.icache.ReadReq_mshr_miss_rate::total 0.000105 # mshr miss rate for ReadReq accesses -system.cpu.icache.demand_mshr_miss_rate::cpu.inst 0.000105 # mshr miss rate for demand accesses -system.cpu.icache.demand_mshr_miss_rate::total 
0.000105 # mshr miss rate for demand accesses -system.cpu.icache.overall_mshr_miss_rate::cpu.inst 0.000105 # mshr miss rate for overall accesses -system.cpu.icache.overall_mshr_miss_rate::total 0.000105 # mshr miss rate for overall accesses -system.cpu.icache.ReadReq_avg_mshr_miss_latency::cpu.inst 18416.469395 # average ReadReq mshr miss latency -system.cpu.icache.ReadReq_avg_mshr_miss_latency::total 18416.469395 # average ReadReq mshr miss latency -system.cpu.icache.demand_avg_mshr_miss_latency::cpu.inst 18416.469395 # average overall mshr miss latency -system.cpu.icache.demand_avg_mshr_miss_latency::total 18416.469395 # average overall mshr miss latency -system.cpu.icache.overall_avg_mshr_miss_latency::cpu.inst 18416.469395 # average overall mshr miss latency -system.cpu.icache.overall_avg_mshr_miss_latency::total 18416.469395 # average overall mshr miss latency -system.cpu.l2cache.tags.pwrStateResidencyTicks::UNDEFINED 489945697500 # Cumulative time (in ticks) in various power states -system.cpu.l2cache.tags.replacements 258808 # number of replacements -system.cpu.l2cache.tags.tagsinuse 32560.749490 # Cycle average of tags in use -system.cpu.l2cache.tags.total_refs 1247790 # Total number of references to valid blocks. -system.cpu.l2cache.tags.sampled_refs 291552 # Sample count of references to valid blocks. -system.cpu.l2cache.tags.avg_refs 4.279820 # Average number of references to valid blocks. +system.cpu.icache.writebacks::writebacks 24885 # number of writebacks +system.cpu.icache.writebacks::total 24885 # number of writebacks +system.cpu.icache.ReadReq_mshr_misses::cpu.inst 26637 # number of ReadReq MSHR misses +system.cpu.icache.ReadReq_mshr_misses::total 26637 # number of ReadReq MSHR misses +system.cpu.icache.demand_mshr_misses::cpu.inst 26637 # number of demand (read+write) MSHR misses +system.cpu.icache.demand_mshr_misses::total 26637 # number of demand (read+write) MSHR misses +system.cpu.icache.overall_mshr_misses::cpu.inst 26637 # number of overall MSHR misses +system.cpu.icache.overall_mshr_misses::total 26637 # number of overall MSHR misses +system.cpu.icache.ReadReq_mshr_miss_latency::cpu.inst 488916500 # number of ReadReq MSHR miss cycles +system.cpu.icache.ReadReq_mshr_miss_latency::total 488916500 # number of ReadReq MSHR miss cycles +system.cpu.icache.demand_mshr_miss_latency::cpu.inst 488916500 # number of demand (read+write) MSHR miss cycles +system.cpu.icache.demand_mshr_miss_latency::total 488916500 # number of demand (read+write) MSHR miss cycles +system.cpu.icache.overall_mshr_miss_latency::cpu.inst 488916500 # number of overall MSHR miss cycles +system.cpu.icache.overall_mshr_miss_latency::total 488916500 # number of overall MSHR miss cycles +system.cpu.icache.ReadReq_mshr_miss_rate::cpu.inst 0.000103 # mshr miss rate for ReadReq accesses +system.cpu.icache.ReadReq_mshr_miss_rate::total 0.000103 # mshr miss rate for ReadReq accesses +system.cpu.icache.demand_mshr_miss_rate::cpu.inst 0.000103 # mshr miss rate for demand accesses +system.cpu.icache.demand_mshr_miss_rate::total 0.000103 # mshr miss rate for demand accesses +system.cpu.icache.overall_mshr_miss_rate::cpu.inst 0.000103 # mshr miss rate for overall accesses +system.cpu.icache.overall_mshr_miss_rate::total 0.000103 # mshr miss rate for overall accesses +system.cpu.icache.ReadReq_avg_mshr_miss_latency::cpu.inst 18354.788452 # average ReadReq mshr miss latency +system.cpu.icache.ReadReq_avg_mshr_miss_latency::total 18354.788452 # average ReadReq mshr miss latency 
+system.cpu.icache.demand_avg_mshr_miss_latency::cpu.inst 18354.788452 # average overall mshr miss latency +system.cpu.icache.demand_avg_mshr_miss_latency::total 18354.788452 # average overall mshr miss latency +system.cpu.icache.overall_avg_mshr_miss_latency::cpu.inst 18354.788452 # average overall mshr miss latency +system.cpu.icache.overall_avg_mshr_miss_latency::total 18354.788452 # average overall mshr miss latency +system.cpu.l2cache.tags.pwrStateResidencyTicks::UNDEFINED 512588680500 # Cumulative time (in ticks) in various power states +system.cpu.l2cache.tags.replacements 258816 # number of replacements +system.cpu.l2cache.tags.tagsinuse 32567.443571 # Cycle average of tags in use +system.cpu.l2cache.tags.total_refs 1247529 # Total number of references to valid blocks. +system.cpu.l2cache.tags.sampled_refs 291562 # Sample count of references to valid blocks. +system.cpu.l2cache.tags.avg_refs 4.278778 # Average number of references to valid blocks. system.cpu.l2cache.tags.warmup_cycle 0 # Cycle when the warmup percentage was hit. -system.cpu.l2cache.tags.occ_blocks::writebacks 2632.544658 # Average occupied blocks per requestor -system.cpu.l2cache.tags.occ_blocks::cpu.inst 88.421700 # Average occupied blocks per requestor -system.cpu.l2cache.tags.occ_blocks::cpu.data 29839.783132 # Average occupied blocks per requestor -system.cpu.l2cache.tags.occ_percent::writebacks 0.080339 # Average percentage of cache occupancy -system.cpu.l2cache.tags.occ_percent::cpu.inst 0.002698 # Average percentage of cache occupancy -system.cpu.l2cache.tags.occ_percent::cpu.data 0.910638 # Average percentage of cache occupancy -system.cpu.l2cache.tags.occ_percent::total 0.993675 # Average percentage of cache occupancy -system.cpu.l2cache.tags.occ_task_id_blocks::1024 32744 # Occupied blocks per task id -system.cpu.l2cache.tags.age_task_id_blocks_1024::0 118 # Occupied blocks per task id -system.cpu.l2cache.tags.age_task_id_blocks_1024::1 213 # Occupied blocks per task id -system.cpu.l2cache.tags.age_task_id_blocks_1024::2 326 # Occupied blocks per task id -system.cpu.l2cache.tags.age_task_id_blocks_1024::3 3136 # Occupied blocks per task id -system.cpu.l2cache.tags.age_task_id_blocks_1024::4 28951 # Occupied blocks per task id -system.cpu.l2cache.tags.occ_task_id_percent::1024 0.999268 # Percentage of cache occupancy per task id -system.cpu.l2cache.tags.tag_accesses 13231738 # Number of tag accesses -system.cpu.l2cache.tags.data_accesses 13231738 # Number of data accesses -system.cpu.l2cache.pwrStateResidencyTicks::UNDEFINED 489945697500 # Cumulative time (in ticks) in various power states -system.cpu.l2cache.WritebackDirty_hits::writebacks 88712 # number of WritebackDirty hits -system.cpu.l2cache.WritebackDirty_hits::total 88712 # number of WritebackDirty hits -system.cpu.l2cache.WritebackClean_hits::writebacks 23528 # number of WritebackClean hits -system.cpu.l2cache.WritebackClean_hits::total 23528 # number of WritebackClean hits +system.cpu.l2cache.tags.occ_blocks::writebacks 2619.708679 # Average occupied blocks per requestor +system.cpu.l2cache.tags.occ_blocks::cpu.inst 89.014636 # Average occupied blocks per requestor +system.cpu.l2cache.tags.occ_blocks::cpu.data 29858.720256 # Average occupied blocks per requestor +system.cpu.l2cache.tags.occ_percent::writebacks 0.079947 # Average percentage of cache occupancy +system.cpu.l2cache.tags.occ_percent::cpu.inst 0.002717 # Average percentage of cache occupancy +system.cpu.l2cache.tags.occ_percent::cpu.data 0.911216 # Average percentage of cache occupancy 
+system.cpu.l2cache.tags.occ_percent::total 0.993880 # Average percentage of cache occupancy +system.cpu.l2cache.tags.occ_task_id_blocks::1024 32746 # Occupied blocks per task id +system.cpu.l2cache.tags.age_task_id_blocks_1024::0 124 # Occupied blocks per task id +system.cpu.l2cache.tags.age_task_id_blocks_1024::1 208 # Occupied blocks per task id +system.cpu.l2cache.tags.age_task_id_blocks_1024::2 308 # Occupied blocks per task id +system.cpu.l2cache.tags.age_task_id_blocks_1024::3 2978 # Occupied blocks per task id +system.cpu.l2cache.tags.age_task_id_blocks_1024::4 29128 # Occupied blocks per task id +system.cpu.l2cache.tags.occ_task_id_percent::1024 0.999329 # Percentage of cache occupancy per task id +system.cpu.l2cache.tags.tag_accesses 13229556 # Number of tag accesses +system.cpu.l2cache.tags.data_accesses 13229556 # Number of data accesses +system.cpu.l2cache.pwrStateResidencyTicks::UNDEFINED 512588680500 # Cumulative time (in ticks) in various power states +system.cpu.l2cache.WritebackDirty_hits::writebacks 88716 # number of WritebackDirty hits +system.cpu.l2cache.WritebackDirty_hits::total 88716 # number of WritebackDirty hits +system.cpu.l2cache.WritebackClean_hits::writebacks 23552 # number of WritebackClean hits +system.cpu.l2cache.WritebackClean_hits::total 23552 # number of WritebackClean hits system.cpu.l2cache.ReadExReq_hits::cpu.data 3231 # number of ReadExReq hits system.cpu.l2cache.ReadExReq_hits::total 3231 # number of ReadExReq hits -system.cpu.l2cache.ReadCleanReq_hits::cpu.inst 24049 # number of ReadCleanReq hits -system.cpu.l2cache.ReadCleanReq_hits::total 24049 # number of ReadCleanReq hits -system.cpu.l2cache.ReadSharedReq_hits::cpu.data 490486 # number of ReadSharedReq hits -system.cpu.l2cache.ReadSharedReq_hits::total 490486 # number of ReadSharedReq hits -system.cpu.l2cache.demand_hits::cpu.inst 24049 # number of demand (read+write) hits -system.cpu.l2cache.demand_hits::cpu.data 493717 # number of demand (read+write) hits -system.cpu.l2cache.demand_hits::total 517766 # number of demand (read+write) hits -system.cpu.l2cache.overall_hits::cpu.inst 24049 # number of overall hits -system.cpu.l2cache.overall_hits::cpu.data 493717 # number of overall hits -system.cpu.l2cache.overall_hits::total 517766 # number of overall hits +system.cpu.l2cache.ReadCleanReq_hits::cpu.inst 24067 # number of ReadCleanReq hits +system.cpu.l2cache.ReadCleanReq_hits::total 24067 # number of ReadCleanReq hits +system.cpu.l2cache.ReadSharedReq_hits::cpu.data 490282 # number of ReadSharedReq hits +system.cpu.l2cache.ReadSharedReq_hits::total 490282 # number of ReadSharedReq hits +system.cpu.l2cache.demand_hits::cpu.inst 24067 # number of demand (read+write) hits +system.cpu.l2cache.demand_hits::cpu.data 493513 # number of demand (read+write) hits +system.cpu.l2cache.demand_hits::total 517580 # number of demand (read+write) hits +system.cpu.l2cache.overall_hits::cpu.inst 24067 # number of overall hits +system.cpu.l2cache.overall_hits::cpu.data 493513 # number of overall hits +system.cpu.l2cache.overall_hits::total 517580 # number of overall hits system.cpu.l2cache.ReadExReq_misses::cpu.data 66091 # number of ReadExReq misses system.cpu.l2cache.ReadExReq_misses::total 66091 # number of ReadExReq misses -system.cpu.l2cache.ReadCleanReq_misses::cpu.inst 2564 # number of ReadCleanReq misses -system.cpu.l2cache.ReadCleanReq_misses::total 2564 # number of ReadCleanReq misses -system.cpu.l2cache.ReadSharedReq_misses::cpu.data 222590 # number of ReadSharedReq misses 
-system.cpu.l2cache.ReadSharedReq_misses::total 222590 # number of ReadSharedReq misses -system.cpu.l2cache.demand_misses::cpu.inst 2564 # number of demand (read+write) misses -system.cpu.l2cache.demand_misses::cpu.data 288681 # number of demand (read+write) misses -system.cpu.l2cache.demand_misses::total 291245 # number of demand (read+write) misses -system.cpu.l2cache.overall_misses::cpu.inst 2564 # number of overall misses -system.cpu.l2cache.overall_misses::cpu.data 288681 # number of overall misses -system.cpu.l2cache.overall_misses::total 291245 # number of overall misses -system.cpu.l2cache.ReadExReq_miss_latency::cpu.data 4932129000 # number of ReadExReq miss cycles -system.cpu.l2cache.ReadExReq_miss_latency::total 4932129000 # number of ReadExReq miss cycles -system.cpu.l2cache.ReadCleanReq_miss_latency::cpu.inst 196405000 # number of ReadCleanReq miss cycles -system.cpu.l2cache.ReadCleanReq_miss_latency::total 196405000 # number of ReadCleanReq miss cycles -system.cpu.l2cache.ReadSharedReq_miss_latency::cpu.data 18239788500 # number of ReadSharedReq miss cycles -system.cpu.l2cache.ReadSharedReq_miss_latency::total 18239788500 # number of ReadSharedReq miss cycles -system.cpu.l2cache.demand_miss_latency::cpu.inst 196405000 # number of demand (read+write) miss cycles -system.cpu.l2cache.demand_miss_latency::cpu.data 23171917500 # number of demand (read+write) miss cycles -system.cpu.l2cache.demand_miss_latency::total 23368322500 # number of demand (read+write) miss cycles -system.cpu.l2cache.overall_miss_latency::cpu.inst 196405000 # number of overall miss cycles -system.cpu.l2cache.overall_miss_latency::cpu.data 23171917500 # number of overall miss cycles -system.cpu.l2cache.overall_miss_latency::total 23368322500 # number of overall miss cycles -system.cpu.l2cache.WritebackDirty_accesses::writebacks 88712 # number of WritebackDirty accesses(hits+misses) -system.cpu.l2cache.WritebackDirty_accesses::total 88712 # number of WritebackDirty accesses(hits+misses) -system.cpu.l2cache.WritebackClean_accesses::writebacks 23528 # number of WritebackClean accesses(hits+misses) -system.cpu.l2cache.WritebackClean_accesses::total 23528 # number of WritebackClean accesses(hits+misses) +system.cpu.l2cache.ReadCleanReq_misses::cpu.inst 2570 # number of ReadCleanReq misses +system.cpu.l2cache.ReadCleanReq_misses::total 2570 # number of ReadCleanReq misses +system.cpu.l2cache.ReadSharedReq_misses::cpu.data 222592 # number of ReadSharedReq misses +system.cpu.l2cache.ReadSharedReq_misses::total 222592 # number of ReadSharedReq misses +system.cpu.l2cache.demand_misses::cpu.inst 2570 # number of demand (read+write) misses +system.cpu.l2cache.demand_misses::cpu.data 288683 # number of demand (read+write) misses +system.cpu.l2cache.demand_misses::total 291253 # number of demand (read+write) misses +system.cpu.l2cache.overall_misses::cpu.inst 2570 # number of overall misses +system.cpu.l2cache.overall_misses::cpu.data 288683 # number of overall misses +system.cpu.l2cache.overall_misses::total 291253 # number of overall misses +system.cpu.l2cache.ReadExReq_miss_latency::cpu.data 4946370000 # number of ReadExReq miss cycles +system.cpu.l2cache.ReadExReq_miss_latency::total 4946370000 # number of ReadExReq miss cycles +system.cpu.l2cache.ReadCleanReq_miss_latency::cpu.inst 194980000 # number of ReadCleanReq miss cycles +system.cpu.l2cache.ReadCleanReq_miss_latency::total 194980000 # number of ReadCleanReq miss cycles +system.cpu.l2cache.ReadSharedReq_miss_latency::cpu.data 17689881000 # number of 
ReadSharedReq miss cycles +system.cpu.l2cache.ReadSharedReq_miss_latency::total 17689881000 # number of ReadSharedReq miss cycles +system.cpu.l2cache.demand_miss_latency::cpu.inst 194980000 # number of demand (read+write) miss cycles +system.cpu.l2cache.demand_miss_latency::cpu.data 22636251000 # number of demand (read+write) miss cycles +system.cpu.l2cache.demand_miss_latency::total 22831231000 # number of demand (read+write) miss cycles +system.cpu.l2cache.overall_miss_latency::cpu.inst 194980000 # number of overall miss cycles +system.cpu.l2cache.overall_miss_latency::cpu.data 22636251000 # number of overall miss cycles +system.cpu.l2cache.overall_miss_latency::total 22831231000 # number of overall miss cycles +system.cpu.l2cache.WritebackDirty_accesses::writebacks 88716 # number of WritebackDirty accesses(hits+misses) +system.cpu.l2cache.WritebackDirty_accesses::total 88716 # number of WritebackDirty accesses(hits+misses) +system.cpu.l2cache.WritebackClean_accesses::writebacks 23552 # number of WritebackClean accesses(hits+misses) +system.cpu.l2cache.WritebackClean_accesses::total 23552 # number of WritebackClean accesses(hits+misses) system.cpu.l2cache.ReadExReq_accesses::cpu.data 69322 # number of ReadExReq accesses(hits+misses) system.cpu.l2cache.ReadExReq_accesses::total 69322 # number of ReadExReq accesses(hits+misses) -system.cpu.l2cache.ReadCleanReq_accesses::cpu.inst 26613 # number of ReadCleanReq accesses(hits+misses) -system.cpu.l2cache.ReadCleanReq_accesses::total 26613 # number of ReadCleanReq accesses(hits+misses) -system.cpu.l2cache.ReadSharedReq_accesses::cpu.data 713076 # number of ReadSharedReq accesses(hits+misses) -system.cpu.l2cache.ReadSharedReq_accesses::total 713076 # number of ReadSharedReq accesses(hits+misses) -system.cpu.l2cache.demand_accesses::cpu.inst 26613 # number of demand (read+write) accesses -system.cpu.l2cache.demand_accesses::cpu.data 782398 # number of demand (read+write) accesses -system.cpu.l2cache.demand_accesses::total 809011 # number of demand (read+write) accesses -system.cpu.l2cache.overall_accesses::cpu.inst 26613 # number of overall (read+write) accesses -system.cpu.l2cache.overall_accesses::cpu.data 782398 # number of overall (read+write) accesses -system.cpu.l2cache.overall_accesses::total 809011 # number of overall (read+write) accesses +system.cpu.l2cache.ReadCleanReq_accesses::cpu.inst 26637 # number of ReadCleanReq accesses(hits+misses) +system.cpu.l2cache.ReadCleanReq_accesses::total 26637 # number of ReadCleanReq accesses(hits+misses) +system.cpu.l2cache.ReadSharedReq_accesses::cpu.data 712874 # number of ReadSharedReq accesses(hits+misses) +system.cpu.l2cache.ReadSharedReq_accesses::total 712874 # number of ReadSharedReq accesses(hits+misses) +system.cpu.l2cache.demand_accesses::cpu.inst 26637 # number of demand (read+write) accesses +system.cpu.l2cache.demand_accesses::cpu.data 782196 # number of demand (read+write) accesses +system.cpu.l2cache.demand_accesses::total 808833 # number of demand (read+write) accesses +system.cpu.l2cache.overall_accesses::cpu.inst 26637 # number of overall (read+write) accesses +system.cpu.l2cache.overall_accesses::cpu.data 782196 # number of overall (read+write) accesses +system.cpu.l2cache.overall_accesses::total 808833 # number of overall (read+write) accesses system.cpu.l2cache.ReadExReq_miss_rate::cpu.data 0.953391 # miss rate for ReadExReq accesses system.cpu.l2cache.ReadExReq_miss_rate::total 0.953391 # miss rate for ReadExReq accesses -system.cpu.l2cache.ReadCleanReq_miss_rate::cpu.inst 
0.096344 # miss rate for ReadCleanReq accesses -system.cpu.l2cache.ReadCleanReq_miss_rate::total 0.096344 # miss rate for ReadCleanReq accesses -system.cpu.l2cache.ReadSharedReq_miss_rate::cpu.data 0.312155 # miss rate for ReadSharedReq accesses -system.cpu.l2cache.ReadSharedReq_miss_rate::total 0.312155 # miss rate for ReadSharedReq accesses -system.cpu.l2cache.demand_miss_rate::cpu.inst 0.096344 # miss rate for demand accesses -system.cpu.l2cache.demand_miss_rate::cpu.data 0.368970 # miss rate for demand accesses -system.cpu.l2cache.demand_miss_rate::total 0.360001 # miss rate for demand accesses -system.cpu.l2cache.overall_miss_rate::cpu.inst 0.096344 # miss rate for overall accesses -system.cpu.l2cache.overall_miss_rate::cpu.data 0.368970 # miss rate for overall accesses -system.cpu.l2cache.overall_miss_rate::total 0.360001 # miss rate for overall accesses -system.cpu.l2cache.ReadExReq_avg_miss_latency::cpu.data 74626.333389 # average ReadExReq miss latency -system.cpu.l2cache.ReadExReq_avg_miss_latency::total 74626.333389 # average ReadExReq miss latency -system.cpu.l2cache.ReadCleanReq_avg_miss_latency::cpu.inst 76601.014041 # average ReadCleanReq miss latency -system.cpu.l2cache.ReadCleanReq_avg_miss_latency::total 76601.014041 # average ReadCleanReq miss latency -system.cpu.l2cache.ReadSharedReq_avg_miss_latency::cpu.data 81943.431870 # average ReadSharedReq miss latency -system.cpu.l2cache.ReadSharedReq_avg_miss_latency::total 81943.431870 # average ReadSharedReq miss latency -system.cpu.l2cache.demand_avg_miss_latency::cpu.inst 76601.014041 # average overall miss latency -system.cpu.l2cache.demand_avg_miss_latency::cpu.data 80268.245919 # average overall miss latency -system.cpu.l2cache.demand_avg_miss_latency::total 80235.961132 # average overall miss latency -system.cpu.l2cache.overall_avg_miss_latency::cpu.inst 76601.014041 # average overall miss latency -system.cpu.l2cache.overall_avg_miss_latency::cpu.data 80268.245919 # average overall miss latency -system.cpu.l2cache.overall_avg_miss_latency::total 80235.961132 # average overall miss latency +system.cpu.l2cache.ReadCleanReq_miss_rate::cpu.inst 0.096482 # miss rate for ReadCleanReq accesses +system.cpu.l2cache.ReadCleanReq_miss_rate::total 0.096482 # miss rate for ReadCleanReq accesses +system.cpu.l2cache.ReadSharedReq_miss_rate::cpu.data 0.312246 # miss rate for ReadSharedReq accesses +system.cpu.l2cache.ReadSharedReq_miss_rate::total 0.312246 # miss rate for ReadSharedReq accesses +system.cpu.l2cache.demand_miss_rate::cpu.inst 0.096482 # miss rate for demand accesses +system.cpu.l2cache.demand_miss_rate::cpu.data 0.369067 # miss rate for demand accesses +system.cpu.l2cache.demand_miss_rate::total 0.360090 # miss rate for demand accesses +system.cpu.l2cache.overall_miss_rate::cpu.inst 0.096482 # miss rate for overall accesses +system.cpu.l2cache.overall_miss_rate::cpu.data 0.369067 # miss rate for overall accesses +system.cpu.l2cache.overall_miss_rate::total 0.360090 # miss rate for overall accesses +system.cpu.l2cache.ReadExReq_avg_miss_latency::cpu.data 74841.809021 # average ReadExReq miss latency +system.cpu.l2cache.ReadExReq_avg_miss_latency::total 74841.809021 # average ReadExReq miss latency +system.cpu.l2cache.ReadCleanReq_avg_miss_latency::cpu.inst 75867.704280 # average ReadCleanReq miss latency +system.cpu.l2cache.ReadCleanReq_avg_miss_latency::total 75867.704280 # average ReadCleanReq miss latency +system.cpu.l2cache.ReadSharedReq_avg_miss_latency::cpu.data 79472.222721 # average ReadSharedReq miss latency 
+system.cpu.l2cache.ReadSharedReq_avg_miss_latency::total 79472.222721 # average ReadSharedReq miss latency +system.cpu.l2cache.demand_avg_miss_latency::cpu.inst 75867.704280 # average overall miss latency +system.cpu.l2cache.demand_avg_miss_latency::cpu.data 78412.137189 # average overall miss latency +system.cpu.l2cache.demand_avg_miss_latency::total 78389.685256 # average overall miss latency +system.cpu.l2cache.overall_avg_miss_latency::cpu.inst 75867.704280 # average overall miss latency +system.cpu.l2cache.overall_avg_miss_latency::cpu.data 78412.137189 # average overall miss latency +system.cpu.l2cache.overall_avg_miss_latency::total 78389.685256 # average overall miss latency system.cpu.l2cache.blocked_cycles::no_mshrs 0 # number of cycles access was blocked system.cpu.l2cache.blocked_cycles::no_targets 0 # number of cycles access was blocked system.cpu.l2cache.blocked::no_mshrs 0 # number of cycles access was blocked @@ -794,128 +795,130 @@ system.cpu.l2cache.avg_blocked_cycles::no_mshrs nan system.cpu.l2cache.avg_blocked_cycles::no_targets nan # average number of cycles each access was blocked system.cpu.l2cache.writebacks::writebacks 66098 # number of writebacks system.cpu.l2cache.writebacks::total 66098 # number of writebacks -system.cpu.l2cache.ReadCleanReq_mshr_hits::cpu.inst 5 # number of ReadCleanReq MSHR hits -system.cpu.l2cache.ReadCleanReq_mshr_hits::total 5 # number of ReadCleanReq MSHR hits -system.cpu.l2cache.ReadSharedReq_mshr_hits::cpu.data 27 # number of ReadSharedReq MSHR hits -system.cpu.l2cache.ReadSharedReq_mshr_hits::total 27 # number of ReadSharedReq MSHR hits -system.cpu.l2cache.demand_mshr_hits::cpu.inst 5 # number of demand (read+write) MSHR hits -system.cpu.l2cache.demand_mshr_hits::cpu.data 27 # number of demand (read+write) MSHR hits -system.cpu.l2cache.demand_mshr_hits::total 32 # number of demand (read+write) MSHR hits -system.cpu.l2cache.overall_mshr_hits::cpu.inst 5 # number of overall MSHR hits -system.cpu.l2cache.overall_mshr_hits::cpu.data 27 # number of overall MSHR hits -system.cpu.l2cache.overall_mshr_hits::total 32 # number of overall MSHR hits +system.cpu.l2cache.ReadCleanReq_mshr_hits::cpu.inst 4 # number of ReadCleanReq MSHR hits +system.cpu.l2cache.ReadCleanReq_mshr_hits::total 4 # number of ReadCleanReq MSHR hits +system.cpu.l2cache.ReadSharedReq_mshr_hits::cpu.data 26 # number of ReadSharedReq MSHR hits +system.cpu.l2cache.ReadSharedReq_mshr_hits::total 26 # number of ReadSharedReq MSHR hits +system.cpu.l2cache.demand_mshr_hits::cpu.inst 4 # number of demand (read+write) MSHR hits +system.cpu.l2cache.demand_mshr_hits::cpu.data 26 # number of demand (read+write) MSHR hits +system.cpu.l2cache.demand_mshr_hits::total 30 # number of demand (read+write) MSHR hits +system.cpu.l2cache.overall_mshr_hits::cpu.inst 4 # number of overall MSHR hits +system.cpu.l2cache.overall_mshr_hits::cpu.data 26 # number of overall MSHR hits +system.cpu.l2cache.overall_mshr_hits::total 30 # number of overall MSHR hits system.cpu.l2cache.ReadExReq_mshr_misses::cpu.data 66091 # number of ReadExReq MSHR misses system.cpu.l2cache.ReadExReq_mshr_misses::total 66091 # number of ReadExReq MSHR misses -system.cpu.l2cache.ReadCleanReq_mshr_misses::cpu.inst 2559 # number of ReadCleanReq MSHR misses -system.cpu.l2cache.ReadCleanReq_mshr_misses::total 2559 # number of ReadCleanReq MSHR misses -system.cpu.l2cache.ReadSharedReq_mshr_misses::cpu.data 222563 # number of ReadSharedReq MSHR misses -system.cpu.l2cache.ReadSharedReq_mshr_misses::total 222563 # number of 
ReadSharedReq MSHR misses -system.cpu.l2cache.demand_mshr_misses::cpu.inst 2559 # number of demand (read+write) MSHR misses -system.cpu.l2cache.demand_mshr_misses::cpu.data 288654 # number of demand (read+write) MSHR misses -system.cpu.l2cache.demand_mshr_misses::total 291213 # number of demand (read+write) MSHR misses -system.cpu.l2cache.overall_mshr_misses::cpu.inst 2559 # number of overall MSHR misses -system.cpu.l2cache.overall_mshr_misses::cpu.data 288654 # number of overall MSHR misses -system.cpu.l2cache.overall_mshr_misses::total 291213 # number of overall MSHR misses -system.cpu.l2cache.ReadExReq_mshr_miss_latency::cpu.data 4271219000 # number of ReadExReq MSHR miss cycles -system.cpu.l2cache.ReadExReq_mshr_miss_latency::total 4271219000 # number of ReadExReq MSHR miss cycles -system.cpu.l2cache.ReadCleanReq_mshr_miss_latency::cpu.inst 170500500 # number of ReadCleanReq MSHR miss cycles -system.cpu.l2cache.ReadCleanReq_mshr_miss_latency::total 170500500 # number of ReadCleanReq MSHR miss cycles -system.cpu.l2cache.ReadSharedReq_mshr_miss_latency::cpu.data 16012410500 # number of ReadSharedReq MSHR miss cycles -system.cpu.l2cache.ReadSharedReq_mshr_miss_latency::total 16012410500 # number of ReadSharedReq MSHR miss cycles -system.cpu.l2cache.demand_mshr_miss_latency::cpu.inst 170500500 # number of demand (read+write) MSHR miss cycles -system.cpu.l2cache.demand_mshr_miss_latency::cpu.data 20283629500 # number of demand (read+write) MSHR miss cycles -system.cpu.l2cache.demand_mshr_miss_latency::total 20454130000 # number of demand (read+write) MSHR miss cycles -system.cpu.l2cache.overall_mshr_miss_latency::cpu.inst 170500500 # number of overall MSHR miss cycles -system.cpu.l2cache.overall_mshr_miss_latency::cpu.data 20283629500 # number of overall MSHR miss cycles -system.cpu.l2cache.overall_mshr_miss_latency::total 20454130000 # number of overall MSHR miss cycles +system.cpu.l2cache.ReadCleanReq_mshr_misses::cpu.inst 2566 # number of ReadCleanReq MSHR misses +system.cpu.l2cache.ReadCleanReq_mshr_misses::total 2566 # number of ReadCleanReq MSHR misses +system.cpu.l2cache.ReadSharedReq_mshr_misses::cpu.data 222566 # number of ReadSharedReq MSHR misses +system.cpu.l2cache.ReadSharedReq_mshr_misses::total 222566 # number of ReadSharedReq MSHR misses +system.cpu.l2cache.demand_mshr_misses::cpu.inst 2566 # number of demand (read+write) MSHR misses +system.cpu.l2cache.demand_mshr_misses::cpu.data 288657 # number of demand (read+write) MSHR misses +system.cpu.l2cache.demand_mshr_misses::total 291223 # number of demand (read+write) MSHR misses +system.cpu.l2cache.overall_mshr_misses::cpu.inst 2566 # number of overall MSHR misses +system.cpu.l2cache.overall_mshr_misses::cpu.data 288657 # number of overall MSHR misses +system.cpu.l2cache.overall_mshr_misses::total 291223 # number of overall MSHR misses +system.cpu.l2cache.ReadExReq_mshr_miss_latency::cpu.data 4285460000 # number of ReadExReq MSHR miss cycles +system.cpu.l2cache.ReadExReq_mshr_miss_latency::total 4285460000 # number of ReadExReq MSHR miss cycles +system.cpu.l2cache.ReadCleanReq_mshr_miss_latency::cpu.inst 169076000 # number of ReadCleanReq MSHR miss cycles +system.cpu.l2cache.ReadCleanReq_mshr_miss_latency::total 169076000 # number of ReadCleanReq MSHR miss cycles +system.cpu.l2cache.ReadSharedReq_mshr_miss_latency::cpu.data 15462440500 # number of ReadSharedReq MSHR miss cycles +system.cpu.l2cache.ReadSharedReq_mshr_miss_latency::total 15462440500 # number of ReadSharedReq MSHR miss cycles 
+system.cpu.l2cache.demand_mshr_miss_latency::cpu.inst 169076000 # number of demand (read+write) MSHR miss cycles +system.cpu.l2cache.demand_mshr_miss_latency::cpu.data 19747900500 # number of demand (read+write) MSHR miss cycles +system.cpu.l2cache.demand_mshr_miss_latency::total 19916976500 # number of demand (read+write) MSHR miss cycles +system.cpu.l2cache.overall_mshr_miss_latency::cpu.inst 169076000 # number of overall MSHR miss cycles +system.cpu.l2cache.overall_mshr_miss_latency::cpu.data 19747900500 # number of overall MSHR miss cycles +system.cpu.l2cache.overall_mshr_miss_latency::total 19916976500 # number of overall MSHR miss cycles system.cpu.l2cache.ReadExReq_mshr_miss_rate::cpu.data 0.953391 # mshr miss rate for ReadExReq accesses system.cpu.l2cache.ReadExReq_mshr_miss_rate::total 0.953391 # mshr miss rate for ReadExReq accesses -system.cpu.l2cache.ReadCleanReq_mshr_miss_rate::cpu.inst 0.096156 # mshr miss rate for ReadCleanReq accesses -system.cpu.l2cache.ReadCleanReq_mshr_miss_rate::total 0.096156 # mshr miss rate for ReadCleanReq accesses -system.cpu.l2cache.ReadSharedReq_mshr_miss_rate::cpu.data 0.312117 # mshr miss rate for ReadSharedReq accesses -system.cpu.l2cache.ReadSharedReq_mshr_miss_rate::total 0.312117 # mshr miss rate for ReadSharedReq accesses -system.cpu.l2cache.demand_mshr_miss_rate::cpu.inst 0.096156 # mshr miss rate for demand accesses -system.cpu.l2cache.demand_mshr_miss_rate::cpu.data 0.368935 # mshr miss rate for demand accesses -system.cpu.l2cache.demand_mshr_miss_rate::total 0.359962 # mshr miss rate for demand accesses -system.cpu.l2cache.overall_mshr_miss_rate::cpu.inst 0.096156 # mshr miss rate for overall accesses -system.cpu.l2cache.overall_mshr_miss_rate::cpu.data 0.368935 # mshr miss rate for overall accesses -system.cpu.l2cache.overall_mshr_miss_rate::total 0.359962 # mshr miss rate for overall accesses -system.cpu.l2cache.ReadExReq_avg_mshr_miss_latency::cpu.data 64626.333389 # average ReadExReq mshr miss latency -system.cpu.l2cache.ReadExReq_avg_mshr_miss_latency::total 64626.333389 # average ReadExReq mshr miss latency -system.cpu.l2cache.ReadCleanReq_avg_mshr_miss_latency::cpu.inst 66627.784291 # average ReadCleanReq mshr miss latency -system.cpu.l2cache.ReadCleanReq_avg_mshr_miss_latency::total 66627.784291 # average ReadCleanReq mshr miss latency -system.cpu.l2cache.ReadSharedReq_avg_mshr_miss_latency::cpu.data 71945.518797 # average ReadSharedReq mshr miss latency -system.cpu.l2cache.ReadSharedReq_avg_mshr_miss_latency::total 71945.518797 # average ReadSharedReq mshr miss latency -system.cpu.l2cache.demand_avg_mshr_miss_latency::cpu.inst 66627.784291 # average overall mshr miss latency -system.cpu.l2cache.demand_avg_mshr_miss_latency::cpu.data 70269.698324 # average overall mshr miss latency -system.cpu.l2cache.demand_avg_mshr_miss_latency::total 70237.695433 # average overall mshr miss latency -system.cpu.l2cache.overall_avg_mshr_miss_latency::cpu.inst 66627.784291 # average overall mshr miss latency -system.cpu.l2cache.overall_avg_mshr_miss_latency::cpu.data 70269.698324 # average overall mshr miss latency -system.cpu.l2cache.overall_avg_mshr_miss_latency::total 70237.695433 # average overall mshr miss latency -system.cpu.toL2Bus.snoop_filter.tot_requests 1612172 # Total number of requests made to the snoop filter. -system.cpu.toL2Bus.snoop_filter.hit_single_requests 803221 # Number of requests hitting in the snoop filter with a single holder of the requested data. 
-system.cpu.toL2Bus.snoop_filter.hit_multi_requests 3314 # Number of requests hitting in the snoop filter with multiple (>1) holders of the requested data.
+system.cpu.l2cache.ReadCleanReq_mshr_miss_rate::cpu.inst 0.096332 # mshr miss rate for ReadCleanReq accesses
+system.cpu.l2cache.ReadCleanReq_mshr_miss_rate::total 0.096332 # mshr miss rate for ReadCleanReq accesses
+system.cpu.l2cache.ReadSharedReq_mshr_miss_rate::cpu.data 0.312209 # mshr miss rate for ReadSharedReq accesses
+system.cpu.l2cache.ReadSharedReq_mshr_miss_rate::total 0.312209 # mshr miss rate for ReadSharedReq accesses
+system.cpu.l2cache.demand_mshr_miss_rate::cpu.inst 0.096332 # mshr miss rate for demand accesses
+system.cpu.l2cache.demand_mshr_miss_rate::cpu.data 0.369034 # mshr miss rate for demand accesses
+system.cpu.l2cache.demand_mshr_miss_rate::total 0.360053 # mshr miss rate for demand accesses
+system.cpu.l2cache.overall_mshr_miss_rate::cpu.inst 0.096332 # mshr miss rate for overall accesses
+system.cpu.l2cache.overall_mshr_miss_rate::cpu.data 0.369034 # mshr miss rate for overall accesses
+system.cpu.l2cache.overall_mshr_miss_rate::total 0.360053 # mshr miss rate for overall accesses
+system.cpu.l2cache.ReadExReq_avg_mshr_miss_latency::cpu.data 64841.809021 # average ReadExReq mshr miss latency
+system.cpu.l2cache.ReadExReq_avg_mshr_miss_latency::total 64841.809021 # average ReadExReq mshr miss latency
+system.cpu.l2cache.ReadCleanReq_avg_mshr_miss_latency::cpu.inst 65890.880748 # average ReadCleanReq mshr miss latency
+system.cpu.l2cache.ReadCleanReq_avg_mshr_miss_latency::total 65890.880748 # average ReadCleanReq mshr miss latency
+system.cpu.l2cache.ReadSharedReq_avg_mshr_miss_latency::cpu.data 69473.506735 # average ReadSharedReq mshr miss latency
+system.cpu.l2cache.ReadSharedReq_avg_mshr_miss_latency::total 69473.506735 # average ReadSharedReq mshr miss latency
+system.cpu.l2cache.demand_avg_mshr_miss_latency::cpu.inst 65890.880748 # average overall mshr miss latency
+system.cpu.l2cache.demand_avg_mshr_miss_latency::cpu.data 68413.031730 # average overall mshr miss latency
+system.cpu.l2cache.demand_avg_mshr_miss_latency::total 68390.808762 # average overall mshr miss latency
+system.cpu.l2cache.overall_avg_mshr_miss_latency::cpu.inst 65890.880748 # average overall mshr miss latency
+system.cpu.l2cache.overall_avg_mshr_miss_latency::cpu.data 68413.031730 # average overall mshr miss latency
+system.cpu.l2cache.overall_avg_mshr_miss_latency::total 68390.808762 # average overall mshr miss latency
+system.cpu.toL2Bus.snoop_filter.tot_requests 1611818 # Total number of requests made to the snoop filter.
+system.cpu.toL2Bus.snoop_filter.hit_single_requests 803044 # Number of requests hitting in the snoop filter with a single holder of the requested data.
+system.cpu.toL2Bus.snoop_filter.hit_multi_requests 3234 # Number of requests hitting in the snoop filter with multiple (>1) holders of the requested data.
system.cpu.toL2Bus.snoop_filter.tot_snoops 2027 # Total number of snoops made to the snoop filter.
system.cpu.toL2Bus.snoop_filter.hit_single_snoops 2012 # Number of snoops hitting in the snoop filter with a single holder of the requested data.
system.cpu.toL2Bus.snoop_filter.hit_multi_snoops 15 # Number of snoops hitting in the snoop filter with multiple (>1) holders of the requested data.
-system.cpu.toL2Bus.pwrStateResidencyTicks::UNDEFINED 489945697500 # Cumulative time (in ticks) in various power states
-system.cpu.toL2Bus.trans_dist::ReadResp 739688 # Transaction distribution
-system.cpu.toL2Bus.trans_dist::WritebackDirty 154810 # Transaction distribution
-system.cpu.toL2Bus.trans_dist::WritebackClean 24859 # Transaction distribution
-system.cpu.toL2Bus.trans_dist::CleanEvict 882300 # Transaction distribution
+system.cpu.toL2Bus.pwrStateResidencyTicks::UNDEFINED 512588680500 # Cumulative time (in ticks) in various power states
+system.cpu.toL2Bus.trans_dist::ReadResp 739510 # Transaction distribution
+system.cpu.toL2Bus.trans_dist::WritebackDirty 154814 # Transaction distribution
+system.cpu.toL2Bus.trans_dist::WritebackClean 24885 # Transaction distribution
+system.cpu.toL2Bus.trans_dist::CleanEvict 882102 # Transaction distribution
system.cpu.toL2Bus.trans_dist::ReadExReq 69322 # Transaction distribution
system.cpu.toL2Bus.trans_dist::ReadExResp 69322 # Transaction distribution
-system.cpu.toL2Bus.trans_dist::ReadCleanReq 26613 # Transaction distribution
-system.cpu.toL2Bus.trans_dist::ReadSharedReq 713076 # Transaction distribution
-system.cpu.toL2Bus.pkt_count_system.cpu.icache.mem_side::system.cpu.l2cache.cpu_side 78084 # Packet count per connected master and slave (bytes)
-system.cpu.toL2Bus.pkt_count_system.cpu.dcache.mem_side::system.cpu.l2cache.cpu_side 2343098 # Packet count per connected master and slave (bytes)
-system.cpu.toL2Bus.pkt_count::total 2421182 # Packet count per connected master and slave (bytes)
-system.cpu.toL2Bus.pkt_size_system.cpu.icache.mem_side::system.cpu.l2cache.cpu_side 3294144 # Cumulative packet size per connected master and slave (bytes)
-system.cpu.toL2Bus.pkt_size_system.cpu.dcache.mem_side::system.cpu.l2cache.cpu_side 55751040 # Cumulative packet size per connected master and slave (bytes)
-system.cpu.toL2Bus.pkt_size::total 59045184 # Cumulative packet size per connected master and slave (bytes)
-system.cpu.toL2Bus.snoops 258808 # Total snoops (count)
-system.cpu.toL2Bus.snoop_fanout::samples 1067819 # Request fanout histogram
-system.cpu.toL2Bus.snoop_fanout::mean 0.005072 # Request fanout histogram
-system.cpu.toL2Bus.snoop_fanout::stdev 0.071235 # Request fanout histogram
+system.cpu.toL2Bus.trans_dist::ReadCleanReq 26637 # Transaction distribution
+system.cpu.toL2Bus.trans_dist::ReadSharedReq 712874 # Transaction distribution
+system.cpu.toL2Bus.pkt_count_system.cpu.icache.mem_side::system.cpu.l2cache.cpu_side 78158 # Packet count per connected master and slave (bytes)
+system.cpu.toL2Bus.pkt_count_system.cpu.dcache.mem_side::system.cpu.l2cache.cpu_side 2342492 # Packet count per connected master and slave (bytes)
+system.cpu.toL2Bus.pkt_count::total 2420650 # Packet count per connected master and slave (bytes)
+system.cpu.toL2Bus.pkt_size_system.cpu.icache.mem_side::system.cpu.l2cache.cpu_side 3297344 # Cumulative packet size per connected master and slave (bytes)
+system.cpu.toL2Bus.pkt_size_system.cpu.dcache.mem_side::system.cpu.l2cache.cpu_side 55738368 # Cumulative packet size per connected master and slave (bytes)
+system.cpu.toL2Bus.pkt_size::total 59035712 # Cumulative packet size per connected master and slave (bytes)
+system.cpu.toL2Bus.snoops 258816 # Total snoops (count)
+system.cpu.toL2Bus.snoopTraffic 4230272 # Total snoop traffic (bytes)
+system.cpu.toL2Bus.snoop_fanout::samples 1067649 # Request fanout histogram
+system.cpu.toL2Bus.snoop_fanout::mean 0.004997 # Request fanout histogram
+system.cpu.toL2Bus.snoop_fanout::stdev 0.070711 # Request fanout histogram
system.cpu.toL2Bus.snoop_fanout::underflows 0 0.00% 0.00% # Request fanout histogram
-system.cpu.toL2Bus.snoop_fanout::0 1062418 99.49% 99.49% # Request fanout histogram
-system.cpu.toL2Bus.snoop_fanout::1 5386 0.50% 100.00% # Request fanout histogram
+system.cpu.toL2Bus.snoop_fanout::0 1062329 99.50% 99.50% # Request fanout histogram
+system.cpu.toL2Bus.snoop_fanout::1 5305 0.50% 100.00% # Request fanout histogram
system.cpu.toL2Bus.snoop_fanout::2 15 0.00% 100.00% # Request fanout histogram
system.cpu.toL2Bus.snoop_fanout::overflows 0 0.00% 100.00% # Request fanout histogram
system.cpu.toL2Bus.snoop_fanout::min_value 0 # Request fanout histogram
system.cpu.toL2Bus.snoop_fanout::max_value 2 # Request fanout histogram
-system.cpu.toL2Bus.snoop_fanout::total 1067819 # Request fanout histogram
-system.cpu.toL2Bus.reqLayer0.occupancy 919657000 # Layer occupancy (ticks)
+system.cpu.toL2Bus.snoop_fanout::total 1067649 # Request fanout histogram
+system.cpu.toL2Bus.reqLayer0.occupancy 919510000 # Layer occupancy (ticks)
system.cpu.toL2Bus.reqLayer0.utilization 0.2 # Layer utilization (%)
-system.cpu.toL2Bus.respLayer0.occupancy 39920495 # Layer occupancy (ticks)
+system.cpu.toL2Bus.respLayer0.occupancy 39955996 # Layer occupancy (ticks)
system.cpu.toL2Bus.respLayer0.utilization 0.0 # Layer utilization (%)
-system.cpu.toL2Bus.respLayer1.occupancy 1173610473 # Layer occupancy (ticks)
+system.cpu.toL2Bus.respLayer1.occupancy 1173306974 # Layer occupancy (ticks)
system.cpu.toL2Bus.respLayer1.utilization 0.2 # Layer utilization (%)
-system.membus.pwrStateResidencyTicks::UNDEFINED 489945697500 # Cumulative time (in ticks) in various power states
-system.membus.trans_dist::ReadResp 225121 # Transaction distribution
+system.membus.pwrStateResidencyTicks::UNDEFINED 512588680500 # Cumulative time (in ticks) in various power states
+system.membus.trans_dist::ReadResp 225131 # Transaction distribution
system.membus.trans_dist::WritebackDirty 66098 # Transaction distribution
-system.membus.trans_dist::CleanEvict 190682 # Transaction distribution
+system.membus.trans_dist::CleanEvict 190690 # Transaction distribution
system.membus.trans_dist::ReadExReq 66091 # Transaction distribution
system.membus.trans_dist::ReadExResp 66091 # Transaction distribution
-system.membus.trans_dist::ReadSharedReq 225121 # Transaction distribution
-system.membus.pkt_count_system.cpu.l2cache.mem_side::system.physmem.port 839204 # Packet count per connected master and slave (bytes)
-system.membus.pkt_count::total 839204 # Packet count per connected master and slave (bytes)
-system.membus.pkt_size_system.cpu.l2cache.mem_side::system.physmem.port 22867840 # Cumulative packet size per connected master and slave (bytes)
-system.membus.pkt_size::total 22867840 # Cumulative packet size per connected master and slave (bytes)
+system.membus.trans_dist::ReadSharedReq 225131 # Transaction distribution
+system.membus.pkt_count_system.cpu.l2cache.mem_side::system.physmem.port 839232 # Packet count per connected master and slave (bytes)
+system.membus.pkt_count::total 839232 # Packet count per connected master and slave (bytes)
+system.membus.pkt_size_system.cpu.l2cache.mem_side::system.physmem.port 22868480 # Cumulative packet size per connected master and slave (bytes)
+system.membus.pkt_size::total 22868480 # Cumulative packet size per connected master and slave (bytes)
system.membus.snoops 0 # Total snoops (count)
-system.membus.snoop_fanout::samples 547992 # Request fanout histogram
+system.membus.snoopTraffic 0 # Total snoop traffic (bytes)
+system.membus.snoop_fanout::samples 548010 # Request fanout histogram
system.membus.snoop_fanout::mean 0 # Request fanout histogram
system.membus.snoop_fanout::stdev 0 # Request fanout histogram
system.membus.snoop_fanout::underflows 0 0.00% 0.00% # Request fanout histogram
-system.membus.snoop_fanout::0 547992 100.00% 100.00% # Request fanout histogram
+system.membus.snoop_fanout::0 548010 100.00% 100.00% # Request fanout histogram
system.membus.snoop_fanout::1 0 0.00% 100.00% # Request fanout histogram
system.membus.snoop_fanout::overflows 0 0.00% 100.00% # Request fanout histogram
system.membus.snoop_fanout::min_value 0 # Request fanout histogram
system.membus.snoop_fanout::max_value 0 # Request fanout histogram
-system.membus.snoop_fanout::total 547992 # Request fanout histogram
-system.membus.reqLayer0.occupancy 916865000 # Layer occupancy (ticks)
+system.membus.snoop_fanout::total 548010 # Request fanout histogram
+system.membus.reqLayer0.occupancy 917220500 # Layer occupancy (ticks)
system.membus.reqLayer0.utilization 0.2 # Layer utilization (%)
-system.membus.respLayer1.occupancy 1554037500 # Layer occupancy (ticks)
+system.membus.respLayer1.occupancy 1554785500 # Layer occupancy (ticks)
system.membus.respLayer1.utilization 0.3 # Layer utilization (%)
---------- End Simulation Statistics ----------
diff --git a/tests/long/se/40.perlbmk/ref/arm/linux/o3-timing/config.ini b/tests/long/se/40.perlbmk/ref/arm/linux/o3-timing/config.ini
index 1c8fe2c03..b1c9ef7ec 100644
--- a/tests/long/se/40.perlbmk/ref/arm/linux/o3-timing/config.ini
+++ b/tests/long/se/40.perlbmk/ref/arm/linux/o3-timing/config.ini
@@ -14,6 +14,7 @@ children=clk_domain cpu cpu_clk_domain dvfs_handler membus physmem voltage_domai
boot_osflags=a
cache_line_size=64
clk_domain=system.clk_domain
+default_p_state=UNDEFINED
eventq_index=0
exit_on_work_items=false
init_param=0
@@ -27,6 +28,10 @@ memories=system.physmem
mmap_using_noreserve=false
multi_thread=false
num_work_ids=16
+p_state_clk_gate_bins=20
+p_state_clk_gate_max=1000000000000
+p_state_clk_gate_min=1000
+power_model=Null
readfile=
symbolfile=
thermal_components=
@@ -72,6 +77,7 @@ cpu_id=0
decodeToFetchDelay=1
decodeToRenameDelay=2
decodeWidth=3
+default_p_state=UNDEFINED
dispatchWidth=6
do_checkpoint_insts=true
do_quiesce=true
@@ -110,6 +116,10 @@ numPhysIntRegs=128
numROBEntries=40
numRobs=1
numThreads=1
+p_state_clk_gate_bins=20
+p_state_clk_gate_max=1000000000000
+p_state_clk_gate_min=1000
+power_model=Null
profile=0
progress_interval=0
renameToDecodeDelay=1
@@ -166,12 +176,17 @@ addr_ranges=0:18446744073709551615
assoc=2
clk_domain=system.cpu_clk_domain
clusivity=mostly_incl
+default_p_state=UNDEFINED
demand_mshr_reserve=1
eventq_index=0
hit_latency=2
is_read_only=false
max_miss_count=0
mshrs=6
+p_state_clk_gate_bins=20
+p_state_clk_gate_max=1000000000000
+p_state_clk_gate_min=1000
+power_model=Null
prefetch_on_access=false
prefetcher=Null
response_latency=2
@@ -190,8 +205,13 @@ type=LRU
assoc=2
block_size=64
clk_domain=system.cpu_clk_domain
+default_p_state=UNDEFINED
eventq_index=0
hit_latency=2
+p_state_clk_gate_bins=20
+p_state_clk_gate_max=1000000000000
+p_state_clk_gate_min=1000
+power_model=Null
sequential_access=false
size=32768
@@ -214,9 +234,14 @@ walker=system.cpu.dstage2_mmu.stage2_tlb.walker
[system.cpu.dstage2_mmu.stage2_tlb.walker]
type=ArmTableWalker
clk_domain=system.cpu_clk_domain
+default_p_state=UNDEFINED
eventq_index=0
is_stage2=true
num_squash_per_cycle=2
+p_state_clk_gate_bins=20
+p_state_clk_gate_max=1000000000000
+p_state_clk_gate_min=1000
+power_model=Null
sys=system
[system.cpu.dtb]
@@ -230,9 +255,14 @@ walker=system.cpu.dtb.walker
[system.cpu.dtb.walker]
type=ArmTableWalker
clk_domain=system.cpu_clk_domain
+default_p_state=UNDEFINED
eventq_index=0
is_stage2=false
num_squash_per_cycle=2
+p_state_clk_gate_bins=20
+p_state_clk_gate_max=1000000000000
+p_state_clk_gate_min=1000
+power_model=Null
sys=system
port=system.cpu.toL2Bus.slave[3]
@@ -508,12 +538,17 @@ addr_ranges=0:18446744073709551615
assoc=2
clk_domain=system.cpu_clk_domain
clusivity=mostly_incl
+default_p_state=UNDEFINED
demand_mshr_reserve=1
eventq_index=0
hit_latency=1
is_read_only=true
max_miss_count=0
mshrs=2
+p_state_clk_gate_bins=20
+p_state_clk_gate_max=1000000000000
+p_state_clk_gate_min=1000
+power_model=Null
prefetch_on_access=false
prefetcher=Null
response_latency=1
@@ -532,8 +567,13 @@ type=LRU
assoc=2
block_size=64
clk_domain=system.cpu_clk_domain
+default_p_state=UNDEFINED
eventq_index=0
hit_latency=1
+p_state_clk_gate_bins=20
+p_state_clk_gate_max=1000000000000
+p_state_clk_gate_min=1000
+power_model=Null
sequential_access=false
size=32768
@@ -591,9 +631,14 @@ walker=system.cpu.istage2_mmu.stage2_tlb.walker
[system.cpu.istage2_mmu.stage2_tlb.walker]
type=ArmTableWalker
clk_domain=system.cpu_clk_domain
+default_p_state=UNDEFINED
eventq_index=0
is_stage2=true
num_squash_per_cycle=2
+p_state_clk_gate_bins=20
+p_state_clk_gate_max=1000000000000
+p_state_clk_gate_min=1000
+power_model=Null
sys=system
[system.cpu.itb]
@@ -607,9 +652,14 @@ walker=system.cpu.itb.walker
[system.cpu.itb.walker]
type=ArmTableWalker
clk_domain=system.cpu_clk_domain
+default_p_state=UNDEFINED
eventq_index=0
is_stage2=false
num_squash_per_cycle=2
+p_state_clk_gate_bins=20
+p_state_clk_gate_max=1000000000000
+p_state_clk_gate_min=1000
+power_model=Null
sys=system
port=system.cpu.toL2Bus.slave[2]
@@ -620,12 +670,17 @@ addr_ranges=0:18446744073709551615
assoc=16
clk_domain=system.cpu_clk_domain
clusivity=mostly_excl
+default_p_state=UNDEFINED
demand_mshr_reserve=1
eventq_index=0
hit_latency=12
is_read_only=false
max_miss_count=0
mshrs=16
+p_state_clk_gate_bins=20
+p_state_clk_gate_max=1000000000000
+p_state_clk_gate_min=1000
+power_model=Null
prefetch_on_access=true
prefetcher=system.cpu.l2cache.prefetcher
response_latency=12
@@ -643,6 +698,7 @@ mem_side=system.membus.slave[1]
type=StridePrefetcher
cache_snoop=false
clk_domain=system.cpu_clk_domain
+default_p_state=UNDEFINED
degree=8
eventq_index=0
latency=1
@@ -653,6 +709,10 @@ on_inst=true
on_miss=false
on_read=true
on_write=true
+p_state_clk_gate_bins=20
+p_state_clk_gate_max=1000000000000
+p_state_clk_gate_min=1000
+power_model=Null
queue_filter=true
queue_size=32
queue_squash=true
@@ -669,8 +729,13 @@ type=RandomRepl
assoc=16
block_size=64
clk_domain=system.cpu_clk_domain
+default_p_state=UNDEFINED
eventq_index=0
hit_latency=12
+p_state_clk_gate_bins=20
+p_state_clk_gate_max=1000000000000
+p_state_clk_gate_min=1000
+power_model=Null
sequential_access=false
size=1048576
@@ -678,10 +743,15 @@ size=1048576
type=CoherentXBar
children=snoop_filter
clk_domain=system.cpu_clk_domain
+default_p_state=UNDEFINED
eventq_index=0
forward_latency=0
frontend_latency=1
+p_state_clk_gate_bins=20
+p_state_clk_gate_max=1000000000000
+p_state_clk_gate_min=1000
point_of_coherency=false
+power_model=Null
response_latency=1
snoop_filter=system.cpu.toL2Bus.snoop_filter
snoop_response_latency=1
@@ -712,7 +782,7 @@ env=
errout=cerr
euid=100
eventq_index=0
-executable=/dist/m5/cpu2000/binaries/arm/linux/perlbmk
+executable=/arm/projectscratch/randd/systems/dist/cpu2000/binaries/arm/linux/perlbmk
gid=100
input=cin
kvmInSE=false
@@ -744,10 +814,15 @@ transition_latency=100000000
[system.membus]
type=CoherentXBar
clk_domain=system.clk_domain
+default_p_state=UNDEFINED
eventq_index=0
forward_latency=4
frontend_latency=3
+p_state_clk_gate_bins=20
+p_state_clk_gate_max=1000000000000
+p_state_clk_gate_min=1000
point_of_coherency=true
+power_model=Null
response_latency=2
snoop_filter=Null
snoop_response_latency=4
@@ -791,6 +866,7 @@ burst_length=8
channels=1
clk_domain=system.clk_domain
conf_table_reported=true
+default_p_state=UNDEFINED
device_bus_width=8
device_rowbuffer_size=1024
device_size=536870912
@@ -802,7 +878,11 @@ max_accesses_per_row=16
mem_sched_policy=frfcfs
min_writes_per_switch=16
null=false
+p_state_clk_gate_bins=20
+p_state_clk_gate_max=1000000000000
+p_state_clk_gate_min=1000
page_policy=open_adaptive
+power_model=Null
range=0:134217727
ranks_per_channel=2
read_buffer_size=32
diff --git a/tests/long/se/40.perlbmk/ref/arm/linux/o3-timing/simerr b/tests/long/se/40.perlbmk/ref/arm/linux/o3-timing/simerr
index 2e6ab1e7e..c1f3592f9 100755
--- a/tests/long/se/40.perlbmk/ref/arm/linux/o3-timing/simerr
+++ b/tests/long/se/40.perlbmk/ref/arm/linux/o3-timing/simerr
@@ -1,3 +1,4 @@
warn: DRAM device capacity (8192 Mbytes) does not match the address range assigned (128 Mbytes)
warn: Sockets disabled, not accepting gdb connections
+warn: ClockedObject: More than one power state change request encountered within the same simulation tick
warn: fcntl64(3, 2) passed through to host
diff --git a/tests/long/se/40.perlbmk/ref/arm/linux/o3-timing/simout b/tests/long/se/40.perlbmk/ref/arm/linux/o3-timing/simout
index 17f97ea42..d9571e5e1 100755
--- a/tests/long/se/40.perlbmk/ref/arm/linux/o3-timing/simout
+++ b/tests/long/se/40.perlbmk/ref/arm/linux/o3-timing/simout
@@ -3,10 +3,10 @@ Redirecting stderr to build/ARM/tests/opt/long/se/40.perlbmk/arm/linux/o3-timing
gem5 Simulator System. http://gem5.org
gem5 is copyrighted software; use the --copyright option for details.
-gem5 compiled Mar 15 2016 19:53:43
-gem5 started Mar 15 2016 20:24:49
-gem5 executing on dinar2c11, pid 10851
-command line: build/ARM/gem5.opt -d build/ARM/tests/opt/long/se/40.perlbmk/arm/linux/o3-timing -re /home/stever/gem5-public/tests/run.py build/ARM/tests/opt/long/se/40.perlbmk/arm/linux/o3-timing
+gem5 compiled Jul 21 2016 14:37:41
+gem5 started Jul 21 2016 14:38:22
+gem5 executing on e108600-lin, pid 23079
+command line: /work/curdun01/gem5-external.hg/build/ARM/gem5.opt -d build/ARM/tests/opt/long/se/40.perlbmk/arm/linux/o3-timing -re /work/curdun01/gem5-external.hg/tests/testing/../run.py long/se/40.perlbmk/arm/linux/o3-timing
Global frequency set at 1000000000000 ticks per second
info: Entering event queue @ 0. Starting simulation...
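Note: the stats.txt hunks in this change (one follows immediately below) each touch hundreds of individual counters, so when refreshing reference outputs like these it can be easier to compare the old and new files statistic by statistic than to read the raw diff. The sketch below is purely illustrative and is not part of gem5's own test tooling; the script name, the command-line arguments, and the 5% relative-change threshold are assumptions made for this example.

    #!/usr/bin/env python
    # compare_stats.py (illustrative only): report gem5 statistics whose
    # values differ between an old and a new stats.txt reference file.
    import re
    import sys

    # Matches plain "name value # description" rows; histogram/percentage
    # rows (e.g. the snoop_fanout buckets) are intentionally skipped.
    STAT = re.compile(r'^(\S+)\s+(-?[\d.]+|nan|inf)\s+#')

    def read_stats(path):
        stats = {}
        with open(path) as f:
            for line in f:
                m = STAT.match(line)
                if m:
                    stats[m.group(1)] = m.group(2)
        return stats

    def compare(old_path, new_path, rel_tol=0.05):
        old, new = read_stats(old_path), read_stats(new_path)
        for name in sorted(set(old) | set(new)):
            a, b = old.get(name), new.get(name)
            if a == b:
                continue
            if a is None or b is None:
                print('only in %s: %s' % ('new' if a is None else 'old', name))
                continue
            try:
                fa, fb = float(a), float(b)
                delta = abs(fb - fa) / max(abs(fa), 1e-12)
                flag = '  <-- large change' if delta > rel_tol else ''
            except ValueError:
                flag = ''
            print('%s: %s -> %s%s' % (name, a, b, flag))

    if __name__ == '__main__':
        compare(sys.argv[1], sys.argv[2])

Run, for example, as "python compare_stats.py old/stats.txt new/stats.txt": large relative movements stand out, while the many small drifts (such as the average-latency values above) can be skimmed quickly.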
diff --git a/tests/long/se/40.perlbmk/ref/arm/linux/o3-timing/stats.txt b/tests/long/se/40.perlbmk/ref/arm/linux/o3-timing/stats.txt
index 77ad5d4bc..f24017e10 100644
--- a/tests/long/se/40.perlbmk/ref/arm/linux/o3-timing/stats.txt
+++ b/tests/long/se/40.perlbmk/ref/arm/linux/o3-timing/stats.txt
@@ -4,11 +4,11 @@ sim_seconds 0.326731 # Nu
sim_ticks 326731324000 # Number of ticks simulated
final_tick 326731324000 # Number of ticks from beginning of simulation (restored from checkpoints and never reset)
sim_freq 1000000000000 # Frequency of simulated ticks
-host_inst_rate 187465 # Simulator instruction rate (inst/s)
-host_op_rate 230795 # Simulator op (including micro ops) rate (op/s)
-host_tick_rate 95607340 # Simulator tick rate (ticks/s)
-host_mem_usage 320048 # Number of bytes of host memory used
-host_seconds 3417.43 # Real time elapsed on the host
+host_inst_rate 137546 # Simulator instruction rate (inst/s)
+host_op_rate 169337 # Simulator op (including micro ops) rate (op/s)
+host_tick_rate 70148373 # Simulator tick rate (ticks/s)
+host_mem_usage 272916 # Number of bytes of host memory used
+host_seconds 4657.72 # Real time elapsed on the host
sim_insts 640649299 # Number of instructions simulated
sim_ops 788724958 # Number of ops (including micro ops) simulated
system.voltage_domain.voltage 1 # Voltage in Volts
@@ -1200,6 +1200,7 @@ system.cpu.toL2Bus.pkt_size_system.cpu.icache.mem_side::system.cpu.l2cache.cpu_s
system.cpu.toL2Bus.pkt_size_system.cpu.dcache.mem_side::system.cpu.l2cache.cpu_side 352858624 # Cumulative packet size per connected master and slave (bytes)
system.cpu.toL2Bus.pkt_size::total 606315968 # Cumulative packet size per connected master and slave (bytes)
system.cpu.toL2Bus.snoops 1296784 # Total snoops (count)
+system.cpu.toL2Bus.snoopTraffic 4257152 # Total snoop traffic (bytes)
system.cpu.toL2Bus.snoop_fanout::samples 6034326 # Request fanout histogram
system.cpu.toL2Bus.snoop_fanout::mean 0.339099 # Request fanout histogram
system.cpu.toL2Bus.snoop_fanout::stdev 0.661177 # Request fanout histogram
@@ -1230,6 +1231,7 @@ system.membus.pkt_count::total 2200100 # Pa
system.membus.pkt_size_system.cpu.l2cache.mem_side::system.physmem.port 65252672 # Cumulative packet size per connected master and slave (bytes)
system.membus.pkt_size::total 65252672 # Cumulative packet size per connected master and slave (bytes)
system.membus.snoops 0 # Total snoops (count)
+system.membus.snoopTraffic 0 # Total snoop traffic (bytes)
system.membus.snoop_fanout::samples 1246861 # Request fanout histogram
system.membus.snoop_fanout::mean 0 # Request fanout histogram
system.membus.snoop_fanout::stdev 0 # Request fanout histogram
diff --git a/tests/long/se/40.perlbmk/ref/arm/linux/simple-atomic/config.ini b/tests/long/se/40.perlbmk/ref/arm/linux/simple-atomic/config.ini
index 844cbdea4..5997dda79 100644
--- a/tests/long/se/40.perlbmk/ref/arm/linux/simple-atomic/config.ini
+++ b/tests/long/se/40.perlbmk/ref/arm/linux/simple-atomic/config.ini
@@ -14,6 +14,7 @@ children=clk_domain cpu cpu_clk_domain dvfs_handler membus physmem voltage_domai
boot_osflags=a
cache_line_size=64
clk_domain=system.clk_domain
+default_p_state=UNDEFINED
eventq_index=0
exit_on_work_items=false
init_param=0
@@ -27,6 +28,10 @@ memories=system.physmem
mmap_using_noreserve=false
multi_thread=false
num_work_ids=16
+p_state_clk_gate_bins=20
+p_state_clk_gate_max=1000000000000
+p_state_clk_gate_min=1000
+power_model=Null
readfile=
symbolfile=
thermal_components=
@@ -55,6 +60,7 @@ branchPred=Null
checker=Null
clk_domain=system.cpu_clk_domain cpu_id=0 +default_p_state=UNDEFINED do_checkpoint_insts=true do_quiesce=true do_statistics_insts=true @@ -73,6 +79,10 @@ max_insts_any_thread=0 max_loads_all_threads=0 max_loads_any_thread=0 numThreads=1 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null profile=0 progress_interval=0 simpoint_start_insts= @@ -106,9 +116,14 @@ walker=system.cpu.dstage2_mmu.stage2_tlb.walker [system.cpu.dstage2_mmu.stage2_tlb.walker] type=ArmTableWalker clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 is_stage2=true num_squash_per_cycle=2 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sys=system [system.cpu.dtb] @@ -122,9 +137,14 @@ walker=system.cpu.dtb.walker [system.cpu.dtb.walker] type=ArmTableWalker clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 is_stage2=false num_squash_per_cycle=2 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sys=system port=system.membus.slave[4] @@ -182,9 +202,14 @@ walker=system.cpu.istage2_mmu.stage2_tlb.walker [system.cpu.istage2_mmu.stage2_tlb.walker] type=ArmTableWalker clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 is_stage2=true num_squash_per_cycle=2 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sys=system [system.cpu.itb] @@ -198,9 +223,14 @@ walker=system.cpu.itb.walker [system.cpu.itb.walker] type=ArmTableWalker clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 is_stage2=false num_squash_per_cycle=2 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sys=system port=system.membus.slave[3] @@ -218,7 +248,7 @@ env= errout=cerr euid=100 eventq_index=0 -executable=/scratch/nilay/GEM5/dist/m5/cpu2000/binaries/arm/linux/perlbmk +executable=/arm/projectscratch/randd/systems/dist/cpu2000/binaries/arm/linux/perlbmk gid=100 input=cin kvmInSE=false @@ -250,10 +280,15 @@ transition_latency=100000000 [system.membus] type=CoherentXBar clk_domain=system.clk_domain +default_p_state=UNDEFINED eventq_index=0 forward_latency=4 frontend_latency=3 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 point_of_coherency=true +power_model=Null response_latency=2 snoop_filter=Null snoop_response_latency=4 @@ -268,11 +303,16 @@ type=SimpleMemory bandwidth=73.000000 clk_domain=system.clk_domain conf_table_reported=true +default_p_state=UNDEFINED eventq_index=0 in_addr_map=true latency=30000 latency_var=0 null=false +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null range=0:134217727 port=system.membus.master[0] diff --git a/tests/long/se/40.perlbmk/ref/arm/linux/simple-atomic/simerr b/tests/long/se/40.perlbmk/ref/arm/linux/simple-atomic/simerr index 2de5e2759..937e051a4 100755 --- a/tests/long/se/40.perlbmk/ref/arm/linux/simple-atomic/simerr +++ b/tests/long/se/40.perlbmk/ref/arm/linux/simple-atomic/simerr @@ -1,2 +1,3 @@ warn: Sockets disabled, not accepting gdb connections +warn: ClockedObject: More than one power state change request encountered within the same simulation tick warn: fcntl64(3, 2) passed through to host diff --git a/tests/long/se/40.perlbmk/ref/arm/linux/simple-atomic/simout b/tests/long/se/40.perlbmk/ref/arm/linux/simple-atomic/simout index 82929fd24..74eea3e5b 
100755 --- a/tests/long/se/40.perlbmk/ref/arm/linux/simple-atomic/simout +++ b/tests/long/se/40.perlbmk/ref/arm/linux/simple-atomic/simout @@ -3,10 +3,10 @@ Redirecting stderr to build/ARM/tests/opt/long/se/40.perlbmk/arm/linux/simple-at gem5 Simulator System. http://gem5.org gem5 is copyrighted software; use the --copyright option for details. -gem5 compiled Jul 3 2015 17:56:07 -gem5 started Jul 3 2015 22:26:16 -gem5 executing on ribera.cs.wisc.edu -command line: build/ARM/gem5.opt -d build/ARM/tests/opt/long/se/40.perlbmk/arm/linux/simple-atomic -re /scratch/nilay/GEM5/gem5/tests/run.py build/ARM/tests/opt/long/se/40.perlbmk/arm/linux/simple-atomic +gem5 compiled Jul 21 2016 14:37:41 +gem5 started Jul 21 2016 14:51:02 +gem5 executing on e108600-lin, pid 23320 +command line: /work/curdun01/gem5-external.hg/build/ARM/gem5.opt -d build/ARM/tests/opt/long/se/40.perlbmk/arm/linux/simple-atomic -re /work/curdun01/gem5-external.hg/tests/testing/../run.py long/se/40.perlbmk/arm/linux/simple-atomic Global frequency set at 1000000000000 ticks per second info: Entering event queue @ 0. Starting simulation... diff --git a/tests/long/se/40.perlbmk/ref/arm/linux/simple-atomic/stats.txt b/tests/long/se/40.perlbmk/ref/arm/linux/simple-atomic/stats.txt index e2d47dff8..889d833d4 100644 --- a/tests/long/se/40.perlbmk/ref/arm/linux/simple-atomic/stats.txt +++ b/tests/long/se/40.perlbmk/ref/arm/linux/simple-atomic/stats.txt @@ -4,11 +4,11 @@ sim_seconds 0.395727 # Nu sim_ticks 395726778500 # Number of ticks simulated final_tick 395726778500 # Number of ticks from beginning of simulation (restored from checkpoints and never reset) sim_freq 1000000000000 # Frequency of simulated ticks -host_inst_rate 1817115 # Simulator instruction rate (inst/s) -host_op_rate 2237108 # Simulator op (including micro ops) rate (op/s) -host_tick_rate 1122416416 # Simulator tick rate (ticks/s) -host_mem_usage 311336 # Number of bytes of host memory used -host_seconds 352.57 # Real time elapsed on the host +host_inst_rate 860032 # Simulator instruction rate (inst/s) +host_op_rate 1058813 # Simulator op (including micro ops) rate (op/s) +host_tick_rate 531234389 # Simulator tick rate (ticks/s) +host_mem_usage 264584 # Number of bytes of host memory used +host_seconds 744.92 # Real time elapsed on the host sim_insts 640654411 # Number of instructions simulated sim_ops 788730070 # Number of ops (including micro ops) simulated system.voltage_domain.voltage 1 # Voltage in Volts @@ -237,6 +237,7 @@ system.membus.pkt_size_system.cpu.icache_port::system.physmem.port 2573511596 system.membus.pkt_size_system.cpu.dcache_port::system.physmem.port 1668035929 # Cumulative packet size per connected master and slave (bytes) system.membus.pkt_size::total 4241547525 # Cumulative packet size per connected master and slave (bytes) system.membus.snoops 0 # Total snoops (count) +system.membus.snoopTraffic 0 # Total snoop traffic (bytes) system.membus.snoop_fanout::samples 1022670353 # Request fanout histogram system.membus.snoop_fanout::mean 0.629116 # Request fanout histogram system.membus.snoop_fanout::stdev 0.483042 # Request fanout histogram diff --git a/tests/long/se/40.perlbmk/ref/arm/linux/simple-timing/config.ini b/tests/long/se/40.perlbmk/ref/arm/linux/simple-timing/config.ini index b8d9750e5..ab5a083f3 100644 --- a/tests/long/se/40.perlbmk/ref/arm/linux/simple-timing/config.ini +++ b/tests/long/se/40.perlbmk/ref/arm/linux/simple-timing/config.ini @@ -14,6 +14,7 @@ children=clk_domain cpu cpu_clk_domain dvfs_handler membus physmem 
voltage_domai boot_osflags=a cache_line_size=64 clk_domain=system.clk_domain +default_p_state=UNDEFINED eventq_index=0 exit_on_work_items=false init_param=0 @@ -27,6 +28,10 @@ memories=system.physmem mmap_using_noreserve=false multi_thread=false num_work_ids=16 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null readfile= symbolfile= thermal_components= @@ -55,6 +60,7 @@ branchPred=Null checker=Null clk_domain=system.cpu_clk_domain cpu_id=0 +default_p_state=UNDEFINED do_checkpoint_insts=true do_quiesce=true do_statistics_insts=true @@ -72,6 +78,10 @@ max_insts_any_thread=0 max_loads_all_threads=0 max_loads_any_thread=0 numThreads=1 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null profile=0 progress_interval=0 simpoint_start_insts= @@ -90,12 +100,17 @@ addr_ranges=0:18446744073709551615 assoc=2 clk_domain=system.cpu_clk_domain clusivity=mostly_incl +default_p_state=UNDEFINED demand_mshr_reserve=1 eventq_index=0 hit_latency=2 is_read_only=false max_miss_count=0 mshrs=4 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null prefetch_on_access=false prefetcher=Null response_latency=2 @@ -114,8 +129,13 @@ type=LRU assoc=2 block_size=64 clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 hit_latency=2 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sequential_access=false size=262144 @@ -138,9 +158,14 @@ walker=system.cpu.dstage2_mmu.stage2_tlb.walker [system.cpu.dstage2_mmu.stage2_tlb.walker] type=ArmTableWalker clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 is_stage2=true num_squash_per_cycle=2 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sys=system [system.cpu.dtb] @@ -154,9 +179,14 @@ walker=system.cpu.dtb.walker [system.cpu.dtb.walker] type=ArmTableWalker clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 is_stage2=false num_squash_per_cycle=2 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sys=system port=system.cpu.toL2Bus.slave[3] @@ -167,12 +197,17 @@ addr_ranges=0:18446744073709551615 assoc=2 clk_domain=system.cpu_clk_domain clusivity=mostly_incl +default_p_state=UNDEFINED demand_mshr_reserve=1 eventq_index=0 hit_latency=2 is_read_only=true max_miss_count=0 mshrs=4 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null prefetch_on_access=false prefetcher=Null response_latency=2 @@ -191,8 +226,13 @@ type=LRU assoc=2 block_size=64 clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 hit_latency=2 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sequential_access=false size=131072 @@ -250,9 +290,14 @@ walker=system.cpu.istage2_mmu.stage2_tlb.walker [system.cpu.istage2_mmu.stage2_tlb.walker] type=ArmTableWalker clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 is_stage2=true num_squash_per_cycle=2 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sys=system [system.cpu.itb] @@ -266,9 +311,14 @@ walker=system.cpu.itb.walker [system.cpu.itb.walker] type=ArmTableWalker clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 is_stage2=false 
num_squash_per_cycle=2 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sys=system port=system.cpu.toL2Bus.slave[2] @@ -279,12 +329,17 @@ addr_ranges=0:18446744073709551615 assoc=8 clk_domain=system.cpu_clk_domain clusivity=mostly_incl +default_p_state=UNDEFINED demand_mshr_reserve=1 eventq_index=0 hit_latency=20 is_read_only=false max_miss_count=0 mshrs=20 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null prefetch_on_access=false prefetcher=Null response_latency=20 @@ -303,8 +358,13 @@ type=LRU assoc=8 block_size=64 clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 hit_latency=20 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sequential_access=false size=2097152 @@ -312,10 +372,15 @@ size=2097152 type=CoherentXBar children=snoop_filter clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 forward_latency=0 frontend_latency=1 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 point_of_coherency=false +power_model=Null response_latency=1 snoop_filter=system.cpu.toL2Bus.snoop_filter snoop_response_latency=1 @@ -346,7 +411,7 @@ env= errout=cerr euid=100 eventq_index=0 -executable=/scratch/nilay/GEM5/dist/m5/cpu2000/binaries/arm/linux/perlbmk +executable=/arm/projectscratch/randd/systems/dist/cpu2000/binaries/arm/linux/perlbmk gid=100 input=cin kvmInSE=false @@ -378,10 +443,15 @@ transition_latency=100000000 [system.membus] type=CoherentXBar clk_domain=system.clk_domain +default_p_state=UNDEFINED eventq_index=0 forward_latency=4 frontend_latency=3 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 point_of_coherency=true +power_model=Null response_latency=2 snoop_filter=Null snoop_response_latency=4 @@ -396,11 +466,16 @@ type=SimpleMemory bandwidth=73.000000 clk_domain=system.clk_domain conf_table_reported=true +default_p_state=UNDEFINED eventq_index=0 in_addr_map=true latency=30000 latency_var=0 null=false +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null range=0:134217727 port=system.membus.master[0] diff --git a/tests/long/se/40.perlbmk/ref/arm/linux/simple-timing/simerr b/tests/long/se/40.perlbmk/ref/arm/linux/simple-timing/simerr index 2de5e2759..937e051a4 100755 --- a/tests/long/se/40.perlbmk/ref/arm/linux/simple-timing/simerr +++ b/tests/long/se/40.perlbmk/ref/arm/linux/simple-timing/simerr @@ -1,2 +1,3 @@ warn: Sockets disabled, not accepting gdb connections +warn: ClockedObject: More than one power state change request encountered within the same simulation tick warn: fcntl64(3, 2) passed through to host diff --git a/tests/long/se/40.perlbmk/ref/arm/linux/simple-timing/simout b/tests/long/se/40.perlbmk/ref/arm/linux/simple-timing/simout index 918fa3cf1..75004ec86 100755 --- a/tests/long/se/40.perlbmk/ref/arm/linux/simple-timing/simout +++ b/tests/long/se/40.perlbmk/ref/arm/linux/simple-timing/simout @@ -3,10 +3,10 @@ Redirecting stderr to build/ARM/tests/opt/long/se/40.perlbmk/arm/linux/simple-ti gem5 Simulator System. http://gem5.org gem5 is copyrighted software; use the --copyright option for details. 
-gem5 compiled Jul 3 2015 17:56:07 -gem5 started Jul 3 2015 18:33:02 -gem5 executing on ribera.cs.wisc.edu -command line: build/ARM/gem5.opt -d build/ARM/tests/opt/long/se/40.perlbmk/arm/linux/simple-timing -re /scratch/nilay/GEM5/gem5/tests/run.py build/ARM/tests/opt/long/se/40.perlbmk/arm/linux/simple-timing +gem5 compiled Jul 21 2016 14:37:41 +gem5 started Jul 21 2016 14:46:39 +gem5 executing on e108600-lin, pid 23194 +command line: /work/curdun01/gem5-external.hg/build/ARM/gem5.opt -d build/ARM/tests/opt/long/se/40.perlbmk/arm/linux/simple-timing -re /work/curdun01/gem5-external.hg/tests/testing/../run.py long/se/40.perlbmk/arm/linux/simple-timing Global frequency set at 1000000000000 ticks per second info: Entering event queue @ 0. Starting simulation... diff --git a/tests/long/se/40.perlbmk/ref/arm/linux/simple-timing/stats.txt b/tests/long/se/40.perlbmk/ref/arm/linux/simple-timing/stats.txt index fc47d4b38..3a062984a 100644 --- a/tests/long/se/40.perlbmk/ref/arm/linux/simple-timing/stats.txt +++ b/tests/long/se/40.perlbmk/ref/arm/linux/simple-timing/stats.txt @@ -4,11 +4,11 @@ sim_seconds 1.045756 # Nu sim_ticks 1045756396500 # Number of ticks simulated final_tick 1045756396500 # Number of ticks from beginning of simulation (restored from checkpoints and never reset) sim_freq 1000000000000 # Frequency of simulated ticks -host_inst_rate 1150404 # Simulator instruction rate (inst/s) -host_op_rate 1413341 # Simulator op (including micro ops) rate (op/s) -host_tick_rate 1881615398 # Simulator tick rate (ticks/s) -host_mem_usage 320304 # Number of bytes of host memory used -host_seconds 555.78 # Real time elapsed on the host +host_inst_rate 546786 # Simulator instruction rate (inst/s) +host_op_rate 671760 # Simulator op (including micro ops) rate (op/s) +host_tick_rate 894330624 # Simulator tick rate (ticks/s) +host_mem_usage 273552 # Number of bytes of host memory used +host_seconds 1169.32 # Real time elapsed on the host sim_insts 639366787 # Number of instructions simulated sim_ops 785501035 # Number of ops (including micro ops) simulated system.voltage_domain.voltage 1 # Voltage in Volts @@ -627,6 +627,7 @@ system.cpu.toL2Bus.pkt_size_system.cpu.icache.mem_side::system.cpu.l2cache.cpu_s system.cpu.toL2Bus.pkt_size_system.cpu.dcache.mem_side::system.cpu.l2cache.cpu_side 55752768 # Cumulative packet size per connected master and slave (bytes) system.cpu.toL2Bus.pkt_size::total 56967296 # Cumulative packet size per connected master and slave (bytes) system.cpu.toL2Bus.snoops 257772 # Total snoops (count) +system.cpu.toL2Bus.snoopTraffic 4230272 # Total snoop traffic (bytes) system.cpu.toL2Bus.snoop_fanout::samples 1050122 # Request fanout histogram system.cpu.toL2Bus.snoop_fanout::mean 0.002597 # Request fanout histogram system.cpu.toL2Bus.snoop_fanout::stdev 0.051024 # Request fanout histogram @@ -656,6 +657,7 @@ system.membus.pkt_count::total 836928 # Pa system.membus.pkt_size_system.cpu.l2cache.mem_side::system.physmem.port 22813824 # Cumulative packet size per connected master and slave (bytes) system.membus.pkt_size::total 22813824 # Cumulative packet size per connected master and slave (bytes) system.membus.snoops 0 # Total snoops (count) +system.membus.snoopTraffic 0 # Total snoop traffic (bytes) system.membus.snoop_fanout::samples 546561 # Request fanout histogram system.membus.snoop_fanout::mean 0 # Request fanout histogram system.membus.snoop_fanout::stdev 0 # Request fanout histogram diff --git a/tests/long/se/50.vortex/ref/alpha/tru64/minor-timing/config.ini 
b/tests/long/se/50.vortex/ref/alpha/tru64/minor-timing/config.ini index a6321b5a0..4117f093b 100644 --- a/tests/long/se/50.vortex/ref/alpha/tru64/minor-timing/config.ini +++ b/tests/long/se/50.vortex/ref/alpha/tru64/minor-timing/config.ini @@ -14,7 +14,9 @@ children=clk_domain cpu cpu_clk_domain dvfs_handler membus physmem voltage_domai boot_osflags=a cache_line_size=64 clk_domain=system.clk_domain +default_p_state=UNDEFINED eventq_index=0 +exit_on_work_items=false init_param=0 kernel= kernel_addr_check=true @@ -24,9 +26,16 @@ mem_mode=timing mem_ranges= memories=system.physmem mmap_using_noreserve=false +multi_thread=false num_work_ids=16 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null readfile= symbolfile= +thermal_components= +thermal_model=Null work_begin_ckpt_count=0 work_begin_cpu_id_exit=-1 work_begin_exit_count=0 @@ -55,6 +64,7 @@ decodeCycleInput=true decodeInputBufferSize=3 decodeInputWidth=2 decodeToExecuteForwardDelay=1 +default_p_state=UNDEFINED do_checkpoint_insts=true do_quiesce=true do_statistics_insts=true @@ -97,12 +107,17 @@ max_insts_any_thread=0 max_loads_all_threads=0 max_loads_any_thread=0 numThreads=1 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null profile=0 progress_interval=0 simpoint_start_insts= socket_id=0 switched_out=false system=system +threadPolicy=RoundRobin tracer=system.cpu.tracer workload=system.cpu.workload dcache_port=system.cpu.dcache.cpu_side @@ -118,11 +133,18 @@ choicePredictorSize=8192 eventq_index=0 globalCtrBits=2 globalPredictorSize=8192 +indirectHashGHR=true +indirectHashTargets=true +indirectPathLength=3 +indirectSets=256 +indirectTagSize=16 +indirectWays=2 instShiftAmt=2 localCtrBits=2 localHistoryTableSize=2048 localPredictorSize=2048 numThreads=1 +useIndirect=true [system.cpu.dcache] type=Cache @@ -130,13 +152,18 @@ children=tags addr_ranges=0:18446744073709551615 assoc=2 clk_domain=system.cpu_clk_domain +clusivity=mostly_incl +default_p_state=UNDEFINED demand_mshr_reserve=1 eventq_index=0 -forward_snoops=true hit_latency=2 is_read_only=false max_miss_count=0 mshrs=4 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null prefetch_on_access=false prefetcher=Null response_latency=2 @@ -146,6 +173,7 @@ system=system tags=system.cpu.dcache.tags tgts_per_mshr=20 write_buffers=8 +writeback_clean=false cpu_side=system.cpu.dcache_port mem_side=system.cpu.toL2Bus.slave[1] @@ -154,8 +182,13 @@ type=LRU assoc=2 block_size=64 clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 hit_latency=2 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sequential_access=false size=262144 @@ -553,13 +586,18 @@ children=tags addr_ranges=0:18446744073709551615 assoc=2 clk_domain=system.cpu_clk_domain +clusivity=mostly_incl +default_p_state=UNDEFINED demand_mshr_reserve=1 eventq_index=0 -forward_snoops=true hit_latency=2 is_read_only=true max_miss_count=0 mshrs=4 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null prefetch_on_access=false prefetcher=Null response_latency=2 @@ -569,6 +607,7 @@ system=system tags=system.cpu.icache.tags tgts_per_mshr=20 write_buffers=8 +writeback_clean=true cpu_side=system.cpu.icache_port mem_side=system.cpu.toL2Bus.slave[0] @@ -577,8 +616,13 @@ type=LRU assoc=2 block_size=64 clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED 
eventq_index=0
hit_latency=2
+p_state_clk_gate_bins=20
+p_state_clk_gate_max=1000000000000
+p_state_clk_gate_min=1000
+power_model=Null
sequential_access=false
size=131072
@@ -602,13 +646,18 @@ children=tags
addr_ranges=0:18446744073709551615
assoc=8
clk_domain=system.cpu_clk_domain
+clusivity=mostly_incl
+default_p_state=UNDEFINED
demand_mshr_reserve=1
eventq_index=0
-forward_snoops=true
hit_latency=20
is_read_only=false
max_miss_count=0
mshrs=20
+p_state_clk_gate_bins=20
+p_state_clk_gate_max=1000000000000
+p_state_clk_gate_min=1000
+power_model=Null
prefetch_on_access=false
prefetcher=Null
response_latency=20
@@ -618,6 +667,7 @@ system=system
tags=system.cpu.l2cache.tags
tgts_per_mshr=12
write_buffers=8
+writeback_clean=false
cpu_side=system.cpu.toL2Bus.master[0]
mem_side=system.membus.slave[1]
@@ -626,19 +676,31 @@ type=LRU
assoc=8
block_size=64
clk_domain=system.cpu_clk_domain
+default_p_state=UNDEFINED
eventq_index=0
hit_latency=20
+p_state_clk_gate_bins=20
+p_state_clk_gate_max=1000000000000
+p_state_clk_gate_min=1000
+power_model=Null
sequential_access=false
size=2097152

[system.cpu.toL2Bus]
type=CoherentXBar
+children=snoop_filter
clk_domain=system.cpu_clk_domain
+default_p_state=UNDEFINED
eventq_index=0
forward_latency=0
frontend_latency=1
+p_state_clk_gate_bins=20
+p_state_clk_gate_max=1000000000000
+p_state_clk_gate_min=1000
+point_of_coherency=false
+power_model=Null
response_latency=1
-snoop_filter=Null
+snoop_filter=system.cpu.toL2Bus.snoop_filter
snoop_response_latency=1
system=system
use_default_range=false
@@ -646,6 +708,13 @@ width=32
master=system.cpu.l2cache.cpu_side
slave=system.cpu.icache.mem_side system.cpu.dcache.mem_side

+[system.cpu.toL2Bus.snoop_filter]
+type=SnoopFilter
+eventq_index=0
+lookup_latency=0
+max_capacity=8388608
+system=system
+
[system.cpu.tracer]
type=ExeTracer
eventq_index=0
@@ -660,7 +729,7 @@ env=
errout=cerr
euid=100
eventq_index=0
-executable=/scratch/nilay/GEM5/dist/m5/cpu2000/binaries/alpha/tru64/vortex
+executable=/arm/projectscratch/randd/systems/dist/cpu2000/binaries/alpha/tru64/vortex
gid=100
input=cin
kvmInSE=false
@@ -692,9 +761,15 @@ transition_latency=100000000
[system.membus]
type=CoherentXBar
clk_domain=system.clk_domain
+default_p_state=UNDEFINED
eventq_index=0
forward_latency=4
frontend_latency=3
+p_state_clk_gate_bins=20
+p_state_clk_gate_max=1000000000000
+p_state_clk_gate_min=1000
+point_of_coherency=true
+power_model=Null
response_latency=2
snoop_filter=Null
snoop_response_latency=4
@@ -738,6 +813,7 @@ burst_length=8
channels=1
clk_domain=system.clk_domain
conf_table_reported=true
+default_p_state=UNDEFINED
device_bus_width=8
device_rowbuffer_size=1024
device_size=536870912
@@ -749,7 +825,11 @@ max_accesses_per_row=16
mem_sched_policy=frfcfs
min_writes_per_switch=16
null=false
+p_state_clk_gate_bins=20
+p_state_clk_gate_max=1000000000000
+p_state_clk_gate_min=1000
page_policy=open_adaptive
+power_model=Null
range=0:134217727
ranks_per_channel=2
read_buffer_size=32
diff --git a/tests/long/se/50.vortex/ref/alpha/tru64/minor-timing/simerr b/tests/long/se/50.vortex/ref/alpha/tru64/minor-timing/simerr
index f0a9a7c93..e0bca4e4e 100755
--- a/tests/long/se/50.vortex/ref/alpha/tru64/minor-timing/simerr
+++ b/tests/long/se/50.vortex/ref/alpha/tru64/minor-timing/simerr
@@ -1,5 +1,6 @@
warn: DRAM device capacity (8192 Mbytes) does not match the address range assigned (128 Mbytes)
warn: Sockets disabled, not accepting gdb connections
+warn: ClockedObject: More than one power state change request encountered within the same simulation tick
warn: Prefetch instructions in Alpha do not do anything
warn: Prefetch instructions in Alpha do not do anything
warn: Prefetch instructions in Alpha do not do anything
diff --git a/tests/long/se/50.vortex/ref/alpha/tru64/minor-timing/simout b/tests/long/se/50.vortex/ref/alpha/tru64/minor-timing/simout
index 9dd4d1ffb..dcc24233a 100755
--- a/tests/long/se/50.vortex/ref/alpha/tru64/minor-timing/simout
+++ b/tests/long/se/50.vortex/ref/alpha/tru64/minor-timing/simout
@@ -3,12 +3,12 @@ Redirecting stderr to build/ALPHA/tests/opt/long/se/50.vortex/alpha/tru64/minor-
gem5 Simulator System. http://gem5.org
gem5 is copyrighted software; use the --copyright option for details.

-gem5 compiled Sep 14 2015 20:54:01
-gem5 started Sep 14 2015 21:22:43
-gem5 executing on ribera.cs.wisc.edu
-command line: build/ALPHA/gem5.opt -d build/ALPHA/tests/opt/long/se/50.vortex/alpha/tru64/minor-timing -re /scratch/nilay/GEM5/gem5/tests/run.py build/ALPHA/tests/opt/long/se/50.vortex/alpha/tru64/minor-timing
+gem5 compiled Jul 19 2016 12:23:51
+gem5 started Jul 21 2016 14:09:29
+gem5 executing on e108600-lin, pid 4306
+command line: /work/curdun01/gem5-external.hg/build/ALPHA/gem5.opt -d build/ALPHA/tests/opt/long/se/50.vortex/alpha/tru64/minor-timing -re /work/curdun01/gem5-external.hg/tests/testing/../run.py long/se/50.vortex/alpha/tru64/minor-timing

Global frequency set at 1000000000000 ticks per second
info: Entering event queue @ 0. Starting simulation...
info: Increasing stack size by one page.
-Exiting @ tick 59549031000 because target called exit()
+Exiting @ tick 60000593000 because target called exit()
diff --git a/tests/long/se/50.vortex/ref/alpha/tru64/minor-timing/stats.txt b/tests/long/se/50.vortex/ref/alpha/tru64/minor-timing/stats.txt
index 7d04a6897..6234d30e2 100644
--- a/tests/long/se/50.vortex/ref/alpha/tru64/minor-timing/stats.txt
+++ b/tests/long/se/50.vortex/ref/alpha/tru64/minor-timing/stats.txt
@@ -1,106 +1,106 @@
---------- Begin Simulation Statistics ----------
-sim_seconds 0.059447 # Number of seconds simulated
-sim_ticks 59447065000 # Number of ticks simulated
-final_tick 59447065000 # Number of ticks from beginning of simulation (restored from checkpoints and never reset)
+sim_seconds 0.060001 # Number of seconds simulated
+sim_ticks 60000593000 # Number of ticks simulated
+final_tick 60000593000 # Number of ticks from beginning of simulation (restored from checkpoints and never reset)
sim_freq 1000000000000 # Frequency of simulated ticks
-host_inst_rate 518825 # Simulator instruction rate (inst/s)
-host_op_rate 518825 # Simulator op (including micro ops) rate (op/s)
-host_tick_rate 348748418 # Simulator tick rate (ticks/s)
-host_mem_usage 305412 # Number of bytes of host memory used
-host_seconds 170.46 # Real time elapsed on the host
+host_inst_rate 262235 # Simulator instruction rate (inst/s)
+host_op_rate 262235 # Simulator op (including micro ops) rate (op/s)
+host_tick_rate 177912819 # Simulator tick rate (ticks/s)
+host_mem_usage 257844 # Number of bytes of host memory used
+host_seconds 337.25 # Real time elapsed on the host
sim_insts 88438073 # Number of instructions simulated
sim_ops 88438073 # Number of ops (including micro ops) simulated
system.voltage_domain.voltage 1 # Voltage in Volts
system.clk_domain.clock 1000 # Clock period in ticks
-system.physmem.pwrStateResidencyTicks::UNDEFINED 59447065000 # Cumulative time (in ticks) in various power states
-system.physmem.bytes_read::cpu.inst 432832 # Number of bytes read from this memory
-system.physmem.bytes_read::cpu.data 10149568 # Number of bytes read from this memory -system.physmem.bytes_read::total 10582400 # Number of bytes read from this memory -system.physmem.bytes_inst_read::cpu.inst 432832 # Number of instructions bytes read from this memory -system.physmem.bytes_inst_read::total 432832 # Number of instructions bytes read from this memory -system.physmem.bytes_written::writebacks 7326016 # Number of bytes written to this memory -system.physmem.bytes_written::total 7326016 # Number of bytes written to this memory -system.physmem.num_reads::cpu.inst 6763 # Number of read requests responded to by this memory -system.physmem.num_reads::cpu.data 158587 # Number of read requests responded to by this memory -system.physmem.num_reads::total 165350 # Number of read requests responded to by this memory -system.physmem.num_writes::writebacks 114469 # Number of write requests responded to by this memory -system.physmem.num_writes::total 114469 # Number of write requests responded to by this memory -system.physmem.bw_read::cpu.inst 7280965 # Total read bandwidth from this memory (bytes/s) -system.physmem.bw_read::cpu.data 170732870 # Total read bandwidth from this memory (bytes/s) -system.physmem.bw_read::total 178013835 # Total read bandwidth from this memory (bytes/s) -system.physmem.bw_inst_read::cpu.inst 7280965 # Instruction read bandwidth from this memory (bytes/s) -system.physmem.bw_inst_read::total 7280965 # Instruction read bandwidth from this memory (bytes/s) -system.physmem.bw_write::writebacks 123235958 # Write bandwidth from this memory (bytes/s) -system.physmem.bw_write::total 123235958 # Write bandwidth from this memory (bytes/s) -system.physmem.bw_total::writebacks 123235958 # Total bandwidth to/from this memory (bytes/s) -system.physmem.bw_total::cpu.inst 7280965 # Total bandwidth to/from this memory (bytes/s) -system.physmem.bw_total::cpu.data 170732870 # Total bandwidth to/from this memory (bytes/s) -system.physmem.bw_total::total 301249793 # Total bandwidth to/from this memory (bytes/s) -system.physmem.readReqs 165350 # Number of read requests accepted -system.physmem.writeReqs 114469 # Number of write requests accepted -system.physmem.readBursts 165350 # Number of DRAM read bursts, including those serviced by the write queue -system.physmem.writeBursts 114469 # Number of DRAM write bursts, including those merged in the write queue -system.physmem.bytesReadDRAM 10581952 # Total number of bytes read from DRAM -system.physmem.bytesReadWrQ 448 # Total number of bytes read from write queue -system.physmem.bytesWritten 7323968 # Total number of bytes written to DRAM -system.physmem.bytesReadSys 10582400 # Total read bytes from the system interface side -system.physmem.bytesWrittenSys 7326016 # Total written bytes from the system interface side -system.physmem.servicedByWrQ 7 # Number of DRAM read bursts serviced by the write queue +system.physmem.pwrStateResidencyTicks::UNDEFINED 60000593000 # Cumulative time (in ticks) in various power states +system.physmem.bytes_read::cpu.inst 433344 # Number of bytes read from this memory +system.physmem.bytes_read::cpu.data 10150272 # Number of bytes read from this memory +system.physmem.bytes_read::total 10583616 # Number of bytes read from this memory +system.physmem.bytes_inst_read::cpu.inst 433344 # Number of instructions bytes read from this memory +system.physmem.bytes_inst_read::total 433344 # Number of instructions bytes read from this memory +system.physmem.bytes_written::writebacks 7325952 # Number of bytes 
written to this memory +system.physmem.bytes_written::total 7325952 # Number of bytes written to this memory +system.physmem.num_reads::cpu.inst 6771 # Number of read requests responded to by this memory +system.physmem.num_reads::cpu.data 158598 # Number of read requests responded to by this memory +system.physmem.num_reads::total 165369 # Number of read requests responded to by this memory +system.physmem.num_writes::writebacks 114468 # Number of write requests responded to by this memory +system.physmem.num_writes::total 114468 # Number of write requests responded to by this memory +system.physmem.bw_read::cpu.inst 7222329 # Total read bandwidth from this memory (bytes/s) +system.physmem.bw_read::cpu.data 169169528 # Total read bandwidth from this memory (bytes/s) +system.physmem.bw_read::total 176391857 # Total read bandwidth from this memory (bytes/s) +system.physmem.bw_inst_read::cpu.inst 7222329 # Instruction read bandwidth from this memory (bytes/s) +system.physmem.bw_inst_read::total 7222329 # Instruction read bandwidth from this memory (bytes/s) +system.physmem.bw_write::writebacks 122097993 # Write bandwidth from this memory (bytes/s) +system.physmem.bw_write::total 122097993 # Write bandwidth from this memory (bytes/s) +system.physmem.bw_total::writebacks 122097993 # Total bandwidth to/from this memory (bytes/s) +system.physmem.bw_total::cpu.inst 7222329 # Total bandwidth to/from this memory (bytes/s) +system.physmem.bw_total::cpu.data 169169528 # Total bandwidth to/from this memory (bytes/s) +system.physmem.bw_total::total 298489850 # Total bandwidth to/from this memory (bytes/s) +system.physmem.readReqs 165369 # Number of read requests accepted +system.physmem.writeReqs 114468 # Number of write requests accepted +system.physmem.readBursts 165369 # Number of DRAM read bursts, including those serviced by the write queue +system.physmem.writeBursts 114468 # Number of DRAM write bursts, including those merged in the write queue +system.physmem.bytesReadDRAM 10583232 # Total number of bytes read from DRAM +system.physmem.bytesReadWrQ 384 # Total number of bytes read from write queue +system.physmem.bytesWritten 7324288 # Total number of bytes written to DRAM +system.physmem.bytesReadSys 10583616 # Total read bytes from the system interface side +system.physmem.bytesWrittenSys 7325952 # Total written bytes from the system interface side +system.physmem.servicedByWrQ 6 # Number of DRAM read bursts serviced by the write queue system.physmem.mergedWrBursts 0 # Number of DRAM write bursts merged with an existing one system.physmem.neitherReadNorWriteReqs 0 # Number of requests that are neither read nor write -system.physmem.perBankRdBursts::0 10315 # Per bank write bursts -system.physmem.perBankRdBursts::1 10360 # Per bank write bursts +system.physmem.perBankRdBursts::0 10322 # Per bank write bursts +system.physmem.perBankRdBursts::1 10363 # Per bank write bursts system.physmem.perBankRdBursts::2 10206 # Per bank write bursts -system.physmem.perBankRdBursts::3 10057 # Per bank write bursts -system.physmem.perBankRdBursts::4 10348 # Per bank write bursts +system.physmem.perBankRdBursts::3 10055 # Per bank write bursts +system.physmem.perBankRdBursts::4 10347 # Per bank write bursts system.physmem.perBankRdBursts::5 10343 # Per bank write bursts -system.physmem.perBankRdBursts::6 9775 # Per bank write bursts -system.physmem.perBankRdBursts::7 10207 # Per bank write bursts -system.physmem.perBankRdBursts::8 10536 # Per bank write bursts -system.physmem.perBankRdBursts::9 10606 # Per bank 
write bursts -system.physmem.perBankRdBursts::10 10500 # Per bank write bursts -system.physmem.perBankRdBursts::11 10228 # Per bank write bursts -system.physmem.perBankRdBursts::12 10273 # Per bank write bursts -system.physmem.perBankRdBursts::13 10559 # Per bank write bursts -system.physmem.perBankRdBursts::14 10465 # Per bank write bursts -system.physmem.perBankRdBursts::15 10565 # Per bank write bursts +system.physmem.perBankRdBursts::6 9774 # Per bank write bursts +system.physmem.perBankRdBursts::7 10209 # Per bank write bursts +system.physmem.perBankRdBursts::8 10543 # Per bank write bursts +system.physmem.perBankRdBursts::9 10609 # Per bank write bursts +system.physmem.perBankRdBursts::10 10499 # Per bank write bursts +system.physmem.perBankRdBursts::11 10227 # Per bank write bursts +system.physmem.perBankRdBursts::12 10274 # Per bank write bursts +system.physmem.perBankRdBursts::13 10565 # Per bank write bursts +system.physmem.perBankRdBursts::14 10463 # Per bank write bursts +system.physmem.perBankRdBursts::15 10564 # Per bank write bursts system.physmem.perBankWrBursts::0 7163 # Per bank write bursts system.physmem.perBankWrBursts::1 7274 # Per bank write bursts system.physmem.perBankWrBursts::2 7296 # Per bank write bursts -system.physmem.perBankWrBursts::3 7002 # Per bank write bursts +system.physmem.perBankWrBursts::3 7001 # Per bank write bursts system.physmem.perBankWrBursts::4 7127 # Per bank write bursts -system.physmem.perBankWrBursts::5 7186 # Per bank write bursts +system.physmem.perBankWrBursts::5 7187 # Per bank write bursts system.physmem.perBankWrBursts::6 6833 # Per bank write bursts -system.physmem.perBankWrBursts::7 7099 # Per bank write bursts -system.physmem.perBankWrBursts::8 7226 # Per bank write bursts -system.physmem.perBankWrBursts::9 6999 # Per bank write bursts +system.physmem.perBankWrBursts::7 7100 # Per bank write bursts +system.physmem.perBankWrBursts::8 7227 # Per bank write bursts +system.physmem.perBankWrBursts::9 7003 # Per bank write bursts system.physmem.perBankWrBursts::10 7117 # Per bank write bursts -system.physmem.perBankWrBursts::11 7034 # Per bank write bursts +system.physmem.perBankWrBursts::11 7031 # Per bank write bursts system.physmem.perBankWrBursts::12 6992 # Per bank write bursts -system.physmem.perBankWrBursts::13 7299 # Per bank write bursts -system.physmem.perBankWrBursts::14 7307 # Per bank write bursts -system.physmem.perBankWrBursts::15 7483 # Per bank write bursts +system.physmem.perBankWrBursts::13 7301 # Per bank write bursts +system.physmem.perBankWrBursts::14 7308 # Per bank write bursts +system.physmem.perBankWrBursts::15 7482 # Per bank write bursts system.physmem.numRdRetry 0 # Number of times read queue was full causing retry system.physmem.numWrRetry 0 # Number of times write queue was full causing retry -system.physmem.totGap 59447041000 # Total gap between requests +system.physmem.totGap 60000569500 # Total gap between requests system.physmem.readPktSize::0 0 # Read request sizes (log2) system.physmem.readPktSize::1 0 # Read request sizes (log2) system.physmem.readPktSize::2 0 # Read request sizes (log2) system.physmem.readPktSize::3 0 # Read request sizes (log2) system.physmem.readPktSize::4 0 # Read request sizes (log2) system.physmem.readPktSize::5 0 # Read request sizes (log2) -system.physmem.readPktSize::6 165350 # Read request sizes (log2) +system.physmem.readPktSize::6 165369 # Read request sizes (log2) system.physmem.writePktSize::0 0 # Write request sizes (log2) system.physmem.writePktSize::1 0 # Write 
request sizes (log2) system.physmem.writePktSize::2 0 # Write request sizes (log2) system.physmem.writePktSize::3 0 # Write request sizes (log2) system.physmem.writePktSize::4 0 # Write request sizes (log2) system.physmem.writePktSize::5 0 # Write request sizes (log2) -system.physmem.writePktSize::6 114469 # Write request sizes (log2) -system.physmem.rdQLenPdf::0 163735 # What read queue length does an incoming req see -system.physmem.rdQLenPdf::1 1580 # What read queue length does an incoming req see -system.physmem.rdQLenPdf::2 28 # What read queue length does an incoming req see +system.physmem.writePktSize::6 114468 # Write request sizes (log2) +system.physmem.rdQLenPdf::0 164021 # What read queue length does an incoming req see +system.physmem.rdQLenPdf::1 1324 # What read queue length does an incoming req see +system.physmem.rdQLenPdf::2 18 # What read queue length does an incoming req see system.physmem.rdQLenPdf::3 0 # What read queue length does an incoming req see system.physmem.rdQLenPdf::4 0 # What read queue length does an incoming req see system.physmem.rdQLenPdf::5 0 # What read queue length does an incoming req see @@ -145,27 +145,27 @@ system.physmem.wrQLenPdf::11 1 # Wh system.physmem.wrQLenPdf::12 1 # What write queue length does an incoming req see system.physmem.wrQLenPdf::13 1 # What write queue length does an incoming req see system.physmem.wrQLenPdf::14 1 # What write queue length does an incoming req see -system.physmem.wrQLenPdf::15 750 # What write queue length does an incoming req see -system.physmem.wrQLenPdf::16 772 # What write queue length does an incoming req see +system.physmem.wrQLenPdf::15 736 # What write queue length does an incoming req see +system.physmem.wrQLenPdf::16 758 # What write queue length does an incoming req see system.physmem.wrQLenPdf::17 6187 # What write queue length does an incoming req see -system.physmem.wrQLenPdf::18 7002 # What write queue length does an incoming req see -system.physmem.wrQLenPdf::19 7044 # What write queue length does an incoming req see -system.physmem.wrQLenPdf::20 7073 # What write queue length does an incoming req see -system.physmem.wrQLenPdf::21 7064 # What write queue length does an incoming req see -system.physmem.wrQLenPdf::22 7070 # What write queue length does an incoming req see -system.physmem.wrQLenPdf::23 7073 # What write queue length does an incoming req see -system.physmem.wrQLenPdf::24 7076 # What write queue length does an incoming req see -system.physmem.wrQLenPdf::25 7124 # What write queue length does an incoming req see -system.physmem.wrQLenPdf::26 7113 # What write queue length does an incoming req see -system.physmem.wrQLenPdf::27 7242 # What write queue length does an incoming req see -system.physmem.wrQLenPdf::28 7218 # What write queue length does an incoming req see -system.physmem.wrQLenPdf::29 7141 # What write queue length does an incoming req see -system.physmem.wrQLenPdf::30 7356 # What write queue length does an incoming req see -system.physmem.wrQLenPdf::31 7098 # What write queue length does an incoming req see -system.physmem.wrQLenPdf::32 7043 # What write queue length does an incoming req see -system.physmem.wrQLenPdf::33 8 # What write queue length does an incoming req see -system.physmem.wrQLenPdf::34 0 # What write queue length does an incoming req see -system.physmem.wrQLenPdf::35 0 # What write queue length does an incoming req see +system.physmem.wrQLenPdf::18 6999 # What write queue length does an incoming req see +system.physmem.wrQLenPdf::19 7050 # What write 
queue length does an incoming req see +system.physmem.wrQLenPdf::20 7061 # What write queue length does an incoming req see +system.physmem.wrQLenPdf::21 7059 # What write queue length does an incoming req see +system.physmem.wrQLenPdf::22 7071 # What write queue length does an incoming req see +system.physmem.wrQLenPdf::23 7072 # What write queue length does an incoming req see +system.physmem.wrQLenPdf::24 7099 # What write queue length does an incoming req see +system.physmem.wrQLenPdf::25 7116 # What write queue length does an incoming req see +system.physmem.wrQLenPdf::26 7119 # What write queue length does an incoming req see +system.physmem.wrQLenPdf::27 7227 # What write queue length does an incoming req see +system.physmem.wrQLenPdf::28 7244 # What write queue length does an incoming req see +system.physmem.wrQLenPdf::29 7151 # What write queue length does an incoming req see +system.physmem.wrQLenPdf::30 7350 # What write queue length does an incoming req see +system.physmem.wrQLenPdf::31 7097 # What write queue length does an incoming req see +system.physmem.wrQLenPdf::32 7044 # What write queue length does an incoming req see +system.physmem.wrQLenPdf::33 10 # What write queue length does an incoming req see +system.physmem.wrQLenPdf::34 2 # What write queue length does an incoming req see +system.physmem.wrQLenPdf::35 1 # What write queue length does an incoming req see system.physmem.wrQLenPdf::36 0 # What write queue length does an incoming req see system.physmem.wrQLenPdf::37 0 # What write queue length does an incoming req see system.physmem.wrQLenPdf::38 0 # What write queue length does an incoming req see @@ -194,127 +194,126 @@ system.physmem.wrQLenPdf::60 0 # Wh system.physmem.wrQLenPdf::61 0 # What write queue length does an incoming req see system.physmem.wrQLenPdf::62 0 # What write queue length does an incoming req see system.physmem.wrQLenPdf::63 0 # What write queue length does an incoming req see -system.physmem.bytesPerActivate::samples 54692 # Bytes accessed per row activation -system.physmem.bytesPerActivate::mean 327.365172 # Bytes accessed per row activation -system.physmem.bytesPerActivate::gmean 194.328231 # Bytes accessed per row activation -system.physmem.bytesPerActivate::stdev 330.549756 # Bytes accessed per row activation -system.physmem.bytesPerActivate::0-127 19615 35.86% 35.86% # Bytes accessed per row activation -system.physmem.bytesPerActivate::128-255 11787 21.55% 57.42% # Bytes accessed per row activation -system.physmem.bytesPerActivate::256-383 5586 10.21% 67.63% # Bytes accessed per row activation -system.physmem.bytesPerActivate::384-511 3666 6.70% 74.33% # Bytes accessed per row activation -system.physmem.bytesPerActivate::512-639 2860 5.23% 79.56% # Bytes accessed per row activation -system.physmem.bytesPerActivate::640-767 2087 3.82% 83.38% # Bytes accessed per row activation -system.physmem.bytesPerActivate::768-895 1603 2.93% 86.31% # Bytes accessed per row activation -system.physmem.bytesPerActivate::896-1023 1458 2.67% 88.97% # Bytes accessed per row activation -system.physmem.bytesPerActivate::1024-1151 6030 11.03% 100.00% # Bytes accessed per row activation -system.physmem.bytesPerActivate::total 54692 # Bytes accessed per row activation -system.physmem.rdPerTurnAround::samples 7042 # Reads before turning the bus around for writes -system.physmem.rdPerTurnAround::mean 23.476853 # Reads before turning the bus around for writes -system.physmem.rdPerTurnAround::stdev 336.379045 # Reads before turning the bus around for writes 
-system.physmem.rdPerTurnAround::0-1023 7039 99.96% 99.96% # Reads before turning the bus around for writes -system.physmem.rdPerTurnAround::1024-2047 2 0.03% 99.99% # Reads before turning the bus around for writes +system.physmem.bytesPerActivate::samples 54736 # Bytes accessed per row activation +system.physmem.bytesPerActivate::mean 327.137094 # Bytes accessed per row activation +system.physmem.bytesPerActivate::gmean 194.166991 # Bytes accessed per row activation +system.physmem.bytesPerActivate::stdev 330.705237 # Bytes accessed per row activation +system.physmem.bytesPerActivate::0-127 19617 35.84% 35.84% # Bytes accessed per row activation +system.physmem.bytesPerActivate::128-255 11794 21.55% 57.39% # Bytes accessed per row activation +system.physmem.bytesPerActivate::256-383 5683 10.38% 67.77% # Bytes accessed per row activation +system.physmem.bytesPerActivate::384-511 3657 6.68% 74.45% # Bytes accessed per row activation +system.physmem.bytesPerActivate::512-639 2805 5.12% 79.57% # Bytes accessed per row activation +system.physmem.bytesPerActivate::640-767 2027 3.70% 83.28% # Bytes accessed per row activation +system.physmem.bytesPerActivate::768-895 1612 2.95% 86.22% # Bytes accessed per row activation +system.physmem.bytesPerActivate::896-1023 1505 2.75% 88.97% # Bytes accessed per row activation +system.physmem.bytesPerActivate::1024-1151 6036 11.03% 100.00% # Bytes accessed per row activation +system.physmem.bytesPerActivate::total 54736 # Bytes accessed per row activation +system.physmem.rdPerTurnAround::samples 7044 # Reads before turning the bus around for writes +system.physmem.rdPerTurnAround::mean 23.474162 # Reads before turning the bus around for writes +system.physmem.rdPerTurnAround::stdev 336.252876 # Reads before turning the bus around for writes +system.physmem.rdPerTurnAround::0-1023 7042 99.97% 99.97% # Reads before turning the bus around for writes +system.physmem.rdPerTurnAround::1024-2047 1 0.01% 99.99% # Reads before turning the bus around for writes system.physmem.rdPerTurnAround::27648-28671 1 0.01% 100.00% # Reads before turning the bus around for writes -system.physmem.rdPerTurnAround::total 7042 # Reads before turning the bus around for writes -system.physmem.wrPerTurnAround::samples 7042 # Writes before turning the bus around for reads -system.physmem.wrPerTurnAround::mean 16.250639 # Writes before turning the bus around for reads -system.physmem.wrPerTurnAround::gmean 16.234557 # Writes before turning the bus around for reads -system.physmem.wrPerTurnAround::stdev 0.758479 # Writes before turning the bus around for reads -system.physmem.wrPerTurnAround::16 6275 89.11% 89.11% # Writes before turning the bus around for reads -system.physmem.wrPerTurnAround::17 11 0.16% 89.26% # Writes before turning the bus around for reads -system.physmem.wrPerTurnAround::18 574 8.15% 97.42% # Writes before turning the bus around for reads -system.physmem.wrPerTurnAround::19 150 2.13% 99.55% # Writes before turning the bus around for reads -system.physmem.wrPerTurnAround::20 18 0.26% 99.80% # Writes before turning the bus around for reads -system.physmem.wrPerTurnAround::21 9 0.13% 99.93% # Writes before turning the bus around for reads -system.physmem.wrPerTurnAround::22 2 0.03% 99.96% # Writes before turning the bus around for reads -system.physmem.wrPerTurnAround::23 1 0.01% 99.97% # Writes before turning the bus around for reads -system.physmem.wrPerTurnAround::25 1 0.01% 99.99% # Writes before turning the bus around for reads -system.physmem.wrPerTurnAround::27 
1 0.01% 100.00% # Writes before turning the bus around for reads -system.physmem.wrPerTurnAround::total 7042 # Writes before turning the bus around for reads -system.physmem.totQLat 1988923000 # Total ticks spent queuing -system.physmem.totMemAccLat 5089104250 # Total ticks spent from burst creation until serviced by the DRAM -system.physmem.totBusLat 826715000 # Total ticks spent in databus transfers -system.physmem.avgQLat 12029.07 # Average queueing delay per DRAM burst +system.physmem.rdPerTurnAround::total 7044 # Reads before turning the bus around for writes +system.physmem.wrPerTurnAround::samples 7044 # Writes before turning the bus around for reads +system.physmem.wrPerTurnAround::mean 16.246735 # Writes before turning the bus around for reads +system.physmem.wrPerTurnAround::gmean 16.230854 # Writes before turning the bus around for reads +system.physmem.wrPerTurnAround::stdev 0.753728 # Writes before turning the bus around for reads +system.physmem.wrPerTurnAround::16 6287 89.25% 89.25% # Writes before turning the bus around for reads +system.physmem.wrPerTurnAround::17 12 0.17% 89.42% # Writes before turning the bus around for reads +system.physmem.wrPerTurnAround::18 574 8.15% 97.57% # Writes before turning the bus around for reads +system.physmem.wrPerTurnAround::19 138 1.96% 99.53% # Writes before turning the bus around for reads +system.physmem.wrPerTurnAround::20 18 0.26% 99.79% # Writes before turning the bus around for reads +system.physmem.wrPerTurnAround::21 7 0.10% 99.89% # Writes before turning the bus around for reads +system.physmem.wrPerTurnAround::22 3 0.04% 99.93% # Writes before turning the bus around for reads +system.physmem.wrPerTurnAround::23 3 0.04% 99.97% # Writes before turning the bus around for reads +system.physmem.wrPerTurnAround::25 2 0.03% 100.00% # Writes before turning the bus around for reads +system.physmem.wrPerTurnAround::total 7044 # Writes before turning the bus around for reads +system.physmem.totQLat 1985984500 # Total ticks spent queuing +system.physmem.totMemAccLat 5086540750 # Total ticks spent from burst creation until serviced by the DRAM +system.physmem.totBusLat 826815000 # Total ticks spent in databus transfers +system.physmem.avgQLat 12009.85 # Average queueing delay per DRAM burst system.physmem.avgBusLat 5000.00 # Average bus latency per DRAM burst -system.physmem.avgMemAccLat 30779.07 # Average memory access latency per DRAM burst -system.physmem.avgRdBW 178.01 # Average DRAM read bandwidth in MiByte/s -system.physmem.avgWrBW 123.20 # Average achieved write bandwidth in MiByte/s -system.physmem.avgRdBWSys 178.01 # Average system read bandwidth in MiByte/s -system.physmem.avgWrBWSys 123.24 # Average system write bandwidth in MiByte/s +system.physmem.avgMemAccLat 30759.85 # Average memory access latency per DRAM burst +system.physmem.avgRdBW 176.39 # Average DRAM read bandwidth in MiByte/s +system.physmem.avgWrBW 122.07 # Average achieved write bandwidth in MiByte/s +system.physmem.avgRdBWSys 176.39 # Average system read bandwidth in MiByte/s +system.physmem.avgWrBWSys 122.10 # Average system write bandwidth in MiByte/s system.physmem.peakBW 12800.00 # Theoretical peak bandwidth in MiByte/s -system.physmem.busUtil 2.35 # Data bus utilization in percentage -system.physmem.busUtilRead 1.39 # Data bus utilization in percentage for reads -system.physmem.busUtilWrite 0.96 # Data bus utilization in percentage for writes +system.physmem.busUtil 2.33 # Data bus utilization in percentage +system.physmem.busUtilRead 1.38 # Data bus 
utilization in percentage for reads +system.physmem.busUtilWrite 0.95 # Data bus utilization in percentage for writes system.physmem.avgRdQLen 1.00 # Average read queue length when enqueuing -system.physmem.avgWrQLen 23.77 # Average write queue length when enqueuing -system.physmem.readRowHits 143858 # Number of row buffer hits during reads -system.physmem.writeRowHits 81218 # Number of row buffer hits during writes -system.physmem.readRowHitRate 87.01 # Row buffer hit rate for reads -system.physmem.writeRowHitRate 70.95 # Row buffer hit rate for writes -system.physmem.avgGap 212448.19 # Average gap between requests -system.physmem.pageHitRate 80.44 # Row buffer hit rate, read and write combined -system.physmem_0.actEnergy 199274040 # Energy for activate commands per rank (pJ) -system.physmem_0.preEnergy 108730875 # Energy for precharge commands per rank (pJ) -system.physmem_0.readEnergy 636347400 # Energy for read commands per rank (pJ) -system.physmem_0.writeEnergy 369068400 # Energy for write commands per rank (pJ) -system.physmem_0.refreshEnergy 3882347040 # Energy for refresh commands per rank (pJ) -system.physmem_0.actBackEnergy 12411408285 # Energy for active background per rank (pJ) -system.physmem_0.preBackEnergy 24777095250 # Energy for precharge background per rank (pJ) -system.physmem_0.totalEnergy 42384271290 # Total energy per rank (pJ) -system.physmem_0.averagePower 713.053838 # Core power per rank (mW) -system.physmem_0.memoryStateTime::IDLE 41070575000 # Time in different power states -system.physmem_0.memoryStateTime::REF 1984840000 # Time in different power states +system.physmem.avgWrQLen 24.00 # Average write queue length when enqueuing +system.physmem.readRowHits 143816 # Number of row buffer hits during reads +system.physmem.writeRowHits 81240 # Number of row buffer hits during writes +system.physmem.readRowHitRate 86.97 # Row buffer hit rate for reads +system.physmem.writeRowHitRate 70.97 # Row buffer hit rate for writes +system.physmem.avgGap 214412.57 # Average gap between requests +system.physmem.pageHitRate 80.43 # Row buffer hit rate, read and write combined +system.physmem_0.actEnergy 198964080 # Energy for activate commands per rank (pJ) +system.physmem_0.preEnergy 108561750 # Energy for precharge commands per rank (pJ) +system.physmem_0.readEnergy 636386400 # Energy for read commands per rank (pJ) +system.physmem_0.writeEnergy 369061920 # Energy for write commands per rank (pJ) +system.physmem_0.refreshEnergy 3918454800 # Energy for refresh commands per rank (pJ) +system.physmem_0.actBackEnergy 12421358775 # Energy for active background per rank (pJ) +system.physmem_0.preBackEnergy 25100061000 # Energy for precharge background per rank (pJ) +system.physmem_0.totalEnergy 42752848725 # Total energy per rank (pJ) +system.physmem_0.averagePower 712.626862 # Core power per rank (mW) +system.physmem_0.memoryStateTime::IDLE 41606215000 # Time in different power states +system.physmem_0.memoryStateTime::REF 2003300000 # Time in different power states system.physmem_0.memoryStateTime::PRE_PDN 0 # Time in different power states -system.physmem_0.memoryStateTime::ACT 16385091250 # Time in different power states +system.physmem_0.memoryStateTime::ACT 16383815000 # Time in different power states system.physmem_0.memoryStateTime::ACT_PDN 0 # Time in different power states -system.physmem_1.actEnergy 213940440 # Energy for activate commands per rank (pJ) -system.physmem_1.preEnergy 116733375 # Energy for precharge commands per rank (pJ) -system.physmem_1.readEnergy 
652860000 # Energy for read commands per rank (pJ) -system.physmem_1.writeEnergy 372152880 # Energy for write commands per rank (pJ) -system.physmem_1.refreshEnergy 3882347040 # Energy for refresh commands per rank (pJ) -system.physmem_1.actBackEnergy 13085746785 # Energy for active background per rank (pJ) -system.physmem_1.preBackEnergy 24185582250 # Energy for precharge background per rank (pJ) -system.physmem_1.totalEnergy 42509362770 # Total energy per rank (pJ) -system.physmem_1.averagePower 715.158080 # Core power per rank (mW) -system.physmem_1.memoryStateTime::IDLE 40083292000 # Time in different power states -system.physmem_1.memoryStateTime::REF 1984840000 # Time in different power states +system.physmem_1.actEnergy 214545240 # Energy for activate commands per rank (pJ) +system.physmem_1.preEnergy 117063375 # Energy for precharge commands per rank (pJ) +system.physmem_1.readEnergy 652945800 # Energy for read commands per rank (pJ) +system.physmem_1.writeEnergy 372211200 # Energy for write commands per rank (pJ) +system.physmem_1.refreshEnergy 3918454800 # Energy for refresh commands per rank (pJ) +system.physmem_1.actBackEnergy 13100937570 # Energy for active background per rank (pJ) +system.physmem_1.preBackEnergy 24503939250 # Energy for precharge background per rank (pJ) +system.physmem_1.totalEnergy 42880097235 # Total energy per rank (pJ) +system.physmem_1.averagePower 714.747907 # Core power per rank (mW) +system.physmem_1.memoryStateTime::IDLE 40611255500 # Time in different power states +system.physmem_1.memoryStateTime::REF 2003300000 # Time in different power states system.physmem_1.memoryStateTime::PRE_PDN 0 # Time in different power states -system.physmem_1.memoryStateTime::ACT 17372590500 # Time in different power states +system.physmem_1.memoryStateTime::ACT 17379160000 # Time in different power states system.physmem_1.memoryStateTime::ACT_PDN 0 # Time in different power states -system.pwrStateResidencyTicks::UNDEFINED 59447065000 # Cumulative time (in ticks) in various power states -system.cpu.branchPred.lookups 14660042 # Number of BP lookups -system.cpu.branchPred.condPredicted 9484785 # Number of conditional branches predicted -system.cpu.branchPred.condIncorrect 381684 # Number of conditional branches incorrect -system.cpu.branchPred.BTBLookups 9866507 # Number of BTB lookups -system.cpu.branchPred.BTBHits 6346497 # Number of BTB hits +system.pwrStateResidencyTicks::UNDEFINED 60000593000 # Cumulative time (in ticks) in various power states +system.cpu.branchPred.lookups 14695118 # Number of BP lookups +system.cpu.branchPred.condPredicted 9500860 # Number of conditional branches predicted +system.cpu.branchPred.condIncorrect 385258 # Number of conditional branches incorrect +system.cpu.branchPred.BTBLookups 10182600 # Number of BTB lookups +system.cpu.branchPred.BTBHits 6367092 # Number of BTB hits system.cpu.branchPred.BTBCorrect 0 # Number of correct BTB predictions (this stat may not work properly. -system.cpu.branchPred.BTBHitPct 64.323646 # BTB Hit Percentage -system.cpu.branchPred.usedRAS 1708762 # Number of times the RAS was used to get a target. -system.cpu.branchPred.RASInCorrect 84355 # Number of incorrect RAS predictions. -system.cpu.branchPred.indirectLookups 37443 # Number of indirect predictor lookups. -system.cpu.branchPred.indirectHits 31778 # Number of indirect target hits. -system.cpu.branchPred.indirectMisses 5665 # Number of indirect misses. -system.cpu.branchPredindirectMispredicted 7605 # Number of mispredicted indirect branches. 
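(Editor's note: the derived branch-prediction percentages in this reference update follow directly from the raw counters above. A minimal Python cross-check sketch, illustration only and not part of the reference files; the formula BTBHitPct = BTBHits / BTBLookups * 100 and the variable names are assumptions based on the stat descriptions:)

    # Cross-check of the old BTB hit percentage from the counters shown above.
    btb_lookups = 9866507   # system.cpu.branchPred.BTBLookups (removed value)
    btb_hits    = 6346497   # system.cpu.branchPred.BTBHits (removed value)

    btb_hit_pct = btb_hits / btb_lookups * 100
    print(f"BTBHitPct ~ {btb_hit_pct:.4f}")   # ~64.3236, matching the removed BTBHitPct line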
+system.cpu.branchPred.BTBHitPct 62.529138 # BTB Hit Percentage +system.cpu.branchPred.usedRAS 1712185 # Number of times the RAS was used to get a target. +system.cpu.branchPred.RASInCorrect 84621 # Number of incorrect RAS predictions. +system.cpu.branchPred.indirectLookups 37568 # Number of indirect predictor lookups. +system.cpu.branchPred.indirectHits 31792 # Number of indirect target hits. +system.cpu.branchPred.indirectMisses 5776 # Number of indirect misses. +system.cpu.branchPredindirectMispredicted 7597 # Number of mispredicted indirect branches. system.cpu_clk_domain.clock 500 # Clock period in ticks system.cpu.dtb.fetch_hits 0 # ITB hits system.cpu.dtb.fetch_misses 0 # ITB misses system.cpu.dtb.fetch_acv 0 # ITB acv system.cpu.dtb.fetch_accesses 0 # ITB accesses -system.cpu.dtb.read_hits 20565775 # DTB read hits -system.cpu.dtb.read_misses 97355 # DTB read misses -system.cpu.dtb.read_acv 8 # DTB read access violations -system.cpu.dtb.read_accesses 20663130 # DTB read accesses -system.cpu.dtb.write_hits 14665271 # DTB write hits -system.cpu.dtb.write_misses 9409 # DTB write misses +system.cpu.dtb.read_hits 20578668 # DTB read hits +system.cpu.dtb.read_misses 95435 # DTB read misses +system.cpu.dtb.read_acv 10 # DTB read access violations +system.cpu.dtb.read_accesses 20674103 # DTB read accesses +system.cpu.dtb.write_hits 14665915 # DTB write hits +system.cpu.dtb.write_misses 8842 # DTB write misses system.cpu.dtb.write_acv 0 # DTB write access violations -system.cpu.dtb.write_accesses 14674680 # DTB write accesses -system.cpu.dtb.data_hits 35231046 # DTB hits -system.cpu.dtb.data_misses 106764 # DTB misses -system.cpu.dtb.data_acv 8 # DTB access violations -system.cpu.dtb.data_accesses 35337810 # DTB accesses -system.cpu.itb.fetch_hits 25585531 # ITB hits -system.cpu.itb.fetch_misses 5208 # ITB misses +system.cpu.dtb.write_accesses 14674757 # DTB write accesses +system.cpu.dtb.data_hits 35244583 # DTB hits +system.cpu.dtb.data_misses 104277 # DTB misses +system.cpu.dtb.data_acv 10 # DTB access violations +system.cpu.dtb.data_accesses 35348860 # DTB accesses +system.cpu.itb.fetch_hits 25646396 # ITB hits +system.cpu.itb.fetch_misses 5177 # ITB misses system.cpu.itb.fetch_acv 0 # ITB acv -system.cpu.itb.fetch_accesses 25590739 # ITB accesses +system.cpu.itb.fetch_accesses 25651573 # ITB accesses system.cpu.itb.read_hits 0 # DTB read hits system.cpu.itb.read_misses 0 # DTB read misses system.cpu.itb.read_acv 0 # DTB read access violations @@ -328,16 +327,16 @@ system.cpu.itb.data_misses 0 # DT system.cpu.itb.data_acv 0 # DTB access violations system.cpu.itb.data_accesses 0 # DTB accesses system.cpu.workload.num_syscalls 4583 # Number of system calls -system.cpu.pwrStateResidencyTicks::ON 59447065000 # Cumulative time (in ticks) in various power states -system.cpu.numCycles 118894130 # number of cpu cycles simulated +system.cpu.pwrStateResidencyTicks::ON 60000593000 # Cumulative time (in ticks) in various power states +system.cpu.numCycles 120001186 # number of cpu cycles simulated system.cpu.numWorkItemsStarted 0 # number of work items this cpu started system.cpu.numWorkItemsCompleted 0 # number of work items this cpu completed system.cpu.committedInsts 88438073 # Number of instructions committed system.cpu.committedOps 88438073 # Number of ops (including micro ops) committed -system.cpu.discardedOps 1097381 # Number of ops (including micro ops) which were discarded before commit +system.cpu.discardedOps 1084586 # Number of ops (including micro ops) which were discarded before 
commit system.cpu.numFetchSuspends 0 # Number of times Execute suspended instruction fetching -system.cpu.cpi 1.344377 # CPI: cycles per instruction -system.cpu.ipc 0.743839 # IPC: instructions per cycle +system.cpu.cpi 1.356895 # CPI: cycles per instruction +system.cpu.ipc 0.736977 # IPC: instructions per cycle system.cpu.op_class_0::No_OpClass 8748916 9.89% 9.89% # Class of committed instruction system.cpu.op_class_0::IntAlu 44394799 50.20% 60.09% # Class of committed instruction system.cpu.op_class_0::IntMult 41101 0.05% 60.14% # Class of committed instruction @@ -373,106 +372,106 @@ system.cpu.op_class_0::MemWrite 14620629 16.53% 100.00% # Cl system.cpu.op_class_0::IprAccess 0 0.00% 100.00% # Class of committed instruction system.cpu.op_class_0::InstPrefetch 0 0.00% 100.00% # Class of committed instruction system.cpu.op_class_0::total 88438073 # Class of committed instruction -system.cpu.tickCycles 91425505 # Number of cycles that the object actually ticked -system.cpu.idleCycles 27468625 # Total number of cycles that the object has spent stopped -system.cpu.dcache.tags.pwrStateResidencyTicks::UNDEFINED 59447065000 # Cumulative time (in ticks) in various power states -system.cpu.dcache.tags.replacements 200766 # number of replacements -system.cpu.dcache.tags.tagsinuse 4070.673886 # Cycle average of tags in use -system.cpu.dcache.tags.total_refs 34612040 # Total number of references to valid blocks. -system.cpu.dcache.tags.sampled_refs 204862 # Sample count of references to valid blocks. -system.cpu.dcache.tags.avg_refs 168.952954 # Average number of references to valid blocks. -system.cpu.dcache.tags.warmup_cycle 687650500 # Cycle when the warmup percentage was hit. -system.cpu.dcache.tags.occ_blocks::cpu.data 4070.673886 # Average occupied blocks per requestor -system.cpu.dcache.tags.occ_percent::cpu.data 0.993817 # Average percentage of cache occupancy -system.cpu.dcache.tags.occ_percent::total 0.993817 # Average percentage of cache occupancy +system.cpu.tickCycles 91986001 # Number of cycles that the object actually ticked +system.cpu.idleCycles 28015185 # Total number of cycles that the object has spent stopped +system.cpu.dcache.tags.pwrStateResidencyTicks::UNDEFINED 60000593000 # Cumulative time (in ticks) in various power states +system.cpu.dcache.tags.replacements 200807 # number of replacements +system.cpu.dcache.tags.tagsinuse 4070.707874 # Cycle average of tags in use +system.cpu.dcache.tags.total_refs 34647558 # Total number of references to valid blocks. +system.cpu.dcache.tags.sampled_refs 204903 # Sample count of references to valid blocks. +system.cpu.dcache.tags.avg_refs 169.092488 # Average number of references to valid blocks. +system.cpu.dcache.tags.warmup_cycle 690770500 # Cycle when the warmup percentage was hit. 
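(Editor's note: the updated CPI/IPC figures above are consistent with cycles per committed instruction. A quick sketch, illustration only, using the values from this diff; variable names are not gem5 identifiers:)

    # CPI/IPC cross-check from the raw counters in this reference update.
    num_cycles      = 120001186   # system.cpu.numCycles (added value)
    committed_insts = 88438073    # system.cpu.committedInsts (unchanged)

    cpi = num_cycles / committed_insts
    ipc = committed_insts / num_cycles
    print(f"CPI ~ {cpi:.4f}, IPC ~ {ipc:.4f}")   # ~1.3569 and ~0.7370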
+system.cpu.dcache.tags.occ_blocks::cpu.data 4070.707874 # Average occupied blocks per requestor +system.cpu.dcache.tags.occ_percent::cpu.data 0.993825 # Average percentage of cache occupancy +system.cpu.dcache.tags.occ_percent::total 0.993825 # Average percentage of cache occupancy system.cpu.dcache.tags.occ_task_id_blocks::1024 4096 # Occupied blocks per task id -system.cpu.dcache.tags.age_task_id_blocks_1024::0 49 # Occupied blocks per task id -system.cpu.dcache.tags.age_task_id_blocks_1024::1 687 # Occupied blocks per task id -system.cpu.dcache.tags.age_task_id_blocks_1024::2 3360 # Occupied blocks per task id +system.cpu.dcache.tags.age_task_id_blocks_1024::0 48 # Occupied blocks per task id +system.cpu.dcache.tags.age_task_id_blocks_1024::1 661 # Occupied blocks per task id +system.cpu.dcache.tags.age_task_id_blocks_1024::2 3387 # Occupied blocks per task id system.cpu.dcache.tags.occ_task_id_percent::1024 1 # Percentage of cache occupancy per task id -system.cpu.dcache.tags.tag_accesses 70168000 # Number of tag accesses -system.cpu.dcache.tags.data_accesses 70168000 # Number of data accesses -system.cpu.dcache.pwrStateResidencyTicks::UNDEFINED 59447065000 # Cumulative time (in ticks) in various power states -system.cpu.dcache.ReadReq_hits::cpu.data 20278781 # number of ReadReq hits -system.cpu.dcache.ReadReq_hits::total 20278781 # number of ReadReq hits -system.cpu.dcache.WriteReq_hits::cpu.data 14333259 # number of WriteReq hits -system.cpu.dcache.WriteReq_hits::total 14333259 # number of WriteReq hits -system.cpu.dcache.demand_hits::cpu.data 34612040 # number of demand (read+write) hits -system.cpu.dcache.demand_hits::total 34612040 # number of demand (read+write) hits -system.cpu.dcache.overall_hits::cpu.data 34612040 # number of overall hits -system.cpu.dcache.overall_hits::total 34612040 # number of overall hits -system.cpu.dcache.ReadReq_misses::cpu.data 89411 # number of ReadReq misses -system.cpu.dcache.ReadReq_misses::total 89411 # number of ReadReq misses -system.cpu.dcache.WriteReq_misses::cpu.data 280118 # number of WriteReq misses -system.cpu.dcache.WriteReq_misses::total 280118 # number of WriteReq misses -system.cpu.dcache.demand_misses::cpu.data 369529 # number of demand (read+write) misses -system.cpu.dcache.demand_misses::total 369529 # number of demand (read+write) misses -system.cpu.dcache.overall_misses::cpu.data 369529 # number of overall misses -system.cpu.dcache.overall_misses::total 369529 # number of overall misses -system.cpu.dcache.ReadReq_miss_latency::cpu.data 4770299000 # number of ReadReq miss cycles -system.cpu.dcache.ReadReq_miss_latency::total 4770299000 # number of ReadReq miss cycles -system.cpu.dcache.WriteReq_miss_latency::cpu.data 21700228000 # number of WriteReq miss cycles -system.cpu.dcache.WriteReq_miss_latency::total 21700228000 # number of WriteReq miss cycles -system.cpu.dcache.demand_miss_latency::cpu.data 26470527000 # number of demand (read+write) miss cycles -system.cpu.dcache.demand_miss_latency::total 26470527000 # number of demand (read+write) miss cycles -system.cpu.dcache.overall_miss_latency::cpu.data 26470527000 # number of overall miss cycles -system.cpu.dcache.overall_miss_latency::total 26470527000 # number of overall miss cycles -system.cpu.dcache.ReadReq_accesses::cpu.data 20368192 # number of ReadReq accesses(hits+misses) -system.cpu.dcache.ReadReq_accesses::total 20368192 # number of ReadReq accesses(hits+misses) +system.cpu.dcache.tags.tag_accesses 70183301 # Number of tag accesses 
+system.cpu.dcache.tags.data_accesses 70183301 # Number of data accesses +system.cpu.dcache.pwrStateResidencyTicks::UNDEFINED 60000593000 # Cumulative time (in ticks) in various power states +system.cpu.dcache.ReadReq_hits::cpu.data 20314289 # number of ReadReq hits +system.cpu.dcache.ReadReq_hits::total 20314289 # number of ReadReq hits +system.cpu.dcache.WriteReq_hits::cpu.data 14333269 # number of WriteReq hits +system.cpu.dcache.WriteReq_hits::total 14333269 # number of WriteReq hits +system.cpu.dcache.demand_hits::cpu.data 34647558 # number of demand (read+write) hits +system.cpu.dcache.demand_hits::total 34647558 # number of demand (read+write) hits +system.cpu.dcache.overall_hits::cpu.data 34647558 # number of overall hits +system.cpu.dcache.overall_hits::total 34647558 # number of overall hits +system.cpu.dcache.ReadReq_misses::cpu.data 61533 # number of ReadReq misses +system.cpu.dcache.ReadReq_misses::total 61533 # number of ReadReq misses +system.cpu.dcache.WriteReq_misses::cpu.data 280108 # number of WriteReq misses +system.cpu.dcache.WriteReq_misses::total 280108 # number of WriteReq misses +system.cpu.dcache.demand_misses::cpu.data 341641 # number of demand (read+write) misses +system.cpu.dcache.demand_misses::total 341641 # number of demand (read+write) misses +system.cpu.dcache.overall_misses::cpu.data 341641 # number of overall misses +system.cpu.dcache.overall_misses::total 341641 # number of overall misses +system.cpu.dcache.ReadReq_miss_latency::cpu.data 2738549500 # number of ReadReq miss cycles +system.cpu.dcache.ReadReq_miss_latency::total 2738549500 # number of ReadReq miss cycles +system.cpu.dcache.WriteReq_miss_latency::cpu.data 21709876500 # number of WriteReq miss cycles +system.cpu.dcache.WriteReq_miss_latency::total 21709876500 # number of WriteReq miss cycles +system.cpu.dcache.demand_miss_latency::cpu.data 24448426000 # number of demand (read+write) miss cycles +system.cpu.dcache.demand_miss_latency::total 24448426000 # number of demand (read+write) miss cycles +system.cpu.dcache.overall_miss_latency::cpu.data 24448426000 # number of overall miss cycles +system.cpu.dcache.overall_miss_latency::total 24448426000 # number of overall miss cycles +system.cpu.dcache.ReadReq_accesses::cpu.data 20375822 # number of ReadReq accesses(hits+misses) +system.cpu.dcache.ReadReq_accesses::total 20375822 # number of ReadReq accesses(hits+misses) system.cpu.dcache.WriteReq_accesses::cpu.data 14613377 # number of WriteReq accesses(hits+misses) system.cpu.dcache.WriteReq_accesses::total 14613377 # number of WriteReq accesses(hits+misses) -system.cpu.dcache.demand_accesses::cpu.data 34981569 # number of demand (read+write) accesses -system.cpu.dcache.demand_accesses::total 34981569 # number of demand (read+write) accesses -system.cpu.dcache.overall_accesses::cpu.data 34981569 # number of overall (read+write) accesses -system.cpu.dcache.overall_accesses::total 34981569 # number of overall (read+write) accesses -system.cpu.dcache.ReadReq_miss_rate::cpu.data 0.004390 # miss rate for ReadReq accesses -system.cpu.dcache.ReadReq_miss_rate::total 0.004390 # miss rate for ReadReq accesses -system.cpu.dcache.WriteReq_miss_rate::cpu.data 0.019169 # miss rate for WriteReq accesses -system.cpu.dcache.WriteReq_miss_rate::total 0.019169 # miss rate for WriteReq accesses -system.cpu.dcache.demand_miss_rate::cpu.data 0.010564 # miss rate for demand accesses -system.cpu.dcache.demand_miss_rate::total 0.010564 # miss rate for demand accesses -system.cpu.dcache.overall_miss_rate::cpu.data 
0.010564 # miss rate for overall accesses -system.cpu.dcache.overall_miss_rate::total 0.010564 # miss rate for overall accesses -system.cpu.dcache.ReadReq_avg_miss_latency::cpu.data 53352.484594 # average ReadReq miss latency -system.cpu.dcache.ReadReq_avg_miss_latency::total 53352.484594 # average ReadReq miss latency -system.cpu.dcache.WriteReq_avg_miss_latency::cpu.data 77468.166987 # average WriteReq miss latency -system.cpu.dcache.WriteReq_avg_miss_latency::total 77468.166987 # average WriteReq miss latency -system.cpu.dcache.demand_avg_miss_latency::cpu.data 71633.151931 # average overall miss latency -system.cpu.dcache.demand_avg_miss_latency::total 71633.151931 # average overall miss latency -system.cpu.dcache.overall_avg_miss_latency::cpu.data 71633.151931 # average overall miss latency -system.cpu.dcache.overall_avg_miss_latency::total 71633.151931 # average overall miss latency +system.cpu.dcache.demand_accesses::cpu.data 34989199 # number of demand (read+write) accesses +system.cpu.dcache.demand_accesses::total 34989199 # number of demand (read+write) accesses +system.cpu.dcache.overall_accesses::cpu.data 34989199 # number of overall (read+write) accesses +system.cpu.dcache.overall_accesses::total 34989199 # number of overall (read+write) accesses +system.cpu.dcache.ReadReq_miss_rate::cpu.data 0.003020 # miss rate for ReadReq accesses +system.cpu.dcache.ReadReq_miss_rate::total 0.003020 # miss rate for ReadReq accesses +system.cpu.dcache.WriteReq_miss_rate::cpu.data 0.019168 # miss rate for WriteReq accesses +system.cpu.dcache.WriteReq_miss_rate::total 0.019168 # miss rate for WriteReq accesses +system.cpu.dcache.demand_miss_rate::cpu.data 0.009764 # miss rate for demand accesses +system.cpu.dcache.demand_miss_rate::total 0.009764 # miss rate for demand accesses +system.cpu.dcache.overall_miss_rate::cpu.data 0.009764 # miss rate for overall accesses +system.cpu.dcache.overall_miss_rate::total 0.009764 # miss rate for overall accesses +system.cpu.dcache.ReadReq_avg_miss_latency::cpu.data 44505.379227 # average ReadReq miss latency +system.cpu.dcache.ReadReq_avg_miss_latency::total 44505.379227 # average ReadReq miss latency +system.cpu.dcache.WriteReq_avg_miss_latency::cpu.data 77505.378283 # average WriteReq miss latency +system.cpu.dcache.WriteReq_avg_miss_latency::total 77505.378283 # average WriteReq miss latency +system.cpu.dcache.demand_avg_miss_latency::cpu.data 71561.744638 # average overall miss latency +system.cpu.dcache.demand_avg_miss_latency::total 71561.744638 # average overall miss latency +system.cpu.dcache.overall_avg_miss_latency::cpu.data 71561.744638 # average overall miss latency +system.cpu.dcache.overall_avg_miss_latency::total 71561.744638 # average overall miss latency system.cpu.dcache.blocked_cycles::no_mshrs 0 # number of cycles access was blocked system.cpu.dcache.blocked_cycles::no_targets 0 # number of cycles access was blocked system.cpu.dcache.blocked::no_mshrs 0 # number of cycles access was blocked system.cpu.dcache.blocked::no_targets 0 # number of cycles access was blocked system.cpu.dcache.avg_blocked_cycles::no_mshrs nan # average number of cycles each access was blocked system.cpu.dcache.avg_blocked_cycles::no_targets nan # average number of cycles each access was blocked -system.cpu.dcache.writebacks::writebacks 168424 # number of writebacks -system.cpu.dcache.writebacks::total 168424 # number of writebacks -system.cpu.dcache.ReadReq_mshr_hits::cpu.data 28112 # number of ReadReq MSHR hits -system.cpu.dcache.ReadReq_mshr_hits::total 28112 
# number of ReadReq MSHR hits -system.cpu.dcache.WriteReq_mshr_hits::cpu.data 136555 # number of WriteReq MSHR hits -system.cpu.dcache.WriteReq_mshr_hits::total 136555 # number of WriteReq MSHR hits -system.cpu.dcache.demand_mshr_hits::cpu.data 164667 # number of demand (read+write) MSHR hits -system.cpu.dcache.demand_mshr_hits::total 164667 # number of demand (read+write) MSHR hits -system.cpu.dcache.overall_mshr_hits::cpu.data 164667 # number of overall MSHR hits -system.cpu.dcache.overall_mshr_hits::total 164667 # number of overall MSHR hits -system.cpu.dcache.ReadReq_mshr_misses::cpu.data 61299 # number of ReadReq MSHR misses -system.cpu.dcache.ReadReq_mshr_misses::total 61299 # number of ReadReq MSHR misses -system.cpu.dcache.WriteReq_mshr_misses::cpu.data 143563 # number of WriteReq MSHR misses -system.cpu.dcache.WriteReq_mshr_misses::total 143563 # number of WriteReq MSHR misses -system.cpu.dcache.demand_mshr_misses::cpu.data 204862 # number of demand (read+write) MSHR misses -system.cpu.dcache.demand_mshr_misses::total 204862 # number of demand (read+write) MSHR misses -system.cpu.dcache.overall_mshr_misses::cpu.data 204862 # number of overall MSHR misses -system.cpu.dcache.overall_mshr_misses::total 204862 # number of overall MSHR misses -system.cpu.dcache.ReadReq_mshr_miss_latency::cpu.data 2681247500 # number of ReadReq MSHR miss cycles -system.cpu.dcache.ReadReq_mshr_miss_latency::total 2681247500 # number of ReadReq MSHR miss cycles -system.cpu.dcache.WriteReq_mshr_miss_latency::cpu.data 10975422500 # number of WriteReq MSHR miss cycles -system.cpu.dcache.WriteReq_mshr_miss_latency::total 10975422500 # number of WriteReq MSHR miss cycles -system.cpu.dcache.demand_mshr_miss_latency::cpu.data 13656670000 # number of demand (read+write) MSHR miss cycles -system.cpu.dcache.demand_mshr_miss_latency::total 13656670000 # number of demand (read+write) MSHR miss cycles -system.cpu.dcache.overall_mshr_miss_latency::cpu.data 13656670000 # number of overall MSHR miss cycles -system.cpu.dcache.overall_mshr_miss_latency::total 13656670000 # number of overall MSHR miss cycles +system.cpu.dcache.writebacks::writebacks 168446 # number of writebacks +system.cpu.dcache.writebacks::total 168446 # number of writebacks +system.cpu.dcache.ReadReq_mshr_hits::cpu.data 197 # number of ReadReq MSHR hits +system.cpu.dcache.ReadReq_mshr_hits::total 197 # number of ReadReq MSHR hits +system.cpu.dcache.WriteReq_mshr_hits::cpu.data 136541 # number of WriteReq MSHR hits +system.cpu.dcache.WriteReq_mshr_hits::total 136541 # number of WriteReq MSHR hits +system.cpu.dcache.demand_mshr_hits::cpu.data 136738 # number of demand (read+write) MSHR hits +system.cpu.dcache.demand_mshr_hits::total 136738 # number of demand (read+write) MSHR hits +system.cpu.dcache.overall_mshr_hits::cpu.data 136738 # number of overall MSHR hits +system.cpu.dcache.overall_mshr_hits::total 136738 # number of overall MSHR hits +system.cpu.dcache.ReadReq_mshr_misses::cpu.data 61336 # number of ReadReq MSHR misses +system.cpu.dcache.ReadReq_mshr_misses::total 61336 # number of ReadReq MSHR misses +system.cpu.dcache.WriteReq_mshr_misses::cpu.data 143567 # number of WriteReq MSHR misses +system.cpu.dcache.WriteReq_mshr_misses::total 143567 # number of WriteReq MSHR misses +system.cpu.dcache.demand_mshr_misses::cpu.data 204903 # number of demand (read+write) MSHR misses +system.cpu.dcache.demand_mshr_misses::total 204903 # number of demand (read+write) MSHR misses +system.cpu.dcache.overall_mshr_misses::cpu.data 204903 # number of overall MSHR 
misses +system.cpu.dcache.overall_mshr_misses::total 204903 # number of overall MSHR misses +system.cpu.dcache.ReadReq_mshr_miss_latency::cpu.data 2673829500 # number of ReadReq MSHR miss cycles +system.cpu.dcache.ReadReq_mshr_miss_latency::total 2673829500 # number of ReadReq MSHR miss cycles +system.cpu.dcache.WriteReq_mshr_miss_latency::cpu.data 10980283500 # number of WriteReq MSHR miss cycles +system.cpu.dcache.WriteReq_mshr_miss_latency::total 10980283500 # number of WriteReq MSHR miss cycles +system.cpu.dcache.demand_mshr_miss_latency::cpu.data 13654113000 # number of demand (read+write) MSHR miss cycles +system.cpu.dcache.demand_mshr_miss_latency::total 13654113000 # number of demand (read+write) MSHR miss cycles +system.cpu.dcache.overall_mshr_miss_latency::cpu.data 13654113000 # number of overall MSHR miss cycles +system.cpu.dcache.overall_mshr_miss_latency::total 13654113000 # number of overall MSHR miss cycles system.cpu.dcache.ReadReq_mshr_miss_rate::cpu.data 0.003010 # mshr miss rate for ReadReq accesses system.cpu.dcache.ReadReq_mshr_miss_rate::total 0.003010 # mshr miss rate for ReadReq accesses system.cpu.dcache.WriteReq_mshr_miss_rate::cpu.data 0.009824 # mshr miss rate for WriteReq accesses @@ -481,330 +480,332 @@ system.cpu.dcache.demand_mshr_miss_rate::cpu.data 0.005856 system.cpu.dcache.demand_mshr_miss_rate::total 0.005856 # mshr miss rate for demand accesses system.cpu.dcache.overall_mshr_miss_rate::cpu.data 0.005856 # mshr miss rate for overall accesses system.cpu.dcache.overall_mshr_miss_rate::total 0.005856 # mshr miss rate for overall accesses -system.cpu.dcache.ReadReq_avg_mshr_miss_latency::cpu.data 43740.477006 # average ReadReq mshr miss latency -system.cpu.dcache.ReadReq_avg_mshr_miss_latency::total 43740.477006 # average ReadReq mshr miss latency -system.cpu.dcache.WriteReq_avg_mshr_miss_latency::cpu.data 76450.216978 # average WriteReq mshr miss latency -system.cpu.dcache.WriteReq_avg_mshr_miss_latency::total 76450.216978 # average WriteReq mshr miss latency -system.cpu.dcache.demand_avg_mshr_miss_latency::cpu.data 66662.777870 # average overall mshr miss latency -system.cpu.dcache.demand_avg_mshr_miss_latency::total 66662.777870 # average overall mshr miss latency -system.cpu.dcache.overall_avg_mshr_miss_latency::cpu.data 66662.777870 # average overall mshr miss latency -system.cpu.dcache.overall_avg_mshr_miss_latency::total 66662.777870 # average overall mshr miss latency -system.cpu.icache.tags.pwrStateResidencyTicks::UNDEFINED 59447065000 # Cumulative time (in ticks) in various power states -system.cpu.icache.tags.replacements 152872 # number of replacements -system.cpu.icache.tags.tagsinuse 1932.382407 # Cycle average of tags in use -system.cpu.icache.tags.total_refs 25430610 # Total number of references to valid blocks. -system.cpu.icache.tags.sampled_refs 154920 # Sample count of references to valid blocks. -system.cpu.icache.tags.avg_refs 164.153176 # Average number of references to valid blocks. -system.cpu.icache.tags.warmup_cycle 42235793500 # Cycle when the warmup percentage was hit. 
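(Editor's note: the unchanged dcache MSHR miss-rate context lines above are the ratio of the updated MSHR-miss and access counters. A small sketch, illustration only; variable names are assumptions:)

    # dcache demand MSHR miss rate from the updated counters in this diff.
    demand_mshr_misses = 204903     # system.cpu.dcache.demand_mshr_misses::total (added)
    demand_accesses    = 34989199   # system.cpu.dcache.demand_accesses::total (added)

    rate = demand_mshr_misses / demand_accesses
    print(f"demand_mshr_miss_rate ~ {rate:.6f}")   # ~0.005856, matching the context line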
-system.cpu.icache.tags.occ_blocks::cpu.inst 1932.382407 # Average occupied blocks per requestor -system.cpu.icache.tags.occ_percent::cpu.inst 0.943546 # Average percentage of cache occupancy -system.cpu.icache.tags.occ_percent::total 0.943546 # Average percentage of cache occupancy +system.cpu.dcache.ReadReq_avg_mshr_miss_latency::cpu.data 43593.150841 # average ReadReq mshr miss latency +system.cpu.dcache.ReadReq_avg_mshr_miss_latency::total 43593.150841 # average ReadReq mshr miss latency +system.cpu.dcache.WriteReq_avg_mshr_miss_latency::cpu.data 76481.945712 # average WriteReq mshr miss latency +system.cpu.dcache.WriteReq_avg_mshr_miss_latency::total 76481.945712 # average WriteReq mshr miss latency +system.cpu.dcache.demand_avg_mshr_miss_latency::cpu.data 66636.959927 # average overall mshr miss latency +system.cpu.dcache.demand_avg_mshr_miss_latency::total 66636.959927 # average overall mshr miss latency +system.cpu.dcache.overall_avg_mshr_miss_latency::cpu.data 66636.959927 # average overall mshr miss latency +system.cpu.dcache.overall_avg_mshr_miss_latency::total 66636.959927 # average overall mshr miss latency +system.cpu.icache.tags.pwrStateResidencyTicks::UNDEFINED 60000593000 # Cumulative time (in ticks) in various power states +system.cpu.icache.tags.replacements 153927 # number of replacements +system.cpu.icache.tags.tagsinuse 1931.746995 # Cycle average of tags in use +system.cpu.icache.tags.total_refs 25490420 # Total number of references to valid blocks. +system.cpu.icache.tags.sampled_refs 155975 # Sample count of references to valid blocks. +system.cpu.icache.tags.avg_refs 163.426318 # Average number of references to valid blocks. +system.cpu.icache.tags.warmup_cycle 42594058500 # Cycle when the warmup percentage was hit. +system.cpu.icache.tags.occ_blocks::cpu.inst 1931.746995 # Average occupied blocks per requestor +system.cpu.icache.tags.occ_percent::cpu.inst 0.943236 # Average percentage of cache occupancy +system.cpu.icache.tags.occ_percent::total 0.943236 # Average percentage of cache occupancy system.cpu.icache.tags.occ_task_id_blocks::1024 2048 # Occupied blocks per task id -system.cpu.icache.tags.age_task_id_blocks_1024::0 49 # Occupied blocks per task id -system.cpu.icache.tags.age_task_id_blocks_1024::1 161 # Occupied blocks per task id -system.cpu.icache.tags.age_task_id_blocks_1024::2 1 # Occupied blocks per task id -system.cpu.icache.tags.age_task_id_blocks_1024::3 1039 # Occupied blocks per task id -system.cpu.icache.tags.age_task_id_blocks_1024::4 798 # Occupied blocks per task id +system.cpu.icache.tags.age_task_id_blocks_1024::0 48 # Occupied blocks per task id +system.cpu.icache.tags.age_task_id_blocks_1024::1 164 # Occupied blocks per task id +system.cpu.icache.tags.age_task_id_blocks_1024::2 2 # Occupied blocks per task id +system.cpu.icache.tags.age_task_id_blocks_1024::3 1033 # Occupied blocks per task id +system.cpu.icache.tags.age_task_id_blocks_1024::4 801 # Occupied blocks per task id system.cpu.icache.tags.occ_task_id_percent::1024 1 # Percentage of cache occupancy per task id -system.cpu.icache.tags.tag_accesses 51325982 # Number of tag accesses -system.cpu.icache.tags.data_accesses 51325982 # Number of data accesses -system.cpu.icache.pwrStateResidencyTicks::UNDEFINED 59447065000 # Cumulative time (in ticks) in various power states -system.cpu.icache.ReadReq_hits::cpu.inst 25430610 # number of ReadReq hits -system.cpu.icache.ReadReq_hits::total 25430610 # number of ReadReq hits -system.cpu.icache.demand_hits::cpu.inst 25430610 # number of 
demand (read+write) hits -system.cpu.icache.demand_hits::total 25430610 # number of demand (read+write) hits -system.cpu.icache.overall_hits::cpu.inst 25430610 # number of overall hits -system.cpu.icache.overall_hits::total 25430610 # number of overall hits -system.cpu.icache.ReadReq_misses::cpu.inst 154921 # number of ReadReq misses -system.cpu.icache.ReadReq_misses::total 154921 # number of ReadReq misses -system.cpu.icache.demand_misses::cpu.inst 154921 # number of demand (read+write) misses -system.cpu.icache.demand_misses::total 154921 # number of demand (read+write) misses -system.cpu.icache.overall_misses::cpu.inst 154921 # number of overall misses -system.cpu.icache.overall_misses::total 154921 # number of overall misses -system.cpu.icache.ReadReq_miss_latency::cpu.inst 2483739000 # number of ReadReq miss cycles -system.cpu.icache.ReadReq_miss_latency::total 2483739000 # number of ReadReq miss cycles -system.cpu.icache.demand_miss_latency::cpu.inst 2483739000 # number of demand (read+write) miss cycles -system.cpu.icache.demand_miss_latency::total 2483739000 # number of demand (read+write) miss cycles -system.cpu.icache.overall_miss_latency::cpu.inst 2483739000 # number of overall miss cycles -system.cpu.icache.overall_miss_latency::total 2483739000 # number of overall miss cycles -system.cpu.icache.ReadReq_accesses::cpu.inst 25585531 # number of ReadReq accesses(hits+misses) -system.cpu.icache.ReadReq_accesses::total 25585531 # number of ReadReq accesses(hits+misses) -system.cpu.icache.demand_accesses::cpu.inst 25585531 # number of demand (read+write) accesses -system.cpu.icache.demand_accesses::total 25585531 # number of demand (read+write) accesses -system.cpu.icache.overall_accesses::cpu.inst 25585531 # number of overall (read+write) accesses -system.cpu.icache.overall_accesses::total 25585531 # number of overall (read+write) accesses -system.cpu.icache.ReadReq_miss_rate::cpu.inst 0.006055 # miss rate for ReadReq accesses -system.cpu.icache.ReadReq_miss_rate::total 0.006055 # miss rate for ReadReq accesses -system.cpu.icache.demand_miss_rate::cpu.inst 0.006055 # miss rate for demand accesses -system.cpu.icache.demand_miss_rate::total 0.006055 # miss rate for demand accesses -system.cpu.icache.overall_miss_rate::cpu.inst 0.006055 # miss rate for overall accesses -system.cpu.icache.overall_miss_rate::total 0.006055 # miss rate for overall accesses -system.cpu.icache.ReadReq_avg_miss_latency::cpu.inst 16032.293879 # average ReadReq miss latency -system.cpu.icache.ReadReq_avg_miss_latency::total 16032.293879 # average ReadReq miss latency -system.cpu.icache.demand_avg_miss_latency::cpu.inst 16032.293879 # average overall miss latency -system.cpu.icache.demand_avg_miss_latency::total 16032.293879 # average overall miss latency -system.cpu.icache.overall_avg_miss_latency::cpu.inst 16032.293879 # average overall miss latency -system.cpu.icache.overall_avg_miss_latency::total 16032.293879 # average overall miss latency +system.cpu.icache.tags.tag_accesses 51448767 # Number of tag accesses +system.cpu.icache.tags.data_accesses 51448767 # Number of data accesses +system.cpu.icache.pwrStateResidencyTicks::UNDEFINED 60000593000 # Cumulative time (in ticks) in various power states +system.cpu.icache.ReadReq_hits::cpu.inst 25490420 # number of ReadReq hits +system.cpu.icache.ReadReq_hits::total 25490420 # number of ReadReq hits +system.cpu.icache.demand_hits::cpu.inst 25490420 # number of demand (read+write) hits +system.cpu.icache.demand_hits::total 25490420 # number of demand (read+write) 
hits +system.cpu.icache.overall_hits::cpu.inst 25490420 # number of overall hits +system.cpu.icache.overall_hits::total 25490420 # number of overall hits +system.cpu.icache.ReadReq_misses::cpu.inst 155976 # number of ReadReq misses +system.cpu.icache.ReadReq_misses::total 155976 # number of ReadReq misses +system.cpu.icache.demand_misses::cpu.inst 155976 # number of demand (read+write) misses +system.cpu.icache.demand_misses::total 155976 # number of demand (read+write) misses +system.cpu.icache.overall_misses::cpu.inst 155976 # number of overall misses +system.cpu.icache.overall_misses::total 155976 # number of overall misses +system.cpu.icache.ReadReq_miss_latency::cpu.inst 2495053500 # number of ReadReq miss cycles +system.cpu.icache.ReadReq_miss_latency::total 2495053500 # number of ReadReq miss cycles +system.cpu.icache.demand_miss_latency::cpu.inst 2495053500 # number of demand (read+write) miss cycles +system.cpu.icache.demand_miss_latency::total 2495053500 # number of demand (read+write) miss cycles +system.cpu.icache.overall_miss_latency::cpu.inst 2495053500 # number of overall miss cycles +system.cpu.icache.overall_miss_latency::total 2495053500 # number of overall miss cycles +system.cpu.icache.ReadReq_accesses::cpu.inst 25646396 # number of ReadReq accesses(hits+misses) +system.cpu.icache.ReadReq_accesses::total 25646396 # number of ReadReq accesses(hits+misses) +system.cpu.icache.demand_accesses::cpu.inst 25646396 # number of demand (read+write) accesses +system.cpu.icache.demand_accesses::total 25646396 # number of demand (read+write) accesses +system.cpu.icache.overall_accesses::cpu.inst 25646396 # number of overall (read+write) accesses +system.cpu.icache.overall_accesses::total 25646396 # number of overall (read+write) accesses +system.cpu.icache.ReadReq_miss_rate::cpu.inst 0.006082 # miss rate for ReadReq accesses +system.cpu.icache.ReadReq_miss_rate::total 0.006082 # miss rate for ReadReq accesses +system.cpu.icache.demand_miss_rate::cpu.inst 0.006082 # miss rate for demand accesses +system.cpu.icache.demand_miss_rate::total 0.006082 # miss rate for demand accesses +system.cpu.icache.overall_miss_rate::cpu.inst 0.006082 # miss rate for overall accesses +system.cpu.icache.overall_miss_rate::total 0.006082 # miss rate for overall accesses +system.cpu.icache.ReadReq_avg_miss_latency::cpu.inst 15996.393676 # average ReadReq miss latency +system.cpu.icache.ReadReq_avg_miss_latency::total 15996.393676 # average ReadReq miss latency +system.cpu.icache.demand_avg_miss_latency::cpu.inst 15996.393676 # average overall miss latency +system.cpu.icache.demand_avg_miss_latency::total 15996.393676 # average overall miss latency +system.cpu.icache.overall_avg_miss_latency::cpu.inst 15996.393676 # average overall miss latency +system.cpu.icache.overall_avg_miss_latency::total 15996.393676 # average overall miss latency system.cpu.icache.blocked_cycles::no_mshrs 0 # number of cycles access was blocked system.cpu.icache.blocked_cycles::no_targets 0 # number of cycles access was blocked system.cpu.icache.blocked::no_mshrs 0 # number of cycles access was blocked system.cpu.icache.blocked::no_targets 0 # number of cycles access was blocked system.cpu.icache.avg_blocked_cycles::no_mshrs nan # average number of cycles each access was blocked system.cpu.icache.avg_blocked_cycles::no_targets nan # average number of cycles each access was blocked -system.cpu.icache.writebacks::writebacks 152872 # number of writebacks -system.cpu.icache.writebacks::total 152872 # number of writebacks 
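(Editor's note: the average-latency stats in this block are total miss ticks divided by the miss count. A minimal sketch, illustration only, using the updated icache values above; variable names are assumptions:)

    # icache average ReadReq miss latency (ticks per miss) from the added counters.
    readreq_miss_latency = 2495053500   # system.cpu.icache.ReadReq_miss_latency::total (added)
    readreq_misses       = 155976       # system.cpu.icache.ReadReq_misses::total (added)

    avg_latency = readreq_miss_latency / readreq_misses
    print(f"ReadReq_avg_miss_latency ~ {avg_latency:.2f}")   # ~15996.39, matching the added line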
-system.cpu.icache.ReadReq_mshr_misses::cpu.inst 154921 # number of ReadReq MSHR misses
-system.cpu.icache.ReadReq_mshr_misses::total 154921 # number of ReadReq MSHR misses
-system.cpu.icache.demand_mshr_misses::cpu.inst 154921 # number of demand (read+write) MSHR misses
-system.cpu.icache.demand_mshr_misses::total 154921 # number of demand (read+write) MSHR misses
-system.cpu.icache.overall_mshr_misses::cpu.inst 154921 # number of overall MSHR misses
-system.cpu.icache.overall_mshr_misses::total 154921 # number of overall MSHR misses
-system.cpu.icache.ReadReq_mshr_miss_latency::cpu.inst 2328819000 # number of ReadReq MSHR miss cycles
-system.cpu.icache.ReadReq_mshr_miss_latency::total 2328819000 # number of ReadReq MSHR miss cycles
-system.cpu.icache.demand_mshr_miss_latency::cpu.inst 2328819000 # number of demand (read+write) MSHR miss cycles
-system.cpu.icache.demand_mshr_miss_latency::total 2328819000 # number of demand (read+write) MSHR miss cycles
-system.cpu.icache.overall_mshr_miss_latency::cpu.inst 2328819000 # number of overall MSHR miss cycles
-system.cpu.icache.overall_mshr_miss_latency::total 2328819000 # number of overall MSHR miss cycles
-system.cpu.icache.ReadReq_mshr_miss_rate::cpu.inst 0.006055 # mshr miss rate for ReadReq accesses
-system.cpu.icache.ReadReq_mshr_miss_rate::total 0.006055 # mshr miss rate for ReadReq accesses
-system.cpu.icache.demand_mshr_miss_rate::cpu.inst 0.006055 # mshr miss rate for demand accesses
-system.cpu.icache.demand_mshr_miss_rate::total 0.006055 # mshr miss rate for demand accesses
-system.cpu.icache.overall_mshr_miss_rate::cpu.inst 0.006055 # mshr miss rate for overall accesses
-system.cpu.icache.overall_mshr_miss_rate::total 0.006055 # mshr miss rate for overall accesses
-system.cpu.icache.ReadReq_avg_mshr_miss_latency::cpu.inst 15032.300334 # average ReadReq mshr miss latency
-system.cpu.icache.ReadReq_avg_mshr_miss_latency::total 15032.300334 # average ReadReq mshr miss latency
-system.cpu.icache.demand_avg_mshr_miss_latency::cpu.inst 15032.300334 # average overall mshr miss latency
-system.cpu.icache.demand_avg_mshr_miss_latency::total 15032.300334 # average overall mshr miss latency
-system.cpu.icache.overall_avg_mshr_miss_latency::cpu.inst 15032.300334 # average overall mshr miss latency
-system.cpu.icache.overall_avg_mshr_miss_latency::total 15032.300334 # average overall mshr miss latency
-system.cpu.l2cache.tags.pwrStateResidencyTicks::UNDEFINED 59447065000 # Cumulative time (in ticks) in various power states
-system.cpu.l2cache.tags.replacements 133382 # number of replacements
-system.cpu.l2cache.tags.tagsinuse 30429.048447 # Cycle average of tags in use
-system.cpu.l2cache.tags.total_refs 403995 # Total number of references to valid blocks.
-system.cpu.l2cache.tags.sampled_refs 165492 # Sample count of references to valid blocks.
-system.cpu.l2cache.tags.avg_refs 2.441175 # Average number of references to valid blocks.
+system.cpu.icache.writebacks::writebacks 153927 # number of writebacks +system.cpu.icache.writebacks::total 153927 # number of writebacks +system.cpu.icache.ReadReq_mshr_misses::cpu.inst 155976 # number of ReadReq MSHR misses +system.cpu.icache.ReadReq_mshr_misses::total 155976 # number of ReadReq MSHR misses +system.cpu.icache.demand_mshr_misses::cpu.inst 155976 # number of demand (read+write) MSHR misses +system.cpu.icache.demand_mshr_misses::total 155976 # number of demand (read+write) MSHR misses +system.cpu.icache.overall_mshr_misses::cpu.inst 155976 # number of overall MSHR misses +system.cpu.icache.overall_mshr_misses::total 155976 # number of overall MSHR misses +system.cpu.icache.ReadReq_mshr_miss_latency::cpu.inst 2339078500 # number of ReadReq MSHR miss cycles +system.cpu.icache.ReadReq_mshr_miss_latency::total 2339078500 # number of ReadReq MSHR miss cycles +system.cpu.icache.demand_mshr_miss_latency::cpu.inst 2339078500 # number of demand (read+write) MSHR miss cycles +system.cpu.icache.demand_mshr_miss_latency::total 2339078500 # number of demand (read+write) MSHR miss cycles +system.cpu.icache.overall_mshr_miss_latency::cpu.inst 2339078500 # number of overall MSHR miss cycles +system.cpu.icache.overall_mshr_miss_latency::total 2339078500 # number of overall MSHR miss cycles +system.cpu.icache.ReadReq_mshr_miss_rate::cpu.inst 0.006082 # mshr miss rate for ReadReq accesses +system.cpu.icache.ReadReq_mshr_miss_rate::total 0.006082 # mshr miss rate for ReadReq accesses +system.cpu.icache.demand_mshr_miss_rate::cpu.inst 0.006082 # mshr miss rate for demand accesses +system.cpu.icache.demand_mshr_miss_rate::total 0.006082 # mshr miss rate for demand accesses +system.cpu.icache.overall_mshr_miss_rate::cpu.inst 0.006082 # mshr miss rate for overall accesses +system.cpu.icache.overall_mshr_miss_rate::total 0.006082 # mshr miss rate for overall accesses +system.cpu.icache.ReadReq_avg_mshr_miss_latency::cpu.inst 14996.400087 # average ReadReq mshr miss latency +system.cpu.icache.ReadReq_avg_mshr_miss_latency::total 14996.400087 # average ReadReq mshr miss latency +system.cpu.icache.demand_avg_mshr_miss_latency::cpu.inst 14996.400087 # average overall mshr miss latency +system.cpu.icache.demand_avg_mshr_miss_latency::total 14996.400087 # average overall mshr miss latency +system.cpu.icache.overall_avg_mshr_miss_latency::cpu.inst 14996.400087 # average overall mshr miss latency +system.cpu.icache.overall_avg_mshr_miss_latency::total 14996.400087 # average overall mshr miss latency +system.cpu.l2cache.tags.pwrStateResidencyTicks::UNDEFINED 60000593000 # Cumulative time (in ticks) in various power states +system.cpu.l2cache.tags.replacements 133391 # number of replacements +system.cpu.l2cache.tags.tagsinuse 30427.789253 # Cycle average of tags in use +system.cpu.l2cache.tags.total_refs 406173 # Total number of references to valid blocks. +system.cpu.l2cache.tags.sampled_refs 165503 # Sample count of references to valid blocks. +system.cpu.l2cache.tags.avg_refs 2.454173 # Average number of references to valid blocks. system.cpu.l2cache.tags.warmup_cycle 0 # Cycle when the warmup percentage was hit. 
-system.cpu.l2cache.tags.occ_blocks::writebacks 26350.763451 # Average occupied blocks per requestor -system.cpu.l2cache.tags.occ_blocks::cpu.inst 2094.967777 # Average occupied blocks per requestor -system.cpu.l2cache.tags.occ_blocks::cpu.data 1983.317219 # Average occupied blocks per requestor -system.cpu.l2cache.tags.occ_percent::writebacks 0.804161 # Average percentage of cache occupancy -system.cpu.l2cache.tags.occ_percent::cpu.inst 0.063933 # Average percentage of cache occupancy -system.cpu.l2cache.tags.occ_percent::cpu.data 0.060526 # Average percentage of cache occupancy -system.cpu.l2cache.tags.occ_percent::total 0.928621 # Average percentage of cache occupancy -system.cpu.l2cache.tags.occ_task_id_blocks::1024 32110 # Occupied blocks per task id -system.cpu.l2cache.tags.age_task_id_blocks_1024::0 165 # Occupied blocks per task id -system.cpu.l2cache.tags.age_task_id_blocks_1024::1 1093 # Occupied blocks per task id -system.cpu.l2cache.tags.age_task_id_blocks_1024::2 11874 # Occupied blocks per task id -system.cpu.l2cache.tags.age_task_id_blocks_1024::3 18854 # Occupied blocks per task id +system.cpu.l2cache.tags.occ_blocks::writebacks 26336.336681 # Average occupied blocks per requestor +system.cpu.l2cache.tags.occ_blocks::cpu.inst 2098.353555 # Average occupied blocks per requestor +system.cpu.l2cache.tags.occ_blocks::cpu.data 1993.099017 # Average occupied blocks per requestor +system.cpu.l2cache.tags.occ_percent::writebacks 0.803721 # Average percentage of cache occupancy +system.cpu.l2cache.tags.occ_percent::cpu.inst 0.064037 # Average percentage of cache occupancy +system.cpu.l2cache.tags.occ_percent::cpu.data 0.060825 # Average percentage of cache occupancy +system.cpu.l2cache.tags.occ_percent::total 0.928582 # Average percentage of cache occupancy +system.cpu.l2cache.tags.occ_task_id_blocks::1024 32112 # Occupied blocks per task id +system.cpu.l2cache.tags.age_task_id_blocks_1024::0 164 # Occupied blocks per task id +system.cpu.l2cache.tags.age_task_id_blocks_1024::1 1064 # Occupied blocks per task id +system.cpu.l2cache.tags.age_task_id_blocks_1024::2 11613 # Occupied blocks per task id +system.cpu.l2cache.tags.age_task_id_blocks_1024::3 19147 # Occupied blocks per task id system.cpu.l2cache.tags.age_task_id_blocks_1024::4 124 # Occupied blocks per task id -system.cpu.l2cache.tags.occ_task_id_percent::1024 0.979919 # Percentage of cache occupancy per task id -system.cpu.l2cache.tags.tag_accesses 6016424 # Number of tag accesses -system.cpu.l2cache.tags.data_accesses 6016424 # Number of data accesses -system.cpu.l2cache.pwrStateResidencyTicks::UNDEFINED 59447065000 # Cumulative time (in ticks) in various power states -system.cpu.l2cache.WritebackDirty_hits::writebacks 168424 # number of WritebackDirty hits -system.cpu.l2cache.WritebackDirty_hits::total 168424 # number of WritebackDirty hits -system.cpu.l2cache.WritebackClean_hits::writebacks 152872 # number of WritebackClean hits -system.cpu.l2cache.WritebackClean_hits::total 152872 # number of WritebackClean hits -system.cpu.l2cache.ReadExReq_hits::cpu.data 12681 # number of ReadExReq hits -system.cpu.l2cache.ReadExReq_hits::total 12681 # number of ReadExReq hits -system.cpu.l2cache.ReadCleanReq_hits::cpu.inst 148157 # number of ReadCleanReq hits -system.cpu.l2cache.ReadCleanReq_hits::total 148157 # number of ReadCleanReq hits -system.cpu.l2cache.ReadSharedReq_hits::cpu.data 33594 # number of ReadSharedReq hits -system.cpu.l2cache.ReadSharedReq_hits::total 33594 # number of ReadSharedReq hits 
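(Editor's note: the l2cache occupancy percentages above appear to be occupied blocks divided by the total block count, which can be recovered from the values in this diff. A sketch, illustration only; the occ_percent formula is an assumption and the variable names are not gem5 identifiers:)

    # Infer the L2 block count from the updated occupancy stats, then
    # check it against the cycle-average number of tags in use.
    occ_blocks_wb = 26336.336681   # system.cpu.l2cache.tags.occ_blocks::writebacks (added)
    occ_pct_wb    = 0.803721       # system.cpu.l2cache.tags.occ_percent::writebacks (added)
    tags_in_use   = 30427.789253   # system.cpu.l2cache.tags.tagsinuse (added)

    total_blocks = occ_blocks_wb / occ_pct_wb
    print(f"total L2 blocks ~ {total_blocks:.0f}")                      # ~32768
    print(f"occ_percent::total ~ {tags_in_use / total_blocks:.6f}")     # ~0.928582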
-system.cpu.l2cache.demand_hits::cpu.inst 148157 # number of demand (read+write) hits -system.cpu.l2cache.demand_hits::cpu.data 46275 # number of demand (read+write) hits -system.cpu.l2cache.demand_hits::total 194432 # number of demand (read+write) hits -system.cpu.l2cache.overall_hits::cpu.inst 148157 # number of overall hits -system.cpu.l2cache.overall_hits::cpu.data 46275 # number of overall hits -system.cpu.l2cache.overall_hits::total 194432 # number of overall hits +system.cpu.l2cache.tags.occ_task_id_percent::1024 0.979980 # Percentage of cache occupancy per task id +system.cpu.l2cache.tags.tag_accesses 6033974 # Number of tag accesses +system.cpu.l2cache.tags.data_accesses 6033974 # Number of data accesses +system.cpu.l2cache.pwrStateResidencyTicks::UNDEFINED 60000593000 # Cumulative time (in ticks) in various power states +system.cpu.l2cache.WritebackDirty_hits::writebacks 168446 # number of WritebackDirty hits +system.cpu.l2cache.WritebackDirty_hits::total 168446 # number of WritebackDirty hits +system.cpu.l2cache.WritebackClean_hits::writebacks 153927 # number of WritebackClean hits +system.cpu.l2cache.WritebackClean_hits::total 153927 # number of WritebackClean hits +system.cpu.l2cache.ReadExReq_hits::cpu.data 12684 # number of ReadExReq hits +system.cpu.l2cache.ReadExReq_hits::total 12684 # number of ReadExReq hits +system.cpu.l2cache.ReadCleanReq_hits::cpu.inst 149204 # number of ReadCleanReq hits +system.cpu.l2cache.ReadCleanReq_hits::total 149204 # number of ReadCleanReq hits +system.cpu.l2cache.ReadSharedReq_hits::cpu.data 33621 # number of ReadSharedReq hits +system.cpu.l2cache.ReadSharedReq_hits::total 33621 # number of ReadSharedReq hits +system.cpu.l2cache.demand_hits::cpu.inst 149204 # number of demand (read+write) hits +system.cpu.l2cache.demand_hits::cpu.data 46305 # number of demand (read+write) hits +system.cpu.l2cache.demand_hits::total 195509 # number of demand (read+write) hits +system.cpu.l2cache.overall_hits::cpu.inst 149204 # number of overall hits +system.cpu.l2cache.overall_hits::cpu.data 46305 # number of overall hits +system.cpu.l2cache.overall_hits::total 195509 # number of overall hits system.cpu.l2cache.ReadExReq_misses::cpu.data 130883 # number of ReadExReq misses system.cpu.l2cache.ReadExReq_misses::total 130883 # number of ReadExReq misses -system.cpu.l2cache.ReadCleanReq_misses::cpu.inst 6764 # number of ReadCleanReq misses -system.cpu.l2cache.ReadCleanReq_misses::total 6764 # number of ReadCleanReq misses -system.cpu.l2cache.ReadSharedReq_misses::cpu.data 27704 # number of ReadSharedReq misses -system.cpu.l2cache.ReadSharedReq_misses::total 27704 # number of ReadSharedReq misses -system.cpu.l2cache.demand_misses::cpu.inst 6764 # number of demand (read+write) misses -system.cpu.l2cache.demand_misses::cpu.data 158587 # number of demand (read+write) misses -system.cpu.l2cache.demand_misses::total 165351 # number of demand (read+write) misses -system.cpu.l2cache.overall_misses::cpu.inst 6764 # number of overall misses -system.cpu.l2cache.overall_misses::cpu.data 158587 # number of overall misses -system.cpu.l2cache.overall_misses::total 165351 # number of overall misses -system.cpu.l2cache.ReadExReq_miss_latency::cpu.data 10626878000 # number of ReadExReq miss cycles -system.cpu.l2cache.ReadExReq_miss_latency::total 10626878000 # number of ReadExReq miss cycles -system.cpu.l2cache.ReadCleanReq_miss_latency::cpu.inst 540586000 # number of ReadCleanReq miss cycles -system.cpu.l2cache.ReadCleanReq_miss_latency::total 540586000 # number of ReadCleanReq 
miss cycles -system.cpu.l2cache.ReadSharedReq_miss_latency::cpu.data 2236085500 # number of ReadSharedReq miss cycles -system.cpu.l2cache.ReadSharedReq_miss_latency::total 2236085500 # number of ReadSharedReq miss cycles -system.cpu.l2cache.demand_miss_latency::cpu.inst 540586000 # number of demand (read+write) miss cycles -system.cpu.l2cache.demand_miss_latency::cpu.data 12862963500 # number of demand (read+write) miss cycles -system.cpu.l2cache.demand_miss_latency::total 13403549500 # number of demand (read+write) miss cycles -system.cpu.l2cache.overall_miss_latency::cpu.inst 540586000 # number of overall miss cycles -system.cpu.l2cache.overall_miss_latency::cpu.data 12862963500 # number of overall miss cycles -system.cpu.l2cache.overall_miss_latency::total 13403549500 # number of overall miss cycles -system.cpu.l2cache.WritebackDirty_accesses::writebacks 168424 # number of WritebackDirty accesses(hits+misses) -system.cpu.l2cache.WritebackDirty_accesses::total 168424 # number of WritebackDirty accesses(hits+misses) -system.cpu.l2cache.WritebackClean_accesses::writebacks 152872 # number of WritebackClean accesses(hits+misses) -system.cpu.l2cache.WritebackClean_accesses::total 152872 # number of WritebackClean accesses(hits+misses) -system.cpu.l2cache.ReadExReq_accesses::cpu.data 143564 # number of ReadExReq accesses(hits+misses) -system.cpu.l2cache.ReadExReq_accesses::total 143564 # number of ReadExReq accesses(hits+misses) -system.cpu.l2cache.ReadCleanReq_accesses::cpu.inst 154921 # number of ReadCleanReq accesses(hits+misses) -system.cpu.l2cache.ReadCleanReq_accesses::total 154921 # number of ReadCleanReq accesses(hits+misses) -system.cpu.l2cache.ReadSharedReq_accesses::cpu.data 61298 # number of ReadSharedReq accesses(hits+misses) -system.cpu.l2cache.ReadSharedReq_accesses::total 61298 # number of ReadSharedReq accesses(hits+misses) -system.cpu.l2cache.demand_accesses::cpu.inst 154921 # number of demand (read+write) accesses -system.cpu.l2cache.demand_accesses::cpu.data 204862 # number of demand (read+write) accesses -system.cpu.l2cache.demand_accesses::total 359783 # number of demand (read+write) accesses -system.cpu.l2cache.overall_accesses::cpu.inst 154921 # number of overall (read+write) accesses -system.cpu.l2cache.overall_accesses::cpu.data 204862 # number of overall (read+write) accesses -system.cpu.l2cache.overall_accesses::total 359783 # number of overall (read+write) accesses -system.cpu.l2cache.ReadExReq_miss_rate::cpu.data 0.911670 # miss rate for ReadExReq accesses -system.cpu.l2cache.ReadExReq_miss_rate::total 0.911670 # miss rate for ReadExReq accesses -system.cpu.l2cache.ReadCleanReq_miss_rate::cpu.inst 0.043661 # miss rate for ReadCleanReq accesses -system.cpu.l2cache.ReadCleanReq_miss_rate::total 0.043661 # miss rate for ReadCleanReq accesses -system.cpu.l2cache.ReadSharedReq_miss_rate::cpu.data 0.451956 # miss rate for ReadSharedReq accesses -system.cpu.l2cache.ReadSharedReq_miss_rate::total 0.451956 # miss rate for ReadSharedReq accesses -system.cpu.l2cache.demand_miss_rate::cpu.inst 0.043661 # miss rate for demand accesses -system.cpu.l2cache.demand_miss_rate::cpu.data 0.774116 # miss rate for demand accesses -system.cpu.l2cache.demand_miss_rate::total 0.459585 # miss rate for demand accesses -system.cpu.l2cache.overall_miss_rate::cpu.inst 0.043661 # miss rate for overall accesses -system.cpu.l2cache.overall_miss_rate::cpu.data 0.774116 # miss rate for overall accesses -system.cpu.l2cache.overall_miss_rate::total 0.459585 # miss rate for overall accesses 
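(Editor's note: since this commit is a bulk refresh of stats.txt references, diffs like this one are easier to review when reduced to relative changes per stat. A hedged sketch of one way to do that; parse_stats and report_changes are hypothetical helpers written for illustration, not gem5 utilities:)

    # Compare two gem5 stats.txt dumps and print stats whose value moved
    # by more than a relative threshold.
    def parse_stats(text):
        """Extract {stat_name: value} from 'name value ... # description' lines."""
        stats = {}
        for line in text.splitlines():
            line = line.split('#', 1)[0].strip()   # drop the description
            parts = line.split()
            if len(parts) >= 2:
                try:
                    stats[parts[0]] = float(parts[1])
                except ValueError:
                    pass                            # skip non-numeric values
        return stats

    def report_changes(old_text, new_text, threshold=0.01):
        old, new = parse_stats(old_text), parse_stats(new_text)
        for name in sorted(old.keys() & new.keys()):
            if old[name] and abs(new[name] - old[name]) / abs(old[name]) > threshold:
                print(f"{name}: {old[name]:g} -> {new[name]:g}")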
-system.cpu.l2cache.ReadExReq_avg_miss_latency::cpu.data 81193.722638 # average ReadExReq miss latency -system.cpu.l2cache.ReadExReq_avg_miss_latency::total 81193.722638 # average ReadExReq miss latency -system.cpu.l2cache.ReadCleanReq_avg_miss_latency::cpu.inst 79921.052632 # average ReadCleanReq miss latency -system.cpu.l2cache.ReadCleanReq_avg_miss_latency::total 79921.052632 # average ReadCleanReq miss latency -system.cpu.l2cache.ReadSharedReq_avg_miss_latency::cpu.data 80713.452931 # average ReadSharedReq miss latency -system.cpu.l2cache.ReadSharedReq_avg_miss_latency::total 80713.452931 # average ReadSharedReq miss latency -system.cpu.l2cache.demand_avg_miss_latency::cpu.inst 79921.052632 # average overall miss latency -system.cpu.l2cache.demand_avg_miss_latency::cpu.data 81109.822999 # average overall miss latency -system.cpu.l2cache.demand_avg_miss_latency::total 81061.194066 # average overall miss latency -system.cpu.l2cache.overall_avg_miss_latency::cpu.inst 79921.052632 # average overall miss latency -system.cpu.l2cache.overall_avg_miss_latency::cpu.data 81109.822999 # average overall miss latency -system.cpu.l2cache.overall_avg_miss_latency::total 81061.194066 # average overall miss latency +system.cpu.l2cache.ReadCleanReq_misses::cpu.inst 6772 # number of ReadCleanReq misses +system.cpu.l2cache.ReadCleanReq_misses::total 6772 # number of ReadCleanReq misses +system.cpu.l2cache.ReadSharedReq_misses::cpu.data 27715 # number of ReadSharedReq misses +system.cpu.l2cache.ReadSharedReq_misses::total 27715 # number of ReadSharedReq misses +system.cpu.l2cache.demand_misses::cpu.inst 6772 # number of demand (read+write) misses +system.cpu.l2cache.demand_misses::cpu.data 158598 # number of demand (read+write) misses +system.cpu.l2cache.demand_misses::total 165370 # number of demand (read+write) misses +system.cpu.l2cache.overall_misses::cpu.inst 6772 # number of overall misses +system.cpu.l2cache.overall_misses::cpu.data 158598 # number of overall misses +system.cpu.l2cache.overall_misses::total 165370 # number of overall misses +system.cpu.l2cache.ReadExReq_miss_latency::cpu.data 10631688000 # number of ReadExReq miss cycles +system.cpu.l2cache.ReadExReq_miss_latency::total 10631688000 # number of ReadExReq miss cycles +system.cpu.l2cache.ReadCleanReq_miss_latency::cpu.inst 538317500 # number of ReadCleanReq miss cycles +system.cpu.l2cache.ReadCleanReq_miss_latency::total 538317500 # number of ReadCleanReq miss cycles +system.cpu.l2cache.ReadSharedReq_miss_latency::cpu.data 2228543000 # number of ReadSharedReq miss cycles +system.cpu.l2cache.ReadSharedReq_miss_latency::total 2228543000 # number of ReadSharedReq miss cycles +system.cpu.l2cache.demand_miss_latency::cpu.inst 538317500 # number of demand (read+write) miss cycles +system.cpu.l2cache.demand_miss_latency::cpu.data 12860231000 # number of demand (read+write) miss cycles +system.cpu.l2cache.demand_miss_latency::total 13398548500 # number of demand (read+write) miss cycles +system.cpu.l2cache.overall_miss_latency::cpu.inst 538317500 # number of overall miss cycles +system.cpu.l2cache.overall_miss_latency::cpu.data 12860231000 # number of overall miss cycles +system.cpu.l2cache.overall_miss_latency::total 13398548500 # number of overall miss cycles +system.cpu.l2cache.WritebackDirty_accesses::writebacks 168446 # number of WritebackDirty accesses(hits+misses) +system.cpu.l2cache.WritebackDirty_accesses::total 168446 # number of WritebackDirty accesses(hits+misses) +system.cpu.l2cache.WritebackClean_accesses::writebacks 153927 # 
number of WritebackClean accesses(hits+misses) +system.cpu.l2cache.WritebackClean_accesses::total 153927 # number of WritebackClean accesses(hits+misses) +system.cpu.l2cache.ReadExReq_accesses::cpu.data 143567 # number of ReadExReq accesses(hits+misses) +system.cpu.l2cache.ReadExReq_accesses::total 143567 # number of ReadExReq accesses(hits+misses) +system.cpu.l2cache.ReadCleanReq_accesses::cpu.inst 155976 # number of ReadCleanReq accesses(hits+misses) +system.cpu.l2cache.ReadCleanReq_accesses::total 155976 # number of ReadCleanReq accesses(hits+misses) +system.cpu.l2cache.ReadSharedReq_accesses::cpu.data 61336 # number of ReadSharedReq accesses(hits+misses) +system.cpu.l2cache.ReadSharedReq_accesses::total 61336 # number of ReadSharedReq accesses(hits+misses) +system.cpu.l2cache.demand_accesses::cpu.inst 155976 # number of demand (read+write) accesses +system.cpu.l2cache.demand_accesses::cpu.data 204903 # number of demand (read+write) accesses +system.cpu.l2cache.demand_accesses::total 360879 # number of demand (read+write) accesses +system.cpu.l2cache.overall_accesses::cpu.inst 155976 # number of overall (read+write) accesses +system.cpu.l2cache.overall_accesses::cpu.data 204903 # number of overall (read+write) accesses +system.cpu.l2cache.overall_accesses::total 360879 # number of overall (read+write) accesses +system.cpu.l2cache.ReadExReq_miss_rate::cpu.data 0.911651 # miss rate for ReadExReq accesses +system.cpu.l2cache.ReadExReq_miss_rate::total 0.911651 # miss rate for ReadExReq accesses +system.cpu.l2cache.ReadCleanReq_miss_rate::cpu.inst 0.043417 # miss rate for ReadCleanReq accesses +system.cpu.l2cache.ReadCleanReq_miss_rate::total 0.043417 # miss rate for ReadCleanReq accesses +system.cpu.l2cache.ReadSharedReq_miss_rate::cpu.data 0.451855 # miss rate for ReadSharedReq accesses +system.cpu.l2cache.ReadSharedReq_miss_rate::total 0.451855 # miss rate for ReadSharedReq accesses +system.cpu.l2cache.demand_miss_rate::cpu.inst 0.043417 # miss rate for demand accesses +system.cpu.l2cache.demand_miss_rate::cpu.data 0.774015 # miss rate for demand accesses +system.cpu.l2cache.demand_miss_rate::total 0.458242 # miss rate for demand accesses +system.cpu.l2cache.overall_miss_rate::cpu.inst 0.043417 # miss rate for overall accesses +system.cpu.l2cache.overall_miss_rate::cpu.data 0.774015 # miss rate for overall accesses +system.cpu.l2cache.overall_miss_rate::total 0.458242 # miss rate for overall accesses +system.cpu.l2cache.ReadExReq_avg_miss_latency::cpu.data 81230.473018 # average ReadExReq miss latency +system.cpu.l2cache.ReadExReq_avg_miss_latency::total 81230.473018 # average ReadExReq miss latency +system.cpu.l2cache.ReadCleanReq_avg_miss_latency::cpu.inst 79491.656822 # average ReadCleanReq miss latency +system.cpu.l2cache.ReadCleanReq_avg_miss_latency::total 79491.656822 # average ReadCleanReq miss latency +system.cpu.l2cache.ReadSharedReq_avg_miss_latency::cpu.data 80409.272957 # average ReadSharedReq miss latency +system.cpu.l2cache.ReadSharedReq_avg_miss_latency::total 80409.272957 # average ReadSharedReq miss latency +system.cpu.l2cache.demand_avg_miss_latency::cpu.inst 79491.656822 # average overall miss latency +system.cpu.l2cache.demand_avg_miss_latency::cpu.data 81086.968310 # average overall miss latency +system.cpu.l2cache.demand_avg_miss_latency::total 81021.639354 # average overall miss latency +system.cpu.l2cache.overall_avg_miss_latency::cpu.inst 79491.656822 # average overall miss latency +system.cpu.l2cache.overall_avg_miss_latency::cpu.data 81086.968310 # average 
overall miss latency +system.cpu.l2cache.overall_avg_miss_latency::total 81021.639354 # average overall miss latency system.cpu.l2cache.blocked_cycles::no_mshrs 0 # number of cycles access was blocked system.cpu.l2cache.blocked_cycles::no_targets 0 # number of cycles access was blocked system.cpu.l2cache.blocked::no_mshrs 0 # number of cycles access was blocked system.cpu.l2cache.blocked::no_targets 0 # number of cycles access was blocked system.cpu.l2cache.avg_blocked_cycles::no_mshrs nan # average number of cycles each access was blocked system.cpu.l2cache.avg_blocked_cycles::no_targets nan # average number of cycles each access was blocked -system.cpu.l2cache.writebacks::writebacks 114469 # number of writebacks -system.cpu.l2cache.writebacks::total 114469 # number of writebacks +system.cpu.l2cache.writebacks::writebacks 114468 # number of writebacks +system.cpu.l2cache.writebacks::total 114468 # number of writebacks system.cpu.l2cache.CleanEvict_mshr_misses::writebacks 115 # number of CleanEvict MSHR misses system.cpu.l2cache.CleanEvict_mshr_misses::total 115 # number of CleanEvict MSHR misses system.cpu.l2cache.ReadExReq_mshr_misses::cpu.data 130883 # number of ReadExReq MSHR misses system.cpu.l2cache.ReadExReq_mshr_misses::total 130883 # number of ReadExReq MSHR misses -system.cpu.l2cache.ReadCleanReq_mshr_misses::cpu.inst 6764 # number of ReadCleanReq MSHR misses -system.cpu.l2cache.ReadCleanReq_mshr_misses::total 6764 # number of ReadCleanReq MSHR misses -system.cpu.l2cache.ReadSharedReq_mshr_misses::cpu.data 27704 # number of ReadSharedReq MSHR misses -system.cpu.l2cache.ReadSharedReq_mshr_misses::total 27704 # number of ReadSharedReq MSHR misses -system.cpu.l2cache.demand_mshr_misses::cpu.inst 6764 # number of demand (read+write) MSHR misses -system.cpu.l2cache.demand_mshr_misses::cpu.data 158587 # number of demand (read+write) MSHR misses -system.cpu.l2cache.demand_mshr_misses::total 165351 # number of demand (read+write) MSHR misses -system.cpu.l2cache.overall_mshr_misses::cpu.inst 6764 # number of overall MSHR misses -system.cpu.l2cache.overall_mshr_misses::cpu.data 158587 # number of overall MSHR misses -system.cpu.l2cache.overall_mshr_misses::total 165351 # number of overall MSHR misses -system.cpu.l2cache.ReadExReq_mshr_miss_latency::cpu.data 9318048000 # number of ReadExReq MSHR miss cycles -system.cpu.l2cache.ReadExReq_mshr_miss_latency::total 9318048000 # number of ReadExReq MSHR miss cycles -system.cpu.l2cache.ReadCleanReq_mshr_miss_latency::cpu.inst 472956000 # number of ReadCleanReq MSHR miss cycles -system.cpu.l2cache.ReadCleanReq_mshr_miss_latency::total 472956000 # number of ReadCleanReq MSHR miss cycles -system.cpu.l2cache.ReadSharedReq_mshr_miss_latency::cpu.data 1959045500 # number of ReadSharedReq MSHR miss cycles -system.cpu.l2cache.ReadSharedReq_mshr_miss_latency::total 1959045500 # number of ReadSharedReq MSHR miss cycles -system.cpu.l2cache.demand_mshr_miss_latency::cpu.inst 472956000 # number of demand (read+write) MSHR miss cycles -system.cpu.l2cache.demand_mshr_miss_latency::cpu.data 11277093500 # number of demand (read+write) MSHR miss cycles -system.cpu.l2cache.demand_mshr_miss_latency::total 11750049500 # number of demand (read+write) MSHR miss cycles -system.cpu.l2cache.overall_mshr_miss_latency::cpu.inst 472956000 # number of overall MSHR miss cycles -system.cpu.l2cache.overall_mshr_miss_latency::cpu.data 11277093500 # number of overall MSHR miss cycles -system.cpu.l2cache.overall_mshr_miss_latency::total 11750049500 # number of overall MSHR miss 
cycles +system.cpu.l2cache.ReadCleanReq_mshr_misses::cpu.inst 6772 # number of ReadCleanReq MSHR misses +system.cpu.l2cache.ReadCleanReq_mshr_misses::total 6772 # number of ReadCleanReq MSHR misses +system.cpu.l2cache.ReadSharedReq_mshr_misses::cpu.data 27715 # number of ReadSharedReq MSHR misses +system.cpu.l2cache.ReadSharedReq_mshr_misses::total 27715 # number of ReadSharedReq MSHR misses +system.cpu.l2cache.demand_mshr_misses::cpu.inst 6772 # number of demand (read+write) MSHR misses +system.cpu.l2cache.demand_mshr_misses::cpu.data 158598 # number of demand (read+write) MSHR misses +system.cpu.l2cache.demand_mshr_misses::total 165370 # number of demand (read+write) MSHR misses +system.cpu.l2cache.overall_mshr_misses::cpu.inst 6772 # number of overall MSHR misses +system.cpu.l2cache.overall_mshr_misses::cpu.data 158598 # number of overall MSHR misses +system.cpu.l2cache.overall_mshr_misses::total 165370 # number of overall MSHR misses +system.cpu.l2cache.ReadExReq_mshr_miss_latency::cpu.data 9322858000 # number of ReadExReq MSHR miss cycles +system.cpu.l2cache.ReadExReq_mshr_miss_latency::total 9322858000 # number of ReadExReq MSHR miss cycles +system.cpu.l2cache.ReadCleanReq_mshr_miss_latency::cpu.inst 470607500 # number of ReadCleanReq MSHR miss cycles +system.cpu.l2cache.ReadCleanReq_mshr_miss_latency::total 470607500 # number of ReadCleanReq MSHR miss cycles +system.cpu.l2cache.ReadSharedReq_mshr_miss_latency::cpu.data 1951393000 # number of ReadSharedReq MSHR miss cycles +system.cpu.l2cache.ReadSharedReq_mshr_miss_latency::total 1951393000 # number of ReadSharedReq MSHR miss cycles +system.cpu.l2cache.demand_mshr_miss_latency::cpu.inst 470607500 # number of demand (read+write) MSHR miss cycles +system.cpu.l2cache.demand_mshr_miss_latency::cpu.data 11274251000 # number of demand (read+write) MSHR miss cycles +system.cpu.l2cache.demand_mshr_miss_latency::total 11744858500 # number of demand (read+write) MSHR miss cycles +system.cpu.l2cache.overall_mshr_miss_latency::cpu.inst 470607500 # number of overall MSHR miss cycles +system.cpu.l2cache.overall_mshr_miss_latency::cpu.data 11274251000 # number of overall MSHR miss cycles +system.cpu.l2cache.overall_mshr_miss_latency::total 11744858500 # number of overall MSHR miss cycles system.cpu.l2cache.CleanEvict_mshr_miss_rate::writebacks inf # mshr miss rate for CleanEvict accesses system.cpu.l2cache.CleanEvict_mshr_miss_rate::total inf # mshr miss rate for CleanEvict accesses -system.cpu.l2cache.ReadExReq_mshr_miss_rate::cpu.data 0.911670 # mshr miss rate for ReadExReq accesses -system.cpu.l2cache.ReadExReq_mshr_miss_rate::total 0.911670 # mshr miss rate for ReadExReq accesses -system.cpu.l2cache.ReadCleanReq_mshr_miss_rate::cpu.inst 0.043661 # mshr miss rate for ReadCleanReq accesses -system.cpu.l2cache.ReadCleanReq_mshr_miss_rate::total 0.043661 # mshr miss rate for ReadCleanReq accesses -system.cpu.l2cache.ReadSharedReq_mshr_miss_rate::cpu.data 0.451956 # mshr miss rate for ReadSharedReq accesses -system.cpu.l2cache.ReadSharedReq_mshr_miss_rate::total 0.451956 # mshr miss rate for ReadSharedReq accesses -system.cpu.l2cache.demand_mshr_miss_rate::cpu.inst 0.043661 # mshr miss rate for demand accesses -system.cpu.l2cache.demand_mshr_miss_rate::cpu.data 0.774116 # mshr miss rate for demand accesses -system.cpu.l2cache.demand_mshr_miss_rate::total 0.459585 # mshr miss rate for demand accesses -system.cpu.l2cache.overall_mshr_miss_rate::cpu.inst 0.043661 # mshr miss rate for overall accesses 
-system.cpu.l2cache.overall_mshr_miss_rate::cpu.data 0.774116 # mshr miss rate for overall accesses -system.cpu.l2cache.overall_mshr_miss_rate::total 0.459585 # mshr miss rate for overall accesses -system.cpu.l2cache.ReadExReq_avg_mshr_miss_latency::cpu.data 71193.722638 # average ReadExReq mshr miss latency -system.cpu.l2cache.ReadExReq_avg_mshr_miss_latency::total 71193.722638 # average ReadExReq mshr miss latency -system.cpu.l2cache.ReadCleanReq_avg_mshr_miss_latency::cpu.inst 69922.531047 # average ReadCleanReq mshr miss latency -system.cpu.l2cache.ReadCleanReq_avg_mshr_miss_latency::total 69922.531047 # average ReadCleanReq mshr miss latency -system.cpu.l2cache.ReadSharedReq_avg_mshr_miss_latency::cpu.data 70713.452931 # average ReadSharedReq mshr miss latency -system.cpu.l2cache.ReadSharedReq_avg_mshr_miss_latency::total 70713.452931 # average ReadSharedReq mshr miss latency -system.cpu.l2cache.demand_avg_mshr_miss_latency::cpu.inst 69922.531047 # average overall mshr miss latency -system.cpu.l2cache.demand_avg_mshr_miss_latency::cpu.data 71109.822999 # average overall mshr miss latency -system.cpu.l2cache.demand_avg_mshr_miss_latency::total 71061.254543 # average overall mshr miss latency -system.cpu.l2cache.overall_avg_mshr_miss_latency::cpu.inst 69922.531047 # average overall mshr miss latency -system.cpu.l2cache.overall_avg_mshr_miss_latency::cpu.data 71109.822999 # average overall mshr miss latency -system.cpu.l2cache.overall_avg_mshr_miss_latency::total 71061.254543 # average overall mshr miss latency -system.cpu.toL2Bus.snoop_filter.tot_requests 713421 # Total number of requests made to the snoop filter. -system.cpu.toL2Bus.snoop_filter.hit_single_requests 353638 # Number of requests hitting in the snoop filter with a single holder of the requested data. 
+system.cpu.l2cache.ReadExReq_mshr_miss_rate::cpu.data 0.911651 # mshr miss rate for ReadExReq accesses +system.cpu.l2cache.ReadExReq_mshr_miss_rate::total 0.911651 # mshr miss rate for ReadExReq accesses +system.cpu.l2cache.ReadCleanReq_mshr_miss_rate::cpu.inst 0.043417 # mshr miss rate for ReadCleanReq accesses +system.cpu.l2cache.ReadCleanReq_mshr_miss_rate::total 0.043417 # mshr miss rate for ReadCleanReq accesses +system.cpu.l2cache.ReadSharedReq_mshr_miss_rate::cpu.data 0.451855 # mshr miss rate for ReadSharedReq accesses +system.cpu.l2cache.ReadSharedReq_mshr_miss_rate::total 0.451855 # mshr miss rate for ReadSharedReq accesses +system.cpu.l2cache.demand_mshr_miss_rate::cpu.inst 0.043417 # mshr miss rate for demand accesses +system.cpu.l2cache.demand_mshr_miss_rate::cpu.data 0.774015 # mshr miss rate for demand accesses +system.cpu.l2cache.demand_mshr_miss_rate::total 0.458242 # mshr miss rate for demand accesses +system.cpu.l2cache.overall_mshr_miss_rate::cpu.inst 0.043417 # mshr miss rate for overall accesses +system.cpu.l2cache.overall_mshr_miss_rate::cpu.data 0.774015 # mshr miss rate for overall accesses +system.cpu.l2cache.overall_mshr_miss_rate::total 0.458242 # mshr miss rate for overall accesses +system.cpu.l2cache.ReadExReq_avg_mshr_miss_latency::cpu.data 71230.473018 # average ReadExReq mshr miss latency +system.cpu.l2cache.ReadExReq_avg_mshr_miss_latency::total 71230.473018 # average ReadExReq mshr miss latency +system.cpu.l2cache.ReadCleanReq_avg_mshr_miss_latency::cpu.inst 69493.133491 # average ReadCleanReq mshr miss latency +system.cpu.l2cache.ReadCleanReq_avg_mshr_miss_latency::total 69493.133491 # average ReadCleanReq mshr miss latency +system.cpu.l2cache.ReadSharedReq_avg_mshr_miss_latency::cpu.data 70409.272957 # average ReadSharedReq mshr miss latency +system.cpu.l2cache.ReadSharedReq_avg_mshr_miss_latency::total 70409.272957 # average ReadSharedReq mshr miss latency +system.cpu.l2cache.demand_avg_mshr_miss_latency::cpu.inst 69493.133491 # average overall mshr miss latency +system.cpu.l2cache.demand_avg_mshr_miss_latency::cpu.data 71086.968310 # average overall mshr miss latency +system.cpu.l2cache.demand_avg_mshr_miss_latency::total 71021.699825 # average overall mshr miss latency +system.cpu.l2cache.overall_avg_mshr_miss_latency::cpu.inst 69493.133491 # average overall mshr miss latency +system.cpu.l2cache.overall_avg_mshr_miss_latency::cpu.data 71086.968310 # average overall mshr miss latency +system.cpu.l2cache.overall_avg_mshr_miss_latency::total 71021.699825 # average overall mshr miss latency +system.cpu.toL2Bus.snoop_filter.tot_requests 715613 # Total number of requests made to the snoop filter. +system.cpu.toL2Bus.snoop_filter.hit_single_requests 354734 # Number of requests hitting in the snoop filter with a single holder of the requested data. system.cpu.toL2Bus.snoop_filter.hit_multi_requests 0 # Number of requests hitting in the snoop filter with multiple (>1) holders of the requested data. -system.cpu.toL2Bus.snoop_filter.tot_snoops 4037 # Total number of snoops made to the snoop filter. -system.cpu.toL2Bus.snoop_filter.hit_single_snoops 4037 # Number of snoops hitting in the snoop filter with a single holder of the requested data. +system.cpu.toL2Bus.snoop_filter.tot_snoops 4027 # Total number of snoops made to the snoop filter. +system.cpu.toL2Bus.snoop_filter.hit_single_snoops 4027 # Number of snoops hitting in the snoop filter with a single holder of the requested data. 
system.cpu.toL2Bus.snoop_filter.hit_multi_snoops 0 # Number of snoops hitting in the snoop filter with multiple (>1) holders of the requested data. -system.cpu.toL2Bus.pwrStateResidencyTicks::UNDEFINED 59447065000 # Cumulative time (in ticks) in various power states -system.cpu.toL2Bus.trans_dist::ReadResp 216218 # Transaction distribution -system.cpu.toL2Bus.trans_dist::WritebackDirty 282893 # Transaction distribution -system.cpu.toL2Bus.trans_dist::WritebackClean 152872 # Transaction distribution -system.cpu.toL2Bus.trans_dist::CleanEvict 51255 # Transaction distribution -system.cpu.toL2Bus.trans_dist::ReadExReq 143564 # Transaction distribution -system.cpu.toL2Bus.trans_dist::ReadExResp 143564 # Transaction distribution -system.cpu.toL2Bus.trans_dist::ReadCleanReq 154921 # Transaction distribution -system.cpu.toL2Bus.trans_dist::ReadSharedReq 61298 # Transaction distribution -system.cpu.toL2Bus.pkt_count_system.cpu.icache.mem_side::system.cpu.l2cache.cpu_side 462713 # Packet count per connected master and slave (bytes) -system.cpu.toL2Bus.pkt_count_system.cpu.dcache.mem_side::system.cpu.l2cache.cpu_side 610490 # Packet count per connected master and slave (bytes) -system.cpu.toL2Bus.pkt_count::total 1073203 # Packet count per connected master and slave (bytes) -system.cpu.toL2Bus.pkt_size_system.cpu.icache.mem_side::system.cpu.l2cache.cpu_side 19698688 # Cumulative packet size per connected master and slave (bytes) -system.cpu.toL2Bus.pkt_size_system.cpu.dcache.mem_side::system.cpu.l2cache.cpu_side 23890304 # Cumulative packet size per connected master and slave (bytes) -system.cpu.toL2Bus.pkt_size::total 43588992 # Cumulative packet size per connected master and slave (bytes) -system.cpu.toL2Bus.snoops 133382 # Total snoops (count) -system.cpu.toL2Bus.snoop_fanout::samples 493165 # Request fanout histogram -system.cpu.toL2Bus.snoop_fanout::mean 0.008186 # Request fanout histogram -system.cpu.toL2Bus.snoop_fanout::stdev 0.090105 # Request fanout histogram +system.cpu.toL2Bus.pwrStateResidencyTicks::UNDEFINED 60000593000 # Cumulative time (in ticks) in various power states +system.cpu.toL2Bus.trans_dist::ReadResp 217311 # Transaction distribution +system.cpu.toL2Bus.trans_dist::WritebackDirty 282914 # Transaction distribution +system.cpu.toL2Bus.trans_dist::WritebackClean 153927 # Transaction distribution +system.cpu.toL2Bus.trans_dist::CleanEvict 51284 # Transaction distribution +system.cpu.toL2Bus.trans_dist::ReadExReq 143567 # Transaction distribution +system.cpu.toL2Bus.trans_dist::ReadExResp 143567 # Transaction distribution +system.cpu.toL2Bus.trans_dist::ReadCleanReq 155976 # Transaction distribution +system.cpu.toL2Bus.trans_dist::ReadSharedReq 61336 # Transaction distribution +system.cpu.toL2Bus.pkt_count_system.cpu.icache.mem_side::system.cpu.l2cache.cpu_side 465878 # Packet count per connected master and slave (bytes) +system.cpu.toL2Bus.pkt_count_system.cpu.dcache.mem_side::system.cpu.l2cache.cpu_side 610613 # Packet count per connected master and slave (bytes) +system.cpu.toL2Bus.pkt_count::total 1076491 # Packet count per connected master and slave (bytes) +system.cpu.toL2Bus.pkt_size_system.cpu.icache.mem_side::system.cpu.l2cache.cpu_side 19833728 # Cumulative packet size per connected master and slave (bytes) +system.cpu.toL2Bus.pkt_size_system.cpu.dcache.mem_side::system.cpu.l2cache.cpu_side 23894336 # Cumulative packet size per connected master and slave (bytes) +system.cpu.toL2Bus.pkt_size::total 43728064 # Cumulative packet size per connected master and slave (bytes) 
+system.cpu.toL2Bus.snoops 133391 # Total snoops (count) +system.cpu.toL2Bus.snoopTraffic 7325952 # Total snoop traffic (bytes) +system.cpu.toL2Bus.snoop_fanout::samples 494270 # Request fanout histogram +system.cpu.toL2Bus.snoop_fanout::mean 0.008147 # Request fanout histogram +system.cpu.toL2Bus.snoop_fanout::stdev 0.089894 # Request fanout histogram system.cpu.toL2Bus.snoop_fanout::underflows 0 0.00% 0.00% # Request fanout histogram -system.cpu.toL2Bus.snoop_fanout::0 489128 99.18% 99.18% # Request fanout histogram -system.cpu.toL2Bus.snoop_fanout::1 4037 0.82% 100.00% # Request fanout histogram +system.cpu.toL2Bus.snoop_fanout::0 490243 99.19% 99.19% # Request fanout histogram +system.cpu.toL2Bus.snoop_fanout::1 4027 0.81% 100.00% # Request fanout histogram system.cpu.toL2Bus.snoop_fanout::2 0 0.00% 100.00% # Request fanout histogram system.cpu.toL2Bus.snoop_fanout::overflows 0 0.00% 100.00% # Request fanout histogram system.cpu.toL2Bus.snoop_fanout::min_value 0 # Request fanout histogram system.cpu.toL2Bus.snoop_fanout::max_value 1 # Request fanout histogram -system.cpu.toL2Bus.snoop_fanout::total 493165 # Request fanout histogram -system.cpu.toL2Bus.reqLayer0.occupancy 678006500 # Layer occupancy (ticks) +system.cpu.toL2Bus.snoop_fanout::total 494270 # Request fanout histogram +system.cpu.toL2Bus.reqLayer0.occupancy 680179500 # Layer occupancy (ticks) system.cpu.toL2Bus.reqLayer0.utilization 1.1 # Layer utilization (%) -system.cpu.toL2Bus.respLayer0.occupancy 232381497 # Layer occupancy (ticks) +system.cpu.toL2Bus.respLayer0.occupancy 233962999 # Layer occupancy (ticks) system.cpu.toL2Bus.respLayer0.utilization 0.4 # Layer utilization (%) -system.cpu.toL2Bus.respLayer1.occupancy 307297491 # Layer occupancy (ticks) +system.cpu.toL2Bus.respLayer1.occupancy 307359989 # Layer occupancy (ticks) system.cpu.toL2Bus.respLayer1.utilization 0.5 # Layer utilization (%) -system.membus.pwrStateResidencyTicks::UNDEFINED 59447065000 # Cumulative time (in ticks) in various power states -system.membus.trans_dist::ReadResp 34467 # Transaction distribution -system.membus.trans_dist::WritebackDirty 114469 # Transaction distribution -system.membus.trans_dist::CleanEvict 14990 # Transaction distribution +system.membus.pwrStateResidencyTicks::UNDEFINED 60000593000 # Cumulative time (in ticks) in various power states +system.membus.trans_dist::ReadResp 34486 # Transaction distribution +system.membus.trans_dist::WritebackDirty 114468 # Transaction distribution +system.membus.trans_dist::CleanEvict 15010 # Transaction distribution system.membus.trans_dist::ReadExReq 130883 # Transaction distribution system.membus.trans_dist::ReadExResp 130883 # Transaction distribution -system.membus.trans_dist::ReadSharedReq 34467 # Transaction distribution -system.membus.pkt_count_system.cpu.l2cache.mem_side::system.physmem.port 460159 # Packet count per connected master and slave (bytes) -system.membus.pkt_count::total 460159 # Packet count per connected master and slave (bytes) -system.membus.pkt_size_system.cpu.l2cache.mem_side::system.physmem.port 17908416 # Cumulative packet size per connected master and slave (bytes) -system.membus.pkt_size::total 17908416 # Cumulative packet size per connected master and slave (bytes) +system.membus.trans_dist::ReadSharedReq 34486 # Transaction distribution +system.membus.pkt_count_system.cpu.l2cache.mem_side::system.physmem.port 460216 # Packet count per connected master and slave (bytes) +system.membus.pkt_count::total 460216 # Packet count per connected master and slave (bytes) 
+system.membus.pkt_size_system.cpu.l2cache.mem_side::system.physmem.port 17909568 # Cumulative packet size per connected master and slave (bytes) +system.membus.pkt_size::total 17909568 # Cumulative packet size per connected master and slave (bytes) system.membus.snoops 0 # Total snoops (count) -system.membus.snoop_fanout::samples 294809 # Request fanout histogram +system.membus.snoopTraffic 0 # Total snoop traffic (bytes) +system.membus.snoop_fanout::samples 294847 # Request fanout histogram system.membus.snoop_fanout::mean 0 # Request fanout histogram system.membus.snoop_fanout::stdev 0 # Request fanout histogram system.membus.snoop_fanout::underflows 0 0.00% 0.00% # Request fanout histogram -system.membus.snoop_fanout::0 294809 100.00% 100.00% # Request fanout histogram +system.membus.snoop_fanout::0 294847 100.00% 100.00% # Request fanout histogram system.membus.snoop_fanout::1 0 0.00% 100.00% # Request fanout histogram system.membus.snoop_fanout::overflows 0 0.00% 100.00% # Request fanout histogram system.membus.snoop_fanout::min_value 0 # Request fanout histogram system.membus.snoop_fanout::max_value 0 # Request fanout histogram -system.membus.snoop_fanout::total 294809 # Request fanout histogram -system.membus.reqLayer0.occupancy 822950500 # Layer occupancy (ticks) +system.membus.snoop_fanout::total 294847 # Request fanout histogram +system.membus.reqLayer0.occupancy 819183500 # Layer occupancy (ticks) system.membus.reqLayer0.utilization 1.4 # Layer utilization (%) -system.membus.respLayer1.occupancy 872961750 # Layer occupancy (ticks) +system.membus.respLayer1.occupancy 873079500 # Layer occupancy (ticks) system.membus.respLayer1.utilization 1.5 # Layer utilization (%) ---------- End Simulation Statistics ---------- diff --git a/tests/long/se/50.vortex/ref/alpha/tru64/o3-timing/config.ini b/tests/long/se/50.vortex/ref/alpha/tru64/o3-timing/config.ini index 3a9ebdb7f..d19d770e5 100644 --- a/tests/long/se/50.vortex/ref/alpha/tru64/o3-timing/config.ini +++ b/tests/long/se/50.vortex/ref/alpha/tru64/o3-timing/config.ini @@ -14,7 +14,9 @@ children=clk_domain cpu cpu_clk_domain dvfs_handler membus physmem voltage_domai boot_osflags=a cache_line_size=64 clk_domain=system.clk_domain +default_p_state=UNDEFINED eventq_index=0 +exit_on_work_items=false init_param=0 kernel= kernel_addr_check=true @@ -24,9 +26,16 @@ mem_mode=timing mem_ranges= memories=system.physmem mmap_using_noreserve=false +multi_thread=false num_work_ids=16 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null readfile= symbolfile= +thermal_components= +thermal_model=Null work_begin_ckpt_count=0 work_begin_cpu_id_exit=-1 work_begin_exit_count=0 @@ -68,6 +77,7 @@ cpu_id=0 decodeToFetchDelay=1 decodeToRenameDelay=1 decodeWidth=8 +default_p_state=UNDEFINED dispatchWidth=8 do_checkpoint_insts=true do_quiesce=true @@ -104,6 +114,10 @@ numPhysIntRegs=256 numROBEntries=192 numRobs=1 numThreads=1 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null profile=0 progress_interval=0 renameToDecodeDelay=1 @@ -143,11 +157,18 @@ choicePredictorSize=8192 eventq_index=0 globalCtrBits=2 globalPredictorSize=8192 +indirectHashGHR=true +indirectHashTargets=true +indirectPathLength=3 +indirectSets=256 +indirectTagSize=16 +indirectWays=2 instShiftAmt=2 localCtrBits=2 localHistoryTableSize=2048 localPredictorSize=2048 numThreads=1 +useIndirect=true [system.cpu.dcache] type=Cache @@ -155,13 +176,18 @@ children=tags 
addr_ranges=0:18446744073709551615 assoc=2 clk_domain=system.cpu_clk_domain +clusivity=mostly_incl +default_p_state=UNDEFINED demand_mshr_reserve=1 eventq_index=0 -forward_snoops=true hit_latency=2 is_read_only=false max_miss_count=0 mshrs=4 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null prefetch_on_access=false prefetcher=Null response_latency=2 @@ -171,6 +197,7 @@ system=system tags=system.cpu.dcache.tags tgts_per_mshr=20 write_buffers=8 +writeback_clean=false cpu_side=system.cpu.dcache_port mem_side=system.cpu.toL2Bus.slave[1] @@ -179,8 +206,13 @@ type=LRU assoc=2 block_size=64 clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 hit_latency=2 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sequential_access=false size=262144 @@ -502,13 +534,18 @@ children=tags addr_ranges=0:18446744073709551615 assoc=2 clk_domain=system.cpu_clk_domain +clusivity=mostly_incl +default_p_state=UNDEFINED demand_mshr_reserve=1 eventq_index=0 -forward_snoops=true hit_latency=2 is_read_only=true max_miss_count=0 mshrs=4 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null prefetch_on_access=false prefetcher=Null response_latency=2 @@ -518,6 +555,7 @@ system=system tags=system.cpu.icache.tags tgts_per_mshr=20 write_buffers=8 +writeback_clean=true cpu_side=system.cpu.icache_port mem_side=system.cpu.toL2Bus.slave[0] @@ -526,8 +564,13 @@ type=LRU assoc=2 block_size=64 clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 hit_latency=2 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sequential_access=false size=131072 @@ -551,13 +594,18 @@ children=tags addr_ranges=0:18446744073709551615 assoc=8 clk_domain=system.cpu_clk_domain +clusivity=mostly_incl +default_p_state=UNDEFINED demand_mshr_reserve=1 eventq_index=0 -forward_snoops=true hit_latency=20 is_read_only=false max_miss_count=0 mshrs=20 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null prefetch_on_access=false prefetcher=Null response_latency=20 @@ -567,6 +615,7 @@ system=system tags=system.cpu.l2cache.tags tgts_per_mshr=12 write_buffers=8 +writeback_clean=false cpu_side=system.cpu.toL2Bus.master[0] mem_side=system.membus.slave[1] @@ -575,19 +624,31 @@ type=LRU assoc=8 block_size=64 clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 hit_latency=20 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sequential_access=false size=2097152 [system.cpu.toL2Bus] type=CoherentXBar +children=snoop_filter clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 forward_latency=0 frontend_latency=1 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +point_of_coherency=false +power_model=Null response_latency=1 -snoop_filter=Null +snoop_filter=system.cpu.toL2Bus.snoop_filter snoop_response_latency=1 system=system use_default_range=false @@ -595,6 +656,13 @@ width=32 master=system.cpu.l2cache.cpu_side slave=system.cpu.icache.mem_side system.cpu.dcache.mem_side +[system.cpu.toL2Bus.snoop_filter] +type=SnoopFilter +eventq_index=0 +lookup_latency=0 +max_capacity=8388608 +system=system + [system.cpu.tracer] type=ExeTracer eventq_index=0 @@ -609,7 +677,7 @@ env= errout=cerr euid=100 eventq_index=0 
-executable=/scratch/nilay/GEM5/dist/m5/cpu2000/binaries/alpha/tru64/vortex
+executable=/arm/projectscratch/randd/systems/dist/cpu2000/binaries/alpha/tru64/vortex
gid=100
input=cin
kvmInSE=false
@@ -641,9 +709,15 @@ transition_latency=100000000
[system.membus]
type=CoherentXBar
clk_domain=system.clk_domain
+default_p_state=UNDEFINED
eventq_index=0
forward_latency=4
frontend_latency=3
+p_state_clk_gate_bins=20
+p_state_clk_gate_max=1000000000000
+p_state_clk_gate_min=1000
+point_of_coherency=true
+power_model=Null
response_latency=2
snoop_filter=Null
snoop_response_latency=4
@@ -687,6 +761,7 @@ burst_length=8
channels=1
clk_domain=system.clk_domain
conf_table_reported=true
+default_p_state=UNDEFINED
device_bus_width=8
device_rowbuffer_size=1024
device_size=536870912
@@ -698,7 +773,11 @@ max_accesses_per_row=16
mem_sched_policy=frfcfs
min_writes_per_switch=16
null=false
+p_state_clk_gate_bins=20
+p_state_clk_gate_max=1000000000000
+p_state_clk_gate_min=1000
page_policy=open_adaptive
+power_model=Null
range=0:134217727
ranks_per_channel=2
read_buffer_size=32
diff --git a/tests/long/se/50.vortex/ref/alpha/tru64/o3-timing/simerr b/tests/long/se/50.vortex/ref/alpha/tru64/o3-timing/simerr
index f0a9a7c93..e0bca4e4e 100755
--- a/tests/long/se/50.vortex/ref/alpha/tru64/o3-timing/simerr
+++ b/tests/long/se/50.vortex/ref/alpha/tru64/o3-timing/simerr
@@ -1,5 +1,6 @@
warn: DRAM device capacity (8192 Mbytes) does not match the address range assigned (128 Mbytes)
warn: Sockets disabled, not accepting gdb connections
+warn: ClockedObject: More than one power state change request encountered within the same simulation tick
warn: Prefetch instructions in Alpha do not do anything
warn: Prefetch instructions in Alpha do not do anything
warn: Prefetch instructions in Alpha do not do anything
diff --git a/tests/long/se/50.vortex/ref/alpha/tru64/o3-timing/simout b/tests/long/se/50.vortex/ref/alpha/tru64/o3-timing/simout
index 97f01e80c..e4880ad37 100755
--- a/tests/long/se/50.vortex/ref/alpha/tru64/o3-timing/simout
+++ b/tests/long/se/50.vortex/ref/alpha/tru64/o3-timing/simout
@@ -1,12 +1,14 @@
+Redirecting stdout to build/ALPHA/tests/opt/long/se/50.vortex/alpha/tru64/o3-timing/simout
+Redirecting stderr to build/ALPHA/tests/opt/long/se/50.vortex/alpha/tru64/o3-timing/simerr
gem5 Simulator System. http://gem5.org
gem5 is copyrighted software; use the --copyright option for details.
-gem5 compiled Apr 22 2015 07:55:25
-gem5 started Apr 22 2015 08:46:29
-gem5 executing on phenom
-command line: build/ALPHA/gem5.opt -d build/ALPHA/tests/opt/long/se/50.vortex/alpha/tru64/o3-timing -re /home/stever/hg/m5sim.org/gem5/tests/run.py build/ALPHA/tests/opt/long/se/50.vortex/alpha/tru64/o3-timing
+gem5 compiled Jul 19 2016 12:23:51
+gem5 started Jul 21 2016 14:09:29
+gem5 executing on e108600-lin, pid 4308
+command line: /work/curdun01/gem5-external.hg/build/ALPHA/gem5.opt -d build/ALPHA/tests/opt/long/se/50.vortex/alpha/tru64/o3-timing -re /work/curdun01/gem5-external.hg/tests/testing/../run.py long/se/50.vortex/alpha/tru64/o3-timing
Global frequency set at 1000000000000 ticks per second
info: Entering event queue @ 0. Starting simulation...
info: Increasing stack size by one page.
-Exiting @ tick 22578120000 because target called exit()
+Exiting @ tick 22275010500 because target called exit()
diff --git a/tests/long/se/50.vortex/ref/alpha/tru64/o3-timing/stats.txt b/tests/long/se/50.vortex/ref/alpha/tru64/o3-timing/stats.txt
index a7431aca8..4fef80875 100644
--- a/tests/long/se/50.vortex/ref/alpha/tru64/o3-timing/stats.txt
+++ b/tests/long/se/50.vortex/ref/alpha/tru64/o3-timing/stats.txt
@@ -4,11 +4,11 @@ sim_seconds 0.022275 # Nu
sim_ticks 22275010500 # Number of ticks simulated
final_tick 22275010500 # Number of ticks from beginning of simulation (restored from checkpoints and never reset)
sim_freq 1000000000000 # Frequency of simulated ticks
-host_inst_rate 330986 # Simulator instruction rate (inst/s)
-host_op_rate 330986 # Simulator op (including micro ops) rate (op/s)
-host_tick_rate 92631737 # Simulator tick rate (ticks/s)
-host_mem_usage 306452 # Number of bytes of host memory used
-host_seconds 240.47 # Real time elapsed on the host
+host_inst_rate 202670 # Simulator instruction rate (inst/s)
+host_op_rate 202670 # Simulator op (including micro ops) rate (op/s)
+host_tick_rate 56720302 # Simulator tick rate (ticks/s)
+host_mem_usage 259380 # Number of bytes of host memory used
+host_seconds 392.72 # Real time elapsed on the host
sim_insts 79591756 # Number of instructions simulated
sim_ops 79591756 # Number of ops (including micro ops) simulated
system.voltage_domain.voltage 1 # Voltage in Volts
@@ -1021,6 +1021,7 @@ system.cpu.toL2Bus.pkt_size_system.cpu.icache.mem_side::system.cpu.l2cache.cpu_s
system.cpu.toL2Bus.pkt_size_system.cpu.dcache.mem_side::system.cpu.l2cache.cpu_side 23956480 # Cumulative packet size per connected master and slave (bytes)
system.cpu.toL2Bus.pkt_size::total 35644928 # Cumulative packet size per connected master and slave (bytes)
system.cpu.toL2Bus.snoops 133082 # Total snoops (count)
+system.cpu.toL2Bus.snoopTraffic 7322816 # Total snoop traffic (bytes)
system.cpu.toL2Bus.snoop_fanout::samples 430937 # Request fanout histogram
system.cpu.toL2Bus.snoop_fanout::mean 0.009387 # Request fanout histogram
system.cpu.toL2Bus.snoop_fanout::stdev 0.096428 # Request fanout histogram
@@ -1050,6 +1051,7 @@ system.membus.pkt_count::total 459247 # Pa
system.membus.pkt_size_system.cpu.l2cache.mem_side::system.physmem.port 17886016 # Cumulative packet size per connected master and slave (bytes)
system.membus.pkt_size::total 17886016 # Cumulative packet size per connected master and slave (bytes)
system.membus.snoops 0 # Total snoops (count)
+system.membus.snoopTraffic 0 # Total snoop traffic (bytes)
system.membus.snoop_fanout::samples 294197 # Request fanout histogram
system.membus.snoop_fanout::mean 0 # Request fanout histogram
system.membus.snoop_fanout::stdev 0 # Request fanout histogram
diff --git a/tests/long/se/50.vortex/ref/arm/linux/minor-timing/config.ini b/tests/long/se/50.vortex/ref/arm/linux/minor-timing/config.ini
index 4b3e2746a..7debe9727 100644
--- a/tests/long/se/50.vortex/ref/arm/linux/minor-timing/config.ini
+++ b/tests/long/se/50.vortex/ref/arm/linux/minor-timing/config.ini
@@ -14,6 +14,7 @@ children=clk_domain cpu cpu_clk_domain dvfs_handler membus physmem voltage_domai
boot_osflags=a
cache_line_size=64
clk_domain=system.clk_domain
+default_p_state=UNDEFINED
eventq_index=0
exit_on_work_items=false
init_param=0
@@ -27,8 +28,14 @@ memories=system.physmem
mmap_using_noreserve=false
multi_thread=false
num_work_ids=16
+p_state_clk_gate_bins=20
+p_state_clk_gate_max=1000000000000
+p_state_clk_gate_min=1000
+power_model=Null
readfile= symbolfile= +thermal_components= +thermal_model=Null work_begin_ckpt_count=0 work_begin_cpu_id_exit=-1 work_begin_exit_count=0 @@ -57,6 +64,7 @@ decodeCycleInput=true decodeInputBufferSize=3 decodeInputWidth=2 decodeToExecuteForwardDelay=1 +default_p_state=UNDEFINED do_checkpoint_insts=true do_quiesce=true do_statistics_insts=true @@ -101,12 +109,17 @@ max_insts_any_thread=0 max_loads_all_threads=0 max_loads_any_thread=0 numThreads=1 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null profile=0 progress_interval=0 simpoint_start_insts= socket_id=0 switched_out=false system=system +threadPolicy=RoundRobin tracer=system.cpu.tracer workload=system.cpu.workload dcache_port=system.cpu.dcache.cpu_side @@ -122,11 +135,18 @@ choicePredictorSize=8192 eventq_index=0 globalCtrBits=2 globalPredictorSize=8192 +indirectHashGHR=true +indirectHashTargets=true +indirectPathLength=3 +indirectSets=256 +indirectTagSize=16 +indirectWays=2 instShiftAmt=2 localCtrBits=2 localHistoryTableSize=2048 localPredictorSize=2048 numThreads=1 +useIndirect=true [system.cpu.dcache] type=Cache @@ -135,12 +155,17 @@ addr_ranges=0:18446744073709551615 assoc=2 clk_domain=system.cpu_clk_domain clusivity=mostly_incl +default_p_state=UNDEFINED demand_mshr_reserve=1 eventq_index=0 hit_latency=2 is_read_only=false max_miss_count=0 mshrs=4 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null prefetch_on_access=false prefetcher=Null response_latency=2 @@ -159,8 +184,13 @@ type=LRU assoc=2 block_size=64 clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 hit_latency=2 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sequential_access=false size=262144 @@ -183,9 +213,14 @@ walker=system.cpu.dstage2_mmu.stage2_tlb.walker [system.cpu.dstage2_mmu.stage2_tlb.walker] type=ArmTableWalker clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 is_stage2=true num_squash_per_cycle=2 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sys=system [system.cpu.dtb] @@ -199,9 +234,14 @@ walker=system.cpu.dtb.walker [system.cpu.dtb.walker] type=ArmTableWalker clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 is_stage2=false num_squash_per_cycle=2 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sys=system port=system.cpu.toL2Bus.slave[3] @@ -595,12 +635,17 @@ addr_ranges=0:18446744073709551615 assoc=2 clk_domain=system.cpu_clk_domain clusivity=mostly_incl +default_p_state=UNDEFINED demand_mshr_reserve=1 eventq_index=0 hit_latency=2 is_read_only=true max_miss_count=0 mshrs=4 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null prefetch_on_access=false prefetcher=Null response_latency=2 @@ -619,8 +664,13 @@ type=LRU assoc=2 block_size=64 clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 hit_latency=2 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sequential_access=false size=131072 @@ -678,9 +728,14 @@ walker=system.cpu.istage2_mmu.stage2_tlb.walker [system.cpu.istage2_mmu.stage2_tlb.walker] type=ArmTableWalker clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 is_stage2=true num_squash_per_cycle=2 +p_state_clk_gate_bins=20 
+p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sys=system [system.cpu.itb] @@ -694,9 +749,14 @@ walker=system.cpu.itb.walker [system.cpu.itb.walker] type=ArmTableWalker clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 is_stage2=false num_squash_per_cycle=2 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sys=system port=system.cpu.toL2Bus.slave[2] @@ -707,12 +767,17 @@ addr_ranges=0:18446744073709551615 assoc=8 clk_domain=system.cpu_clk_domain clusivity=mostly_incl +default_p_state=UNDEFINED demand_mshr_reserve=1 eventq_index=0 hit_latency=20 is_read_only=false max_miss_count=0 mshrs=20 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null prefetch_on_access=false prefetcher=Null response_latency=20 @@ -731,8 +796,13 @@ type=LRU assoc=8 block_size=64 clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 hit_latency=20 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sequential_access=false size=2097152 @@ -740,10 +810,15 @@ size=2097152 type=CoherentXBar children=snoop_filter clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 forward_latency=0 frontend_latency=1 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 point_of_coherency=false +power_model=Null response_latency=1 snoop_filter=system.cpu.toL2Bus.snoop_filter snoop_response_latency=1 @@ -774,7 +849,7 @@ env= errout=cerr euid=100 eventq_index=0 -executable=/dist/m5/cpu2000/binaries/arm/linux/vortex +executable=/arm/projectscratch/randd/systems/dist/cpu2000/binaries/arm/linux/vortex gid=100 input=cin kvmInSE=false @@ -806,10 +881,15 @@ transition_latency=100000000 [system.membus] type=CoherentXBar clk_domain=system.clk_domain +default_p_state=UNDEFINED eventq_index=0 forward_latency=4 frontend_latency=3 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 point_of_coherency=true +power_model=Null response_latency=2 snoop_filter=Null snoop_response_latency=4 @@ -853,6 +933,7 @@ burst_length=8 channels=1 clk_domain=system.clk_domain conf_table_reported=true +default_p_state=UNDEFINED device_bus_width=8 device_rowbuffer_size=1024 device_size=536870912 @@ -864,7 +945,11 @@ max_accesses_per_row=16 mem_sched_policy=frfcfs min_writes_per_switch=16 null=false +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 page_policy=open_adaptive +power_model=Null range=0:134217727 ranks_per_channel=2 read_buffer_size=32 diff --git a/tests/long/se/50.vortex/ref/arm/linux/minor-timing/simerr b/tests/long/se/50.vortex/ref/arm/linux/minor-timing/simerr index f9e2ef3b2..bbcd9d751 100755 --- a/tests/long/se/50.vortex/ref/arm/linux/minor-timing/simerr +++ b/tests/long/se/50.vortex/ref/arm/linux/minor-timing/simerr @@ -1 +1,3 @@ warn: DRAM device capacity (8192 Mbytes) does not match the address range assigned (128 Mbytes) +warn: Sockets disabled, not accepting gdb connections +warn: ClockedObject: More than one power state change request encountered within the same simulation tick diff --git a/tests/long/se/50.vortex/ref/arm/linux/minor-timing/simout b/tests/long/se/50.vortex/ref/arm/linux/minor-timing/simout index 9ad30ac44..9e5ee29fe 100755 --- a/tests/long/se/50.vortex/ref/arm/linux/minor-timing/simout +++ b/tests/long/se/50.vortex/ref/arm/linux/minor-timing/simout @@ -3,12 
+3,12 @@ Redirecting stderr to build/ARM/tests/opt/long/se/50.vortex/arm/linux/minor-timi gem5 Simulator System. http://gem5.org gem5 is copyrighted software; use the --copyright option for details. -gem5 compiled Mar 16 2016 15:51:04 -gem5 started Mar 16 2016 16:24:45 -gem5 executing on dinar2c11, pid 15928 -command line: build/ARM/gem5.opt -d build/ARM/tests/opt/long/se/50.vortex/arm/linux/minor-timing -re /home/stever/gem5-public/tests/run.py build/ARM/tests/opt/long/se/50.vortex/arm/linux/minor-timing +gem5 compiled Jul 21 2016 14:37:41 +gem5 started Jul 21 2016 15:05:27 +gem5 executing on e108600-lin, pid 24209 +command line: /work/curdun01/gem5-external.hg/build/ARM/gem5.opt -d build/ARM/tests/opt/long/se/50.vortex/arm/linux/minor-timing -re /work/curdun01/gem5-external.hg/tests/testing/../run.py long/se/50.vortex/arm/linux/minor-timing Global frequency set at 1000000000000 ticks per second info: Entering event queue @ 0. Starting simulation... info: Increasing stack size by one page. -Exiting @ tick 56966152500 because target called exit() +Exiting @ tick 58768125500 because target called exit() diff --git a/tests/long/se/50.vortex/ref/arm/linux/minor-timing/stats.txt b/tests/long/se/50.vortex/ref/arm/linux/minor-timing/stats.txt index 4b73022fa..50bae5738 100644 --- a/tests/long/se/50.vortex/ref/arm/linux/minor-timing/stats.txt +++ b/tests/long/se/50.vortex/ref/arm/linux/minor-timing/stats.txt @@ -1,106 +1,106 @@ ---------- Begin Simulation Statistics ---------- -sim_seconds 0.056803 # Number of seconds simulated -sim_ticks 56802974500 # Number of ticks simulated -final_tick 56802974500 # Number of ticks from beginning of simulation (restored from checkpoints and never reset) +sim_seconds 0.058768 # Number of seconds simulated +sim_ticks 58768125500 # Number of ticks simulated +final_tick 58768125500 # Number of ticks from beginning of simulation (restored from checkpoints and never reset) sim_freq 1000000000000 # Frequency of simulated ticks -host_inst_rate 307576 # Simulator instruction rate (inst/s) -host_op_rate 393344 # Simulator op (including micro ops) rate (op/s) -host_tick_rate 246367888 # Simulator tick rate (ticks/s) -host_mem_usage 323312 # Number of bytes of host memory used -host_seconds 230.56 # Real time elapsed on the host +host_inst_rate 140139 # Simulator instruction rate (inst/s) +host_op_rate 179217 # Simulator op (including micro ops) rate (op/s) +host_tick_rate 116134728 # Simulator tick rate (ticks/s) +host_mem_usage 275656 # Number of bytes of host memory used +host_seconds 506.03 # Real time elapsed on the host sim_insts 70915150 # Number of instructions simulated sim_ops 90690106 # Number of ops (including micro ops) simulated system.voltage_domain.voltage 1 # Voltage in Volts system.clk_domain.clock 1000 # Clock period in ticks -system.physmem.pwrStateResidencyTicks::UNDEFINED 56802974500 # Cumulative time (in ticks) in various power states -system.physmem.bytes_read::cpu.inst 285504 # Number of bytes read from this memory +system.physmem.pwrStateResidencyTicks::UNDEFINED 58768125500 # Cumulative time (in ticks) in various power states +system.physmem.bytes_read::cpu.inst 285632 # Number of bytes read from this memory system.physmem.bytes_read::cpu.data 7924672 # Number of bytes read from this memory -system.physmem.bytes_read::total 8210176 # Number of bytes read from this memory -system.physmem.bytes_inst_read::cpu.inst 285504 # Number of instructions bytes read from this memory -system.physmem.bytes_inst_read::total 285504 # Number of instructions 
bytes read from this memory -system.physmem.bytes_written::writebacks 5517760 # Number of bytes written to this memory -system.physmem.bytes_written::total 5517760 # Number of bytes written to this memory -system.physmem.num_reads::cpu.inst 4461 # Number of read requests responded to by this memory +system.physmem.bytes_read::total 8210304 # Number of bytes read from this memory +system.physmem.bytes_inst_read::cpu.inst 285632 # Number of instructions bytes read from this memory +system.physmem.bytes_inst_read::total 285632 # Number of instructions bytes read from this memory +system.physmem.bytes_written::writebacks 5517568 # Number of bytes written to this memory +system.physmem.bytes_written::total 5517568 # Number of bytes written to this memory +system.physmem.num_reads::cpu.inst 4463 # Number of read requests responded to by this memory system.physmem.num_reads::cpu.data 123823 # Number of read requests responded to by this memory -system.physmem.num_reads::total 128284 # Number of read requests responded to by this memory -system.physmem.num_writes::writebacks 86215 # Number of write requests responded to by this memory -system.physmem.num_writes::total 86215 # Number of write requests responded to by this memory -system.physmem.bw_read::cpu.inst 5026216 # Total read bandwidth from this memory (bytes/s) -system.physmem.bw_read::cpu.data 139511567 # Total read bandwidth from this memory (bytes/s) -system.physmem.bw_read::total 144537783 # Total read bandwidth from this memory (bytes/s) -system.physmem.bw_inst_read::cpu.inst 5026216 # Instruction read bandwidth from this memory (bytes/s) -system.physmem.bw_inst_read::total 5026216 # Instruction read bandwidth from this memory (bytes/s) -system.physmem.bw_write::writebacks 97138575 # Write bandwidth from this memory (bytes/s) -system.physmem.bw_write::total 97138575 # Write bandwidth from this memory (bytes/s) -system.physmem.bw_total::writebacks 97138575 # Total bandwidth to/from this memory (bytes/s) -system.physmem.bw_total::cpu.inst 5026216 # Total bandwidth to/from this memory (bytes/s) -system.physmem.bw_total::cpu.data 139511567 # Total bandwidth to/from this memory (bytes/s) -system.physmem.bw_total::total 241676358 # Total bandwidth to/from this memory (bytes/s) -system.physmem.readReqs 128284 # Number of read requests accepted -system.physmem.writeReqs 86215 # Number of write requests accepted -system.physmem.readBursts 128284 # Number of DRAM read bursts, including those serviced by the write queue -system.physmem.writeBursts 86215 # Number of DRAM write bursts, including those merged in the write queue -system.physmem.bytesReadDRAM 8209792 # Total number of bytes read from DRAM +system.physmem.num_reads::total 128286 # Number of read requests responded to by this memory +system.physmem.num_writes::writebacks 86212 # Number of write requests responded to by this memory +system.physmem.num_writes::total 86212 # Number of write requests responded to by this memory +system.physmem.bw_read::cpu.inst 4860322 # Total read bandwidth from this memory (bytes/s) +system.physmem.bw_read::cpu.data 134846431 # Total read bandwidth from this memory (bytes/s) +system.physmem.bw_read::total 139706753 # Total read bandwidth from this memory (bytes/s) +system.physmem.bw_inst_read::cpu.inst 4860322 # Instruction read bandwidth from this memory (bytes/s) +system.physmem.bw_inst_read::total 4860322 # Instruction read bandwidth from this memory (bytes/s) +system.physmem.bw_write::writebacks 93887085 # Write bandwidth from this memory (bytes/s) 
+system.physmem.bw_write::total 93887085 # Write bandwidth from this memory (bytes/s) +system.physmem.bw_total::writebacks 93887085 # Total bandwidth to/from this memory (bytes/s) +system.physmem.bw_total::cpu.inst 4860322 # Total bandwidth to/from this memory (bytes/s) +system.physmem.bw_total::cpu.data 134846431 # Total bandwidth to/from this memory (bytes/s) +system.physmem.bw_total::total 233593838 # Total bandwidth to/from this memory (bytes/s) +system.physmem.readReqs 128286 # Number of read requests accepted +system.physmem.writeReqs 86212 # Number of write requests accepted +system.physmem.readBursts 128286 # Number of DRAM read bursts, including those serviced by the write queue +system.physmem.writeBursts 86212 # Number of DRAM write bursts, including those merged in the write queue +system.physmem.bytesReadDRAM 8209920 # Total number of bytes read from DRAM system.physmem.bytesReadWrQ 384 # Total number of bytes read from write queue -system.physmem.bytesWritten 5515904 # Total number of bytes written to DRAM -system.physmem.bytesReadSys 8210176 # Total read bytes from the system interface side -system.physmem.bytesWrittenSys 5517760 # Total written bytes from the system interface side +system.physmem.bytesWritten 5515840 # Total number of bytes written to DRAM +system.physmem.bytesReadSys 8210304 # Total read bytes from the system interface side +system.physmem.bytesWrittenSys 5517568 # Total written bytes from the system interface side system.physmem.servicedByWrQ 6 # Number of DRAM read bursts serviced by the write queue system.physmem.mergedWrBursts 0 # Number of DRAM write bursts merged with an existing one system.physmem.neitherReadNorWriteReqs 0 # Number of requests that are neither read nor write -system.physmem.perBankRdBursts::0 8062 # Per bank write bursts -system.physmem.perBankRdBursts::1 8315 # Per bank write bursts -system.physmem.perBankRdBursts::2 8233 # Per bank write bursts +system.physmem.perBankRdBursts::0 8065 # Per bank write bursts +system.physmem.perBankRdBursts::1 8314 # Per bank write bursts +system.physmem.perBankRdBursts::2 8239 # Per bank write bursts system.physmem.perBankRdBursts::3 8142 # Per bank write bursts system.physmem.perBankRdBursts::4 8284 # Per bank write bursts -system.physmem.perBankRdBursts::5 8403 # Per bank write bursts -system.physmem.perBankRdBursts::6 8055 # Per bank write bursts -system.physmem.perBankRdBursts::7 7916 # Per bank write bursts +system.physmem.perBankRdBursts::5 8404 # Per bank write bursts +system.physmem.perBankRdBursts::6 8054 # Per bank write bursts +system.physmem.perBankRdBursts::7 7915 # Per bank write bursts system.physmem.perBankRdBursts::8 8035 # Per bank write bursts -system.physmem.perBankRdBursts::9 7587 # Per bank write bursts +system.physmem.perBankRdBursts::9 7585 # Per bank write bursts system.physmem.perBankRdBursts::10 7763 # Per bank write bursts -system.physmem.perBankRdBursts::11 7815 # Per bank write bursts +system.physmem.perBankRdBursts::11 7814 # Per bank write bursts system.physmem.perBankRdBursts::12 7871 # Per bank write bursts -system.physmem.perBankRdBursts::13 7867 # Per bank write bursts -system.physmem.perBankRdBursts::14 7968 # Per bank write bursts +system.physmem.perBankRdBursts::13 7866 # Per bank write bursts +system.physmem.perBankRdBursts::14 7967 # Per bank write bursts system.physmem.perBankRdBursts::15 7962 # Per bank write bursts system.physmem.perBankWrBursts::0 5395 # Per bank write bursts system.physmem.perBankWrBursts::1 5541 # Per bank write bursts 
system.physmem.perBankWrBursts::2 5468 # Per bank write bursts system.physmem.perBankWrBursts::3 5336 # Per bank write bursts -system.physmem.perBankWrBursts::4 5366 # Per bank write bursts -system.physmem.perBankWrBursts::5 5560 # Per bank write bursts -system.physmem.perBankWrBursts::6 5257 # Per bank write bursts -system.physmem.perBankWrBursts::7 5179 # Per bank write bursts +system.physmem.perBankWrBursts::4 5363 # Per bank write bursts +system.physmem.perBankWrBursts::5 5561 # Per bank write bursts +system.physmem.perBankWrBursts::6 5259 # Per bank write bursts +system.physmem.perBankWrBursts::7 5180 # Per bank write bursts system.physmem.perBankWrBursts::8 5154 # Per bank write bursts -system.physmem.perBankWrBursts::9 5105 # Per bank write bursts -system.physmem.perBankWrBursts::10 5292 # Per bank write bursts +system.physmem.perBankWrBursts::9 5103 # Per bank write bursts +system.physmem.perBankWrBursts::10 5293 # Per bank write bursts system.physmem.perBankWrBursts::11 5270 # Per bank write bursts system.physmem.perBankWrBursts::12 5531 # Per bank write bursts system.physmem.perBankWrBursts::13 5597 # Per bank write bursts system.physmem.perBankWrBursts::14 5703 # Per bank write bursts -system.physmem.perBankWrBursts::15 5432 # Per bank write bursts +system.physmem.perBankWrBursts::15 5431 # Per bank write bursts system.physmem.numRdRetry 0 # Number of times read queue was full causing retry system.physmem.numWrRetry 0 # Number of times write queue was full causing retry -system.physmem.totGap 56802942500 # Total gap between requests +system.physmem.totGap 58768094000 # Total gap between requests system.physmem.readPktSize::0 0 # Read request sizes (log2) system.physmem.readPktSize::1 0 # Read request sizes (log2) system.physmem.readPktSize::2 0 # Read request sizes (log2) system.physmem.readPktSize::3 0 # Read request sizes (log2) system.physmem.readPktSize::4 0 # Read request sizes (log2) system.physmem.readPktSize::5 0 # Read request sizes (log2) -system.physmem.readPktSize::6 128284 # Read request sizes (log2) +system.physmem.readPktSize::6 128286 # Read request sizes (log2) system.physmem.writePktSize::0 0 # Write request sizes (log2) system.physmem.writePktSize::1 0 # Write request sizes (log2) system.physmem.writePktSize::2 0 # Write request sizes (log2) system.physmem.writePktSize::3 0 # Write request sizes (log2) system.physmem.writePktSize::4 0 # Write request sizes (log2) system.physmem.writePktSize::5 0 # Write request sizes (log2) -system.physmem.writePktSize::6 86215 # Write request sizes (log2) -system.physmem.rdQLenPdf::0 116125 # What read queue length does an incoming req see -system.physmem.rdQLenPdf::1 12132 # What read queue length does an incoming req see -system.physmem.rdQLenPdf::2 21 # What read queue length does an incoming req see +system.physmem.writePktSize::6 86212 # Write request sizes (log2) +system.physmem.rdQLenPdf::0 116156 # What read queue length does an incoming req see +system.physmem.rdQLenPdf::1 12104 # What read queue length does an incoming req see +system.physmem.rdQLenPdf::2 20 # What read queue length does an incoming req see system.physmem.rdQLenPdf::3 0 # What read queue length does an incoming req see system.physmem.rdQLenPdf::4 0 # What read queue length does an incoming req see system.physmem.rdQLenPdf::5 0 # What read queue length does an incoming req see @@ -145,34 +145,34 @@ system.physmem.wrQLenPdf::11 1 # Wh system.physmem.wrQLenPdf::12 1 # What write queue length does an incoming req see system.physmem.wrQLenPdf::13 1 # 
What write queue length does an incoming req see system.physmem.wrQLenPdf::14 1 # What write queue length does an incoming req see -system.physmem.wrQLenPdf::15 631 # What write queue length does an incoming req see -system.physmem.wrQLenPdf::16 643 # What write queue length does an incoming req see -system.physmem.wrQLenPdf::17 4122 # What write queue length does an incoming req see -system.physmem.wrQLenPdf::18 5183 # What write queue length does an incoming req see -system.physmem.wrQLenPdf::19 5277 # What write queue length does an incoming req see -system.physmem.wrQLenPdf::20 5318 # What write queue length does an incoming req see -system.physmem.wrQLenPdf::21 5309 # What write queue length does an incoming req see -system.physmem.wrQLenPdf::22 5314 # What write queue length does an incoming req see -system.physmem.wrQLenPdf::23 5323 # What write queue length does an incoming req see -system.physmem.wrQLenPdf::24 5321 # What write queue length does an incoming req see -system.physmem.wrQLenPdf::25 5353 # What write queue length does an incoming req see -system.physmem.wrQLenPdf::26 5383 # What write queue length does an incoming req see -system.physmem.wrQLenPdf::27 5464 # What write queue length does an incoming req see -system.physmem.wrQLenPdf::28 5436 # What write queue length does an incoming req see -system.physmem.wrQLenPdf::29 5495 # What write queue length does an incoming req see -system.physmem.wrQLenPdf::30 5851 # What write queue length does an incoming req see -system.physmem.wrQLenPdf::31 5447 # What write queue length does an incoming req see -system.physmem.wrQLenPdf::32 5305 # What write queue length does an incoming req see -system.physmem.wrQLenPdf::33 15 # What write queue length does an incoming req see +system.physmem.wrQLenPdf::15 628 # What write queue length does an incoming req see +system.physmem.wrQLenPdf::16 635 # What write queue length does an incoming req see +system.physmem.wrQLenPdf::17 4059 # What write queue length does an incoming req see +system.physmem.wrQLenPdf::18 5180 # What write queue length does an incoming req see +system.physmem.wrQLenPdf::19 5287 # What write queue length does an incoming req see +system.physmem.wrQLenPdf::20 5319 # What write queue length does an incoming req see +system.physmem.wrQLenPdf::21 5314 # What write queue length does an incoming req see +system.physmem.wrQLenPdf::22 5316 # What write queue length does an incoming req see +system.physmem.wrQLenPdf::23 5321 # What write queue length does an incoming req see +system.physmem.wrQLenPdf::24 5334 # What write queue length does an incoming req see +system.physmem.wrQLenPdf::25 5362 # What write queue length does an incoming req see +system.physmem.wrQLenPdf::26 5346 # What write queue length does an incoming req see +system.physmem.wrQLenPdf::27 5514 # What write queue length does an incoming req see +system.physmem.wrQLenPdf::28 5445 # What write queue length does an incoming req see +system.physmem.wrQLenPdf::29 5466 # What write queue length does an incoming req see +system.physmem.wrQLenPdf::30 5870 # What write queue length does an incoming req see +system.physmem.wrQLenPdf::31 5486 # What write queue length does an incoming req see +system.physmem.wrQLenPdf::32 5303 # What write queue length does an incoming req see +system.physmem.wrQLenPdf::33 9 # What write queue length does an incoming req see system.physmem.wrQLenPdf::34 2 # What write queue length does an incoming req see system.physmem.wrQLenPdf::35 1 # What write queue length does an incoming req see 
-system.physmem.wrQLenPdf::36 1 # What write queue length does an incoming req see -system.physmem.wrQLenPdf::37 1 # What write queue length does an incoming req see -system.physmem.wrQLenPdf::38 1 # What write queue length does an incoming req see -system.physmem.wrQLenPdf::39 1 # What write queue length does an incoming req see -system.physmem.wrQLenPdf::40 1 # What write queue length does an incoming req see -system.physmem.wrQLenPdf::41 1 # What write queue length does an incoming req see -system.physmem.wrQLenPdf::42 1 # What write queue length does an incoming req see +system.physmem.wrQLenPdf::36 0 # What write queue length does an incoming req see +system.physmem.wrQLenPdf::37 0 # What write queue length does an incoming req see +system.physmem.wrQLenPdf::38 0 # What write queue length does an incoming req see +system.physmem.wrQLenPdf::39 0 # What write queue length does an incoming req see +system.physmem.wrQLenPdf::40 0 # What write queue length does an incoming req see +system.physmem.wrQLenPdf::41 0 # What write queue length does an incoming req see +system.physmem.wrQLenPdf::42 0 # What write queue length does an incoming req see system.physmem.wrQLenPdf::43 0 # What write queue length does an incoming req see system.physmem.wrQLenPdf::44 0 # What write queue length does an incoming req see system.physmem.wrQLenPdf::45 0 # What write queue length does an incoming req see @@ -194,108 +194,106 @@ system.physmem.wrQLenPdf::60 0 # Wh system.physmem.wrQLenPdf::61 0 # What write queue length does an incoming req see system.physmem.wrQLenPdf::62 0 # What write queue length does an incoming req see system.physmem.wrQLenPdf::63 0 # What write queue length does an incoming req see -system.physmem.bytesPerActivate::samples 38880 # Bytes accessed per row activation -system.physmem.bytesPerActivate::mean 352.990947 # Bytes accessed per row activation -system.physmem.bytesPerActivate::gmean 214.489872 # Bytes accessed per row activation -system.physmem.bytesPerActivate::stdev 335.589979 # Bytes accessed per row activation -system.physmem.bytesPerActivate::0-127 12269 31.56% 31.56% # Bytes accessed per row activation -system.physmem.bytesPerActivate::128-255 8336 21.44% 53.00% # Bytes accessed per row activation -system.physmem.bytesPerActivate::256-383 4191 10.78% 63.78% # Bytes accessed per row activation -system.physmem.bytesPerActivate::384-511 2845 7.32% 71.09% # Bytes accessed per row activation -system.physmem.bytesPerActivate::512-639 2490 6.40% 77.50% # Bytes accessed per row activation -system.physmem.bytesPerActivate::640-767 1681 4.32% 81.82% # Bytes accessed per row activation -system.physmem.bytesPerActivate::768-895 1302 3.35% 85.17% # Bytes accessed per row activation -system.physmem.bytesPerActivate::896-1023 1149 2.96% 88.12% # Bytes accessed per row activation -system.physmem.bytesPerActivate::1024-1151 4617 11.88% 100.00% # Bytes accessed per row activation -system.physmem.bytesPerActivate::total 38880 # Bytes accessed per row activation -system.physmem.rdPerTurnAround::samples 5294 # Reads before turning the bus around for writes -system.physmem.rdPerTurnAround::mean 24.227616 # Reads before turning the bus around for writes -system.physmem.rdPerTurnAround::stdev 352.423208 # Reads before turning the bus around for writes -system.physmem.rdPerTurnAround::0-1023 5291 99.94% 99.94% # Reads before turning the bus around for writes +system.physmem.bytesPerActivate::samples 38803 # Bytes accessed per row activation +system.physmem.bytesPerActivate::mean 353.665026 # Bytes 
accessed per row activation +system.physmem.bytesPerActivate::gmean 214.783131 # Bytes accessed per row activation +system.physmem.bytesPerActivate::stdev 335.990632 # Bytes accessed per row activation +system.physmem.bytesPerActivate::0-127 12260 31.60% 31.60% # Bytes accessed per row activation +system.physmem.bytesPerActivate::128-255 8290 21.36% 52.96% # Bytes accessed per row activation +system.physmem.bytesPerActivate::256-383 4146 10.68% 63.64% # Bytes accessed per row activation +system.physmem.bytesPerActivate::384-511 2807 7.23% 70.88% # Bytes accessed per row activation +system.physmem.bytesPerActivate::512-639 2540 6.55% 77.42% # Bytes accessed per row activation +system.physmem.bytesPerActivate::640-767 1701 4.38% 81.81% # Bytes accessed per row activation +system.physmem.bytesPerActivate::768-895 1262 3.25% 85.06% # Bytes accessed per row activation +system.physmem.bytesPerActivate::896-1023 1176 3.03% 88.09% # Bytes accessed per row activation +system.physmem.bytesPerActivate::1024-1151 4621 11.91% 100.00% # Bytes accessed per row activation +system.physmem.bytesPerActivate::total 38803 # Bytes accessed per row activation +system.physmem.rdPerTurnAround::samples 5298 # Reads before turning the bus around for writes +system.physmem.rdPerTurnAround::mean 24.212911 # Reads before turning the bus around for writes +system.physmem.rdPerTurnAround::stdev 352.385643 # Reads before turning the bus around for writes +system.physmem.rdPerTurnAround::0-1023 5295 99.94% 99.94% # Reads before turning the bus around for writes system.physmem.rdPerTurnAround::1024-2047 1 0.02% 99.96% # Reads before turning the bus around for writes system.physmem.rdPerTurnAround::2048-3071 1 0.02% 99.98% # Reads before turning the bus around for writes system.physmem.rdPerTurnAround::24576-25599 1 0.02% 100.00% # Reads before turning the bus around for writes -system.physmem.rdPerTurnAround::total 5294 # Reads before turning the bus around for writes -system.physmem.wrPerTurnAround::samples 5294 # Writes before turning the bus around for reads -system.physmem.wrPerTurnAround::mean 16.279940 # Writes before turning the bus around for reads -system.physmem.wrPerTurnAround::gmean 16.260845 # Writes before turning the bus around for reads -system.physmem.wrPerTurnAround::stdev 0.856304 # Writes before turning the bus around for reads -system.physmem.wrPerTurnAround::16 4659 88.01% 88.01% # Writes before turning the bus around for reads -system.physmem.wrPerTurnAround::17 4 0.08% 88.08% # Writes before turning the bus around for reads -system.physmem.wrPerTurnAround::18 483 9.12% 97.20% # Writes before turning the bus around for reads -system.physmem.wrPerTurnAround::19 119 2.25% 99.45% # Writes before turning the bus around for reads -system.physmem.wrPerTurnAround::20 16 0.30% 99.75% # Writes before turning the bus around for reads -system.physmem.wrPerTurnAround::21 8 0.15% 99.91% # Writes before turning the bus around for reads -system.physmem.wrPerTurnAround::22 3 0.06% 99.96% # Writes before turning the bus around for reads -system.physmem.wrPerTurnAround::23 1 0.02% 99.98% # Writes before turning the bus around for reads -system.physmem.wrPerTurnAround::42 1 0.02% 100.00% # Writes before turning the bus around for reads -system.physmem.wrPerTurnAround::total 5294 # Writes before turning the bus around for reads -system.physmem.totQLat 1681541750 # Total ticks spent queuing -system.physmem.totMemAccLat 4086754250 # Total ticks spent from burst creation until serviced by the DRAM -system.physmem.totBusLat 
641390000 # Total ticks spent in databus transfers -system.physmem.avgQLat 13108.57 # Average queueing delay per DRAM burst +system.physmem.rdPerTurnAround::total 5298 # Reads before turning the bus around for writes +system.physmem.wrPerTurnAround::samples 5297 # Writes before turning the bus around for reads +system.physmem.wrPerTurnAround::mean 16.269398 # Writes before turning the bus around for reads +system.physmem.wrPerTurnAround::gmean 16.253066 # Writes before turning the bus around for reads +system.physmem.wrPerTurnAround::stdev 0.759205 # Writes before turning the bus around for reads +system.physmem.wrPerTurnAround::16 4663 88.03% 88.03% # Writes before turning the bus around for reads +system.physmem.wrPerTurnAround::17 7 0.13% 88.16% # Writes before turning the bus around for reads +system.physmem.wrPerTurnAround::18 496 9.36% 97.53% # Writes before turning the bus around for reads +system.physmem.wrPerTurnAround::19 106 2.00% 99.53% # Writes before turning the bus around for reads +system.physmem.wrPerTurnAround::20 16 0.30% 99.83% # Writes before turning the bus around for reads +system.physmem.wrPerTurnAround::21 8 0.15% 99.98% # Writes before turning the bus around for reads +system.physmem.wrPerTurnAround::22 1 0.02% 100.00% # Writes before turning the bus around for reads +system.physmem.wrPerTurnAround::total 5297 # Writes before turning the bus around for reads +system.physmem.totQLat 1679255750 # Total ticks spent queuing +system.physmem.totMemAccLat 4084505750 # Total ticks spent from burst creation until serviced by the DRAM +system.physmem.totBusLat 641400000 # Total ticks spent in databus transfers +system.physmem.avgQLat 13090.55 # Average queueing delay per DRAM burst system.physmem.avgBusLat 5000.00 # Average bus latency per DRAM burst -system.physmem.avgMemAccLat 31858.57 # Average memory access latency per DRAM burst -system.physmem.avgRdBW 144.53 # Average DRAM read bandwidth in MiByte/s -system.physmem.avgWrBW 97.11 # Average achieved write bandwidth in MiByte/s -system.physmem.avgRdBWSys 144.54 # Average system read bandwidth in MiByte/s -system.physmem.avgWrBWSys 97.14 # Average system write bandwidth in MiByte/s +system.physmem.avgMemAccLat 31840.55 # Average memory access latency per DRAM burst +system.physmem.avgRdBW 139.70 # Average DRAM read bandwidth in MiByte/s +system.physmem.avgWrBW 93.86 # Average achieved write bandwidth in MiByte/s +system.physmem.avgRdBWSys 139.71 # Average system read bandwidth in MiByte/s +system.physmem.avgWrBWSys 93.89 # Average system write bandwidth in MiByte/s system.physmem.peakBW 12800.00 # Theoretical peak bandwidth in MiByte/s -system.physmem.busUtil 1.89 # Data bus utilization in percentage -system.physmem.busUtilRead 1.13 # Data bus utilization in percentage for reads -system.physmem.busUtilWrite 0.76 # Data bus utilization in percentage for writes +system.physmem.busUtil 1.82 # Data bus utilization in percentage +system.physmem.busUtilRead 1.09 # Data bus utilization in percentage for reads +system.physmem.busUtilWrite 0.73 # Data bus utilization in percentage for writes system.physmem.avgRdQLen 1.03 # Average read queue length when enqueuing -system.physmem.avgWrQLen 23.24 # Average write queue length when enqueuing -system.physmem.readRowHits 111837 # Number of row buffer hits during reads -system.physmem.writeRowHits 63741 # Number of row buffer hits during writes -system.physmem.readRowHitRate 87.18 # Row buffer hit rate for reads -system.physmem.writeRowHitRate 73.93 # Row buffer hit rate for writes 
-system.physmem.avgGap 264816.82 # Average gap between requests -system.physmem.pageHitRate 81.86 # Row buffer hit rate, read and write combined -system.physmem_0.actEnergy 153127800 # Energy for activate commands per rank (pJ) -system.physmem_0.preEnergy 83551875 # Energy for precharge commands per rank (pJ) -system.physmem_0.readEnergy 510073200 # Energy for read commands per rank (pJ) -system.physmem_0.writeEnergy 279223200 # Energy for write commands per rank (pJ) -system.physmem_0.refreshEnergy 3709945200 # Energy for refresh commands per rank (pJ) -system.physmem_0.actBackEnergy 11545672905 # Energy for active background per rank (pJ) -system.physmem_0.preBackEnergy 23952789000 # Energy for precharge background per rank (pJ) -system.physmem_0.totalEnergy 40234383180 # Total energy per rank (pJ) -system.physmem_0.averagePower 708.339923 # Core power per rank (mW) -system.physmem_0.memoryStateTime::IDLE 39720213500 # Time in different power states -system.physmem_0.memoryStateTime::REF 1896700000 # Time in different power states +system.physmem.avgWrQLen 23.33 # Average write queue length when enqueuing +system.physmem.readRowHits 111800 # Number of row buffer hits during reads +system.physmem.writeRowHits 63851 # Number of row buffer hits during writes +system.physmem.readRowHitRate 87.15 # Row buffer hit rate for reads +system.physmem.writeRowHitRate 74.06 # Row buffer hit rate for writes +system.physmem.avgGap 273979.68 # Average gap between requests +system.physmem.pageHitRate 81.89 # Row buffer hit rate, read and write combined +system.physmem_0.actEnergy 153014400 # Energy for activate commands per rank (pJ) +system.physmem_0.preEnergy 83490000 # Energy for precharge commands per rank (pJ) +system.physmem_0.readEnergy 509886000 # Energy for read commands per rank (pJ) +system.physmem_0.writeEnergy 279190800 # Energy for write commands per rank (pJ) +system.physmem_0.refreshEnergy 3838102320 # Energy for refresh commands per rank (pJ) +system.physmem_0.actBackEnergy 11659704255 # Energy for active background per rank (pJ) +system.physmem_0.preBackEnergy 25030042500 # Energy for precharge background per rank (pJ) +system.physmem_0.totalEnergy 41553430275 # Total energy per rank (pJ) +system.physmem_0.averagePower 707.134890 # Core power per rank (mW) +system.physmem_0.memoryStateTime::IDLE 41510709500 # Time in different power states +system.physmem_0.memoryStateTime::REF 1962220000 # Time in different power states system.physmem_0.memoryStateTime::PRE_PDN 0 # Time in different power states -system.physmem_0.memoryStateTime::ACT 15184054000 # Time in different power states +system.physmem_0.memoryStateTime::ACT 15290173000 # Time in different power states system.physmem_0.memoryStateTime::ACT_PDN 0 # Time in different power states -system.physmem_1.actEnergy 140767200 # Energy for activate commands per rank (pJ) -system.physmem_1.preEnergy 76807500 # Energy for precharge commands per rank (pJ) -system.physmem_1.readEnergy 490315800 # Energy for read commands per rank (pJ) -system.physmem_1.writeEnergy 279158400 # Energy for write commands per rank (pJ) -system.physmem_1.refreshEnergy 3709945200 # Energy for refresh commands per rank (pJ) -system.physmem_1.actBackEnergy 11005773750 # Energy for active background per rank (pJ) -system.physmem_1.preBackEnergy 24426384750 # Energy for precharge background per rank (pJ) -system.physmem_1.totalEnergy 40129152600 # Total energy per rank (pJ) -system.physmem_1.averagePower 706.487303 # Core power per rank (mW) 
-system.physmem_1.memoryStateTime::IDLE 40510168000 # Time in different power states -system.physmem_1.memoryStateTime::REF 1896700000 # Time in different power states +system.physmem_1.actEnergy 140215320 # Energy for activate commands per rank (pJ) +system.physmem_1.preEnergy 76506375 # Energy for precharge commands per rank (pJ) +system.physmem_1.readEnergy 490152000 # Energy for read commands per rank (pJ) +system.physmem_1.writeEnergy 279145440 # Energy for write commands per rank (pJ) +system.physmem_1.refreshEnergy 3838102320 # Energy for refresh commands per rank (pJ) +system.physmem_1.actBackEnergy 11133864720 # Energy for active background per rank (pJ) +system.physmem_1.preBackEnergy 25491305250 # Energy for precharge background per rank (pJ) +system.physmem_1.totalEnergy 41449291425 # Total energy per rank (pJ) +system.physmem_1.averagePower 705.362708 # Core power per rank (mW) +system.physmem_1.memoryStateTime::IDLE 42280803500 # Time in different power states +system.physmem_1.memoryStateTime::REF 1962220000 # Time in different power states system.physmem_1.memoryStateTime::PRE_PDN 0 # Time in different power states -system.physmem_1.memoryStateTime::ACT 14394586000 # Time in different power states +system.physmem_1.memoryStateTime::ACT 14520166000 # Time in different power states system.physmem_1.memoryStateTime::ACT_PDN 0 # Time in different power states -system.pwrStateResidencyTicks::UNDEFINED 56802974500 # Cumulative time (in ticks) in various power states -system.cpu.branchPred.lookups 14774616 # Number of BP lookups -system.cpu.branchPred.condPredicted 9890616 # Number of conditional branches predicted -system.cpu.branchPred.condIncorrect 339334 # Number of conditional branches incorrect -system.cpu.branchPred.BTBLookups 9548677 # Number of BTB lookups -system.cpu.branchPred.BTBHits 6547888 # Number of BTB hits +system.pwrStateResidencyTicks::UNDEFINED 58768125500 # Cumulative time (in ticks) in various power states +system.cpu.branchPred.lookups 14827521 # Number of BP lookups +system.cpu.branchPred.condPredicted 9922528 # Number of conditional branches predicted +system.cpu.branchPred.condIncorrect 342114 # Number of conditional branches incorrect +system.cpu.branchPred.BTBLookups 9663077 # Number of BTB lookups +system.cpu.branchPred.BTBHits 6571727 # Number of BTB hits system.cpu.branchPred.BTBCorrect 0 # Number of correct BTB predictions (this stat may not work properly. -system.cpu.branchPred.BTBHitPct 68.573772 # BTB Hit Percentage -system.cpu.branchPred.usedRAS 1714315 # Number of times the RAS was used to get a target. +system.cpu.branchPred.BTBHitPct 68.008637 # BTB Hit Percentage +system.cpu.branchPred.usedRAS 1719937 # Number of times the RAS was used to get a target. system.cpu.branchPred.RASInCorrect 4 # Number of incorrect RAS predictions. -system.cpu.branchPred.indirectLookups 174550 # Number of indirect predictor lookups. -system.cpu.branchPred.indirectHits 157999 # Number of indirect target hits. -system.cpu.branchPred.indirectMisses 16551 # Number of indirect misses. -system.cpu.branchPredindirectMispredicted 24800 # Number of mispredicted indirect branches. +system.cpu.branchPred.indirectLookups 176106 # Number of indirect predictor lookups. +system.cpu.branchPred.indirectHits 158425 # Number of indirect target hits. +system.cpu.branchPred.indirectMisses 17681 # Number of indirect misses. +system.cpu.branchPredindirectMispredicted 24889 # Number of mispredicted indirect branches. 
system.cpu_clk_domain.clock 500 # Clock period in ticks -system.cpu.dstage2_mmu.stage2_tlb.walker.pwrStateResidencyTicks::UNDEFINED 56802974500 # Cumulative time (in ticks) in various power states +system.cpu.dstage2_mmu.stage2_tlb.walker.pwrStateResidencyTicks::UNDEFINED 58768125500 # Cumulative time (in ticks) in various power states system.cpu.dstage2_mmu.stage2_tlb.walker.walks 0 # Table walker walks requested system.cpu.dstage2_mmu.stage2_tlb.walker.walkRequestOrigin_Requested::Data 0 # Table walker requests started/completed, data/inst system.cpu.dstage2_mmu.stage2_tlb.walker.walkRequestOrigin_Requested::Inst 0 # Table walker requests started/completed, data/inst @@ -325,7 +323,7 @@ system.cpu.dstage2_mmu.stage2_tlb.inst_accesses 0 system.cpu.dstage2_mmu.stage2_tlb.hits 0 # DTB hits system.cpu.dstage2_mmu.stage2_tlb.misses 0 # DTB misses system.cpu.dstage2_mmu.stage2_tlb.accesses 0 # DTB accesses -system.cpu.dtb.walker.pwrStateResidencyTicks::UNDEFINED 56802974500 # Cumulative time (in ticks) in various power states +system.cpu.dtb.walker.pwrStateResidencyTicks::UNDEFINED 58768125500 # Cumulative time (in ticks) in various power states system.cpu.dtb.walker.walks 0 # Table walker walks requested system.cpu.dtb.walker.walkRequestOrigin_Requested::Data 0 # Table walker requests started/completed, data/inst system.cpu.dtb.walker.walkRequestOrigin_Requested::Inst 0 # Table walker requests started/completed, data/inst @@ -355,7 +353,7 @@ system.cpu.dtb.inst_accesses 0 # IT system.cpu.dtb.hits 0 # DTB hits system.cpu.dtb.misses 0 # DTB misses system.cpu.dtb.accesses 0 # DTB accesses -system.cpu.istage2_mmu.stage2_tlb.walker.pwrStateResidencyTicks::UNDEFINED 56802974500 # Cumulative time (in ticks) in various power states +system.cpu.istage2_mmu.stage2_tlb.walker.pwrStateResidencyTicks::UNDEFINED 58768125500 # Cumulative time (in ticks) in various power states system.cpu.istage2_mmu.stage2_tlb.walker.walks 0 # Table walker walks requested system.cpu.istage2_mmu.stage2_tlb.walker.walkRequestOrigin_Requested::Data 0 # Table walker requests started/completed, data/inst system.cpu.istage2_mmu.stage2_tlb.walker.walkRequestOrigin_Requested::Inst 0 # Table walker requests started/completed, data/inst @@ -385,7 +383,7 @@ system.cpu.istage2_mmu.stage2_tlb.inst_accesses 0 system.cpu.istage2_mmu.stage2_tlb.hits 0 # DTB hits system.cpu.istage2_mmu.stage2_tlb.misses 0 # DTB misses system.cpu.istage2_mmu.stage2_tlb.accesses 0 # DTB accesses -system.cpu.itb.walker.pwrStateResidencyTicks::UNDEFINED 56802974500 # Cumulative time (in ticks) in various power states +system.cpu.itb.walker.pwrStateResidencyTicks::UNDEFINED 58768125500 # Cumulative time (in ticks) in various power states system.cpu.itb.walker.walks 0 # Table walker walks requested system.cpu.itb.walker.walkRequestOrigin_Requested::Data 0 # Table walker requests started/completed, data/inst system.cpu.itb.walker.walkRequestOrigin_Requested::Inst 0 # Table walker requests started/completed, data/inst @@ -416,16 +414,16 @@ system.cpu.itb.hits 0 # DT system.cpu.itb.misses 0 # DTB misses system.cpu.itb.accesses 0 # DTB accesses system.cpu.workload.num_syscalls 1946 # Number of system calls -system.cpu.pwrStateResidencyTicks::ON 56802974500 # Cumulative time (in ticks) in various power states -system.cpu.numCycles 113605949 # number of cpu cycles simulated +system.cpu.pwrStateResidencyTicks::ON 58768125500 # Cumulative time (in ticks) in various power states +system.cpu.numCycles 117536251 # number of cpu cycles simulated 
system.cpu.numWorkItemsStarted 0 # number of work items this cpu started system.cpu.numWorkItemsCompleted 0 # number of work items this cpu completed system.cpu.committedInsts 70915150 # Number of instructions committed system.cpu.committedOps 90690106 # Number of ops (including micro ops) committed -system.cpu.discardedOps 1137741 # Number of ops (including micro ops) which were discarded before commit +system.cpu.discardedOps 1179302 # Number of ops (including micro ops) which were discarded before commit system.cpu.numFetchSuspends 0 # Number of times Execute suspended instruction fetching -system.cpu.cpi 1.601998 # CPI: cycles per instruction -system.cpu.ipc 0.624220 # IPC: instructions per cycle +system.cpu.cpi 1.657421 # CPI: cycles per instruction +system.cpu.ipc 0.603347 # IPC: instructions per cycle system.cpu.op_class_0::No_OpClass 0 0.00% 0.00% # Class of committed instruction system.cpu.op_class_0::IntAlu 47187979 52.03% 52.03% # Class of committed instruction system.cpu.op_class_0::IntMult 80119 0.09% 52.12% # Class of committed instruction @@ -461,471 +459,474 @@ system.cpu.op_class_0::MemWrite 20555739 22.67% 100.00% # Cl system.cpu.op_class_0::IprAccess 0 0.00% 100.00% # Class of committed instruction system.cpu.op_class_0::InstPrefetch 0 0.00% 100.00% # Class of committed instruction system.cpu.op_class_0::total 90690106 # Class of committed instruction -system.cpu.tickCycles 95311103 # Number of cycles that the object actually ticked -system.cpu.idleCycles 18294846 # Total number of cycles that the object has spent stopped -system.cpu.dcache.tags.pwrStateResidencyTicks::UNDEFINED 56802974500 # Cumulative time (in ticks) in various power states -system.cpu.dcache.tags.replacements 156448 # number of replacements -system.cpu.dcache.tags.tagsinuse 4067.225830 # Cycle average of tags in use -system.cpu.dcache.tags.total_refs 42620314 # Total number of references to valid blocks. -system.cpu.dcache.tags.sampled_refs 160544 # Sample count of references to valid blocks. -system.cpu.dcache.tags.avg_refs 265.474350 # Average number of references to valid blocks. -system.cpu.dcache.tags.warmup_cycle 820768500 # Cycle when the warmup percentage was hit. -system.cpu.dcache.tags.occ_blocks::cpu.data 4067.225830 # Average occupied blocks per requestor -system.cpu.dcache.tags.occ_percent::cpu.data 0.992975 # Average percentage of cache occupancy -system.cpu.dcache.tags.occ_percent::total 0.992975 # Average percentage of cache occupancy +system.cpu.tickCycles 97988256 # Number of cycles that the object actually ticked +system.cpu.idleCycles 19547995 # Total number of cycles that the object has spent stopped +system.cpu.dcache.tags.pwrStateResidencyTicks::UNDEFINED 58768125500 # Cumulative time (in ticks) in various power states +system.cpu.dcache.tags.replacements 156444 # number of replacements +system.cpu.dcache.tags.tagsinuse 4068.129500 # Cycle average of tags in use +system.cpu.dcache.tags.total_refs 42637241 # Total number of references to valid blocks. +system.cpu.dcache.tags.sampled_refs 160540 # Sample count of references to valid blocks. +system.cpu.dcache.tags.avg_refs 265.586402 # Average number of references to valid blocks. +system.cpu.dcache.tags.warmup_cycle 821026500 # Cycle when the warmup percentage was hit. 
+system.cpu.dcache.tags.occ_blocks::cpu.data 4068.129500 # Average occupied blocks per requestor +system.cpu.dcache.tags.occ_percent::cpu.data 0.993196 # Average percentage of cache occupancy +system.cpu.dcache.tags.occ_percent::total 0.993196 # Average percentage of cache occupancy system.cpu.dcache.tags.occ_task_id_blocks::1024 4096 # Occupied blocks per task id system.cpu.dcache.tags.age_task_id_blocks_1024::0 44 # Occupied blocks per task id -system.cpu.dcache.tags.age_task_id_blocks_1024::1 1099 # Occupied blocks per task id -system.cpu.dcache.tags.age_task_id_blocks_1024::2 2953 # Occupied blocks per task id +system.cpu.dcache.tags.age_task_id_blocks_1024::1 1100 # Occupied blocks per task id +system.cpu.dcache.tags.age_task_id_blocks_1024::2 2952 # Occupied blocks per task id system.cpu.dcache.tags.occ_task_id_percent::1024 1 # Percentage of cache occupancy per task id -system.cpu.dcache.tags.tag_accesses 86009120 # Number of tag accesses -system.cpu.dcache.tags.data_accesses 86009120 # Number of data accesses -system.cpu.dcache.pwrStateResidencyTicks::UNDEFINED 56802974500 # Cumulative time (in ticks) in various power states -system.cpu.dcache.ReadReq_hits::cpu.data 22862903 # number of ReadReq hits -system.cpu.dcache.ReadReq_hits::total 22862903 # number of ReadReq hits -system.cpu.dcache.WriteReq_hits::cpu.data 19642172 # number of WriteReq hits -system.cpu.dcache.WriteReq_hits::total 19642172 # number of WriteReq hits -system.cpu.dcache.SoftPFReq_hits::cpu.data 83401 # number of SoftPFReq hits -system.cpu.dcache.SoftPFReq_hits::total 83401 # number of SoftPFReq hits +system.cpu.dcache.tags.tag_accesses 86035236 # Number of tag accesses +system.cpu.dcache.tags.data_accesses 86035236 # Number of data accesses +system.cpu.dcache.pwrStateResidencyTicks::UNDEFINED 58768125500 # Cumulative time (in ticks) in various power states +system.cpu.dcache.ReadReq_hits::cpu.data 22879875 # number of ReadReq hits +system.cpu.dcache.ReadReq_hits::total 22879875 # number of ReadReq hits +system.cpu.dcache.WriteReq_hits::cpu.data 19642158 # number of WriteReq hits +system.cpu.dcache.WriteReq_hits::total 19642158 # number of WriteReq hits +system.cpu.dcache.SoftPFReq_hits::cpu.data 83370 # number of SoftPFReq hits +system.cpu.dcache.SoftPFReq_hits::total 83370 # number of SoftPFReq hits system.cpu.dcache.LoadLockedReq_hits::cpu.data 15919 # number of LoadLockedReq hits system.cpu.dcache.LoadLockedReq_hits::total 15919 # number of LoadLockedReq hits system.cpu.dcache.StoreCondReq_hits::cpu.data 15919 # number of StoreCondReq hits system.cpu.dcache.StoreCondReq_hits::total 15919 # number of StoreCondReq hits -system.cpu.dcache.demand_hits::cpu.data 42505075 # number of demand (read+write) hits -system.cpu.dcache.demand_hits::total 42505075 # number of demand (read+write) hits -system.cpu.dcache.overall_hits::cpu.data 42588476 # number of overall hits -system.cpu.dcache.overall_hits::total 42588476 # number of overall hits -system.cpu.dcache.ReadReq_misses::cpu.data 51661 # number of ReadReq misses -system.cpu.dcache.ReadReq_misses::total 51661 # number of ReadReq misses -system.cpu.dcache.WriteReq_misses::cpu.data 207729 # number of WriteReq misses -system.cpu.dcache.WriteReq_misses::total 207729 # number of WriteReq misses -system.cpu.dcache.SoftPFReq_misses::cpu.data 44584 # number of SoftPFReq misses -system.cpu.dcache.SoftPFReq_misses::total 44584 # number of SoftPFReq misses -system.cpu.dcache.demand_misses::cpu.data 259390 # number of demand (read+write) misses 
-system.cpu.dcache.demand_misses::total 259390 # number of demand (read+write) misses -system.cpu.dcache.overall_misses::cpu.data 303974 # number of overall misses -system.cpu.dcache.overall_misses::total 303974 # number of overall misses -system.cpu.dcache.ReadReq_miss_latency::cpu.data 1490194000 # number of ReadReq miss cycles -system.cpu.dcache.ReadReq_miss_latency::total 1490194000 # number of ReadReq miss cycles -system.cpu.dcache.WriteReq_miss_latency::cpu.data 16811157000 # number of WriteReq miss cycles -system.cpu.dcache.WriteReq_miss_latency::total 16811157000 # number of WriteReq miss cycles -system.cpu.dcache.demand_miss_latency::cpu.data 18301351000 # number of demand (read+write) miss cycles -system.cpu.dcache.demand_miss_latency::total 18301351000 # number of demand (read+write) miss cycles -system.cpu.dcache.overall_miss_latency::cpu.data 18301351000 # number of overall miss cycles -system.cpu.dcache.overall_miss_latency::total 18301351000 # number of overall miss cycles -system.cpu.dcache.ReadReq_accesses::cpu.data 22914564 # number of ReadReq accesses(hits+misses) -system.cpu.dcache.ReadReq_accesses::total 22914564 # number of ReadReq accesses(hits+misses) +system.cpu.dcache.demand_hits::cpu.data 42522033 # number of demand (read+write) hits +system.cpu.dcache.demand_hits::total 42522033 # number of demand (read+write) hits +system.cpu.dcache.overall_hits::cpu.data 42605403 # number of overall hits +system.cpu.dcache.overall_hits::total 42605403 # number of overall hits +system.cpu.dcache.ReadReq_misses::cpu.data 47768 # number of ReadReq misses +system.cpu.dcache.ReadReq_misses::total 47768 # number of ReadReq misses +system.cpu.dcache.WriteReq_misses::cpu.data 207743 # number of WriteReq misses +system.cpu.dcache.WriteReq_misses::total 207743 # number of WriteReq misses +system.cpu.dcache.SoftPFReq_misses::cpu.data 44596 # number of SoftPFReq misses +system.cpu.dcache.SoftPFReq_misses::total 44596 # number of SoftPFReq misses +system.cpu.dcache.demand_misses::cpu.data 255511 # number of demand (read+write) misses +system.cpu.dcache.demand_misses::total 255511 # number of demand (read+write) misses +system.cpu.dcache.overall_misses::cpu.data 300107 # number of overall misses +system.cpu.dcache.overall_misses::total 300107 # number of overall misses +system.cpu.dcache.ReadReq_miss_latency::cpu.data 1443300500 # number of ReadReq miss cycles +system.cpu.dcache.ReadReq_miss_latency::total 1443300500 # number of ReadReq miss cycles +system.cpu.dcache.WriteReq_miss_latency::cpu.data 16810663000 # number of WriteReq miss cycles +system.cpu.dcache.WriteReq_miss_latency::total 16810663000 # number of WriteReq miss cycles +system.cpu.dcache.demand_miss_latency::cpu.data 18253963500 # number of demand (read+write) miss cycles +system.cpu.dcache.demand_miss_latency::total 18253963500 # number of demand (read+write) miss cycles +system.cpu.dcache.overall_miss_latency::cpu.data 18253963500 # number of overall miss cycles +system.cpu.dcache.overall_miss_latency::total 18253963500 # number of overall miss cycles +system.cpu.dcache.ReadReq_accesses::cpu.data 22927643 # number of ReadReq accesses(hits+misses) +system.cpu.dcache.ReadReq_accesses::total 22927643 # number of ReadReq accesses(hits+misses) system.cpu.dcache.WriteReq_accesses::cpu.data 19849901 # number of WriteReq accesses(hits+misses) system.cpu.dcache.WriteReq_accesses::total 19849901 # number of WriteReq accesses(hits+misses) -system.cpu.dcache.SoftPFReq_accesses::cpu.data 127985 # number of SoftPFReq 
accesses(hits+misses) -system.cpu.dcache.SoftPFReq_accesses::total 127985 # number of SoftPFReq accesses(hits+misses) +system.cpu.dcache.SoftPFReq_accesses::cpu.data 127966 # number of SoftPFReq accesses(hits+misses) +system.cpu.dcache.SoftPFReq_accesses::total 127966 # number of SoftPFReq accesses(hits+misses) system.cpu.dcache.LoadLockedReq_accesses::cpu.data 15919 # number of LoadLockedReq accesses(hits+misses) system.cpu.dcache.LoadLockedReq_accesses::total 15919 # number of LoadLockedReq accesses(hits+misses) system.cpu.dcache.StoreCondReq_accesses::cpu.data 15919 # number of StoreCondReq accesses(hits+misses) system.cpu.dcache.StoreCondReq_accesses::total 15919 # number of StoreCondReq accesses(hits+misses) -system.cpu.dcache.demand_accesses::cpu.data 42764465 # number of demand (read+write) accesses -system.cpu.dcache.demand_accesses::total 42764465 # number of demand (read+write) accesses -system.cpu.dcache.overall_accesses::cpu.data 42892450 # number of overall (read+write) accesses -system.cpu.dcache.overall_accesses::total 42892450 # number of overall (read+write) accesses -system.cpu.dcache.ReadReq_miss_rate::cpu.data 0.002255 # miss rate for ReadReq accesses -system.cpu.dcache.ReadReq_miss_rate::total 0.002255 # miss rate for ReadReq accesses -system.cpu.dcache.WriteReq_miss_rate::cpu.data 0.010465 # miss rate for WriteReq accesses -system.cpu.dcache.WriteReq_miss_rate::total 0.010465 # miss rate for WriteReq accesses -system.cpu.dcache.SoftPFReq_miss_rate::cpu.data 0.348353 # miss rate for SoftPFReq accesses -system.cpu.dcache.SoftPFReq_miss_rate::total 0.348353 # miss rate for SoftPFReq accesses -system.cpu.dcache.demand_miss_rate::cpu.data 0.006066 # miss rate for demand accesses -system.cpu.dcache.demand_miss_rate::total 0.006066 # miss rate for demand accesses -system.cpu.dcache.overall_miss_rate::cpu.data 0.007087 # miss rate for overall accesses -system.cpu.dcache.overall_miss_rate::total 0.007087 # miss rate for overall accesses -system.cpu.dcache.ReadReq_avg_miss_latency::cpu.data 28845.628230 # average ReadReq miss latency -system.cpu.dcache.ReadReq_avg_miss_latency::total 28845.628230 # average ReadReq miss latency -system.cpu.dcache.WriteReq_avg_miss_latency::cpu.data 80928.310443 # average WriteReq miss latency -system.cpu.dcache.WriteReq_avg_miss_latency::total 80928.310443 # average WriteReq miss latency -system.cpu.dcache.demand_avg_miss_latency::cpu.data 70555.345233 # average overall miss latency -system.cpu.dcache.demand_avg_miss_latency::total 70555.345233 # average overall miss latency -system.cpu.dcache.overall_avg_miss_latency::cpu.data 60206.961780 # average overall miss latency -system.cpu.dcache.overall_avg_miss_latency::total 60206.961780 # average overall miss latency +system.cpu.dcache.demand_accesses::cpu.data 42777544 # number of demand (read+write) accesses +system.cpu.dcache.demand_accesses::total 42777544 # number of demand (read+write) accesses +system.cpu.dcache.overall_accesses::cpu.data 42905510 # number of overall (read+write) accesses +system.cpu.dcache.overall_accesses::total 42905510 # number of overall (read+write) accesses +system.cpu.dcache.ReadReq_miss_rate::cpu.data 0.002083 # miss rate for ReadReq accesses +system.cpu.dcache.ReadReq_miss_rate::total 0.002083 # miss rate for ReadReq accesses +system.cpu.dcache.WriteReq_miss_rate::cpu.data 0.010466 # miss rate for WriteReq accesses +system.cpu.dcache.WriteReq_miss_rate::total 0.010466 # miss rate for WriteReq accesses +system.cpu.dcache.SoftPFReq_miss_rate::cpu.data 0.348499 # miss 
rate for SoftPFReq accesses +system.cpu.dcache.SoftPFReq_miss_rate::total 0.348499 # miss rate for SoftPFReq accesses +system.cpu.dcache.demand_miss_rate::cpu.data 0.005973 # miss rate for demand accesses +system.cpu.dcache.demand_miss_rate::total 0.005973 # miss rate for demand accesses +system.cpu.dcache.overall_miss_rate::cpu.data 0.006995 # miss rate for overall accesses +system.cpu.dcache.overall_miss_rate::total 0.006995 # miss rate for overall accesses +system.cpu.dcache.ReadReq_avg_miss_latency::cpu.data 30214.798610 # average ReadReq miss latency +system.cpu.dcache.ReadReq_avg_miss_latency::total 30214.798610 # average ReadReq miss latency +system.cpu.dcache.WriteReq_avg_miss_latency::cpu.data 80920.478668 # average WriteReq miss latency +system.cpu.dcache.WriteReq_avg_miss_latency::total 80920.478668 # average WriteReq miss latency +system.cpu.dcache.demand_avg_miss_latency::cpu.data 71441.008411 # average overall miss latency +system.cpu.dcache.demand_avg_miss_latency::total 71441.008411 # average overall miss latency +system.cpu.dcache.overall_avg_miss_latency::cpu.data 60824.850803 # average overall miss latency +system.cpu.dcache.overall_avg_miss_latency::total 60824.850803 # average overall miss latency system.cpu.dcache.blocked_cycles::no_mshrs 0 # number of cycles access was blocked system.cpu.dcache.blocked_cycles::no_targets 0 # number of cycles access was blocked system.cpu.dcache.blocked::no_mshrs 0 # number of cycles access was blocked system.cpu.dcache.blocked::no_targets 0 # number of cycles access was blocked system.cpu.dcache.avg_blocked_cycles::no_mshrs nan # average number of cycles each access was blocked system.cpu.dcache.avg_blocked_cycles::no_targets nan # average number of cycles each access was blocked -system.cpu.dcache.writebacks::writebacks 128389 # number of writebacks -system.cpu.dcache.writebacks::total 128389 # number of writebacks -system.cpu.dcache.ReadReq_mshr_hits::cpu.data 22138 # number of ReadReq MSHR hits -system.cpu.dcache.ReadReq_mshr_hits::total 22138 # number of ReadReq MSHR hits -system.cpu.dcache.WriteReq_mshr_hits::cpu.data 100695 # number of WriteReq MSHR hits -system.cpu.dcache.WriteReq_mshr_hits::total 100695 # number of WriteReq MSHR hits -system.cpu.dcache.demand_mshr_hits::cpu.data 122833 # number of demand (read+write) MSHR hits -system.cpu.dcache.demand_mshr_hits::total 122833 # number of demand (read+write) MSHR hits -system.cpu.dcache.overall_mshr_hits::cpu.data 122833 # number of overall MSHR hits -system.cpu.dcache.overall_mshr_hits::total 122833 # number of overall MSHR hits -system.cpu.dcache.ReadReq_mshr_misses::cpu.data 29523 # number of ReadReq MSHR misses -system.cpu.dcache.ReadReq_mshr_misses::total 29523 # number of ReadReq MSHR misses -system.cpu.dcache.WriteReq_mshr_misses::cpu.data 107034 # number of WriteReq MSHR misses -system.cpu.dcache.WriteReq_mshr_misses::total 107034 # number of WriteReq MSHR misses -system.cpu.dcache.SoftPFReq_mshr_misses::cpu.data 23987 # number of SoftPFReq MSHR misses -system.cpu.dcache.SoftPFReq_mshr_misses::total 23987 # number of SoftPFReq MSHR misses -system.cpu.dcache.demand_mshr_misses::cpu.data 136557 # number of demand (read+write) MSHR misses -system.cpu.dcache.demand_mshr_misses::total 136557 # number of demand (read+write) MSHR misses -system.cpu.dcache.overall_mshr_misses::cpu.data 160544 # number of overall MSHR misses -system.cpu.dcache.overall_mshr_misses::total 160544 # number of overall MSHR misses -system.cpu.dcache.ReadReq_mshr_miss_latency::cpu.data 578329500 # 
number of ReadReq MSHR miss cycles -system.cpu.dcache.ReadReq_mshr_miss_latency::total 578329500 # number of ReadReq MSHR miss cycles -system.cpu.dcache.WriteReq_mshr_miss_latency::cpu.data 8490118500 # number of WriteReq MSHR miss cycles -system.cpu.dcache.WriteReq_mshr_miss_latency::total 8490118500 # number of WriteReq MSHR miss cycles -system.cpu.dcache.SoftPFReq_mshr_miss_latency::cpu.data 1713467500 # number of SoftPFReq MSHR miss cycles -system.cpu.dcache.SoftPFReq_mshr_miss_latency::total 1713467500 # number of SoftPFReq MSHR miss cycles -system.cpu.dcache.demand_mshr_miss_latency::cpu.data 9068448000 # number of demand (read+write) MSHR miss cycles -system.cpu.dcache.demand_mshr_miss_latency::total 9068448000 # number of demand (read+write) MSHR miss cycles -system.cpu.dcache.overall_mshr_miss_latency::cpu.data 10781915500 # number of overall MSHR miss cycles -system.cpu.dcache.overall_mshr_miss_latency::total 10781915500 # number of overall MSHR miss cycles +system.cpu.dcache.writebacks::writebacks 128383 # number of writebacks +system.cpu.dcache.writebacks::total 128383 # number of writebacks +system.cpu.dcache.ReadReq_mshr_hits::cpu.data 18246 # number of ReadReq MSHR hits +system.cpu.dcache.ReadReq_mshr_hits::total 18246 # number of ReadReq MSHR hits +system.cpu.dcache.WriteReq_mshr_hits::cpu.data 100706 # number of WriteReq MSHR hits +system.cpu.dcache.WriteReq_mshr_hits::total 100706 # number of WriteReq MSHR hits +system.cpu.dcache.demand_mshr_hits::cpu.data 118952 # number of demand (read+write) MSHR hits +system.cpu.dcache.demand_mshr_hits::total 118952 # number of demand (read+write) MSHR hits +system.cpu.dcache.overall_mshr_hits::cpu.data 118952 # number of overall MSHR hits +system.cpu.dcache.overall_mshr_hits::total 118952 # number of overall MSHR hits +system.cpu.dcache.ReadReq_mshr_misses::cpu.data 29522 # number of ReadReq MSHR misses +system.cpu.dcache.ReadReq_mshr_misses::total 29522 # number of ReadReq MSHR misses +system.cpu.dcache.WriteReq_mshr_misses::cpu.data 107037 # number of WriteReq MSHR misses +system.cpu.dcache.WriteReq_mshr_misses::total 107037 # number of WriteReq MSHR misses +system.cpu.dcache.SoftPFReq_mshr_misses::cpu.data 23981 # number of SoftPFReq MSHR misses +system.cpu.dcache.SoftPFReq_mshr_misses::total 23981 # number of SoftPFReq MSHR misses +system.cpu.dcache.demand_mshr_misses::cpu.data 136559 # number of demand (read+write) MSHR misses +system.cpu.dcache.demand_mshr_misses::total 136559 # number of demand (read+write) MSHR misses +system.cpu.dcache.overall_mshr_misses::cpu.data 160540 # number of overall MSHR misses +system.cpu.dcache.overall_mshr_misses::total 160540 # number of overall MSHR misses +system.cpu.dcache.ReadReq_mshr_miss_latency::cpu.data 576668000 # number of ReadReq MSHR miss cycles +system.cpu.dcache.ReadReq_mshr_miss_latency::total 576668000 # number of ReadReq MSHR miss cycles +system.cpu.dcache.WriteReq_mshr_miss_latency::cpu.data 8488003000 # number of WriteReq MSHR miss cycles +system.cpu.dcache.WriteReq_mshr_miss_latency::total 8488003000 # number of WriteReq MSHR miss cycles +system.cpu.dcache.SoftPFReq_mshr_miss_latency::cpu.data 1709526500 # number of SoftPFReq MSHR miss cycles +system.cpu.dcache.SoftPFReq_mshr_miss_latency::total 1709526500 # number of SoftPFReq MSHR miss cycles +system.cpu.dcache.demand_mshr_miss_latency::cpu.data 9064671000 # number of demand (read+write) MSHR miss cycles +system.cpu.dcache.demand_mshr_miss_latency::total 9064671000 # number of demand (read+write) MSHR miss cycles 
+system.cpu.dcache.overall_mshr_miss_latency::cpu.data 10774197500 # number of overall MSHR miss cycles +system.cpu.dcache.overall_mshr_miss_latency::total 10774197500 # number of overall MSHR miss cycles system.cpu.dcache.ReadReq_mshr_miss_rate::cpu.data 0.001288 # mshr miss rate for ReadReq accesses system.cpu.dcache.ReadReq_mshr_miss_rate::total 0.001288 # mshr miss rate for ReadReq accesses system.cpu.dcache.WriteReq_mshr_miss_rate::cpu.data 0.005392 # mshr miss rate for WriteReq accesses system.cpu.dcache.WriteReq_mshr_miss_rate::total 0.005392 # mshr miss rate for WriteReq accesses -system.cpu.dcache.SoftPFReq_mshr_miss_rate::cpu.data 0.187420 # mshr miss rate for SoftPFReq accesses -system.cpu.dcache.SoftPFReq_mshr_miss_rate::total 0.187420 # mshr miss rate for SoftPFReq accesses -system.cpu.dcache.demand_mshr_miss_rate::cpu.data 0.003193 # mshr miss rate for demand accesses -system.cpu.dcache.demand_mshr_miss_rate::total 0.003193 # mshr miss rate for demand accesses -system.cpu.dcache.overall_mshr_miss_rate::cpu.data 0.003743 # mshr miss rate for overall accesses -system.cpu.dcache.overall_mshr_miss_rate::total 0.003743 # mshr miss rate for overall accesses -system.cpu.dcache.ReadReq_avg_mshr_miss_latency::cpu.data 19589.116960 # average ReadReq mshr miss latency -system.cpu.dcache.ReadReq_avg_mshr_miss_latency::total 19589.116960 # average ReadReq mshr miss latency -system.cpu.dcache.WriteReq_avg_mshr_miss_latency::cpu.data 79321.696844 # average WriteReq mshr miss latency -system.cpu.dcache.WriteReq_avg_mshr_miss_latency::total 79321.696844 # average WriteReq mshr miss latency -system.cpu.dcache.SoftPFReq_avg_mshr_miss_latency::cpu.data 71433.172135 # average SoftPFReq mshr miss latency -system.cpu.dcache.SoftPFReq_avg_mshr_miss_latency::total 71433.172135 # average SoftPFReq mshr miss latency -system.cpu.dcache.demand_avg_mshr_miss_latency::cpu.data 66407.785760 # average overall mshr miss latency -system.cpu.dcache.demand_avg_mshr_miss_latency::total 66407.785760 # average overall mshr miss latency -system.cpu.dcache.overall_avg_mshr_miss_latency::cpu.data 67158.632524 # average overall mshr miss latency -system.cpu.dcache.overall_avg_mshr_miss_latency::total 67158.632524 # average overall mshr miss latency -system.cpu.icache.tags.pwrStateResidencyTicks::UNDEFINED 56802974500 # Cumulative time (in ticks) in various power states -system.cpu.icache.tags.replacements 43497 # number of replacements -system.cpu.icache.tags.tagsinuse 1852.676989 # Cycle average of tags in use -system.cpu.icache.tags.total_refs 24844377 # Total number of references to valid blocks. -system.cpu.icache.tags.sampled_refs 45539 # Sample count of references to valid blocks. -system.cpu.icache.tags.avg_refs 545.562639 # Average number of references to valid blocks. 
+system.cpu.dcache.SoftPFReq_mshr_miss_rate::cpu.data 0.187401 # mshr miss rate for SoftPFReq accesses +system.cpu.dcache.SoftPFReq_mshr_miss_rate::total 0.187401 # mshr miss rate for SoftPFReq accesses +system.cpu.dcache.demand_mshr_miss_rate::cpu.data 0.003192 # mshr miss rate for demand accesses +system.cpu.dcache.demand_mshr_miss_rate::total 0.003192 # mshr miss rate for demand accesses +system.cpu.dcache.overall_mshr_miss_rate::cpu.data 0.003742 # mshr miss rate for overall accesses +system.cpu.dcache.overall_mshr_miss_rate::total 0.003742 # mshr miss rate for overall accesses +system.cpu.dcache.ReadReq_avg_mshr_miss_latency::cpu.data 19533.500440 # average ReadReq mshr miss latency +system.cpu.dcache.ReadReq_avg_mshr_miss_latency::total 19533.500440 # average ReadReq mshr miss latency +system.cpu.dcache.WriteReq_avg_mshr_miss_latency::cpu.data 79299.709446 # average WriteReq mshr miss latency +system.cpu.dcache.WriteReq_avg_mshr_miss_latency::total 79299.709446 # average WriteReq mshr miss latency +system.cpu.dcache.SoftPFReq_avg_mshr_miss_latency::cpu.data 71286.706142 # average SoftPFReq mshr miss latency +system.cpu.dcache.SoftPFReq_avg_mshr_miss_latency::total 71286.706142 # average SoftPFReq mshr miss latency +system.cpu.dcache.demand_avg_mshr_miss_latency::cpu.data 66379.154798 # average overall mshr miss latency +system.cpu.dcache.demand_avg_mshr_miss_latency::total 66379.154798 # average overall mshr miss latency +system.cpu.dcache.overall_avg_mshr_miss_latency::cpu.data 67112.230597 # average overall mshr miss latency +system.cpu.dcache.overall_avg_mshr_miss_latency::total 67112.230597 # average overall mshr miss latency +system.cpu.icache.tags.pwrStateResidencyTicks::UNDEFINED 58768125500 # Cumulative time (in ticks) in various power states +system.cpu.icache.tags.replacements 43538 # number of replacements +system.cpu.icache.tags.tagsinuse 1854.967198 # Cycle average of tags in use +system.cpu.icache.tags.total_refs 25047260 # Total number of references to valid blocks. +system.cpu.icache.tags.sampled_refs 45580 # Sample count of references to valid blocks. +system.cpu.icache.tags.avg_refs 549.523036 # Average number of references to valid blocks. system.cpu.icache.tags.warmup_cycle 0 # Cycle when the warmup percentage was hit. 
-system.cpu.icache.tags.occ_blocks::cpu.inst 1852.676989 # Average occupied blocks per requestor -system.cpu.icache.tags.occ_percent::cpu.inst 0.904627 # Average percentage of cache occupancy -system.cpu.icache.tags.occ_percent::total 0.904627 # Average percentage of cache occupancy +system.cpu.icache.tags.occ_blocks::cpu.inst 1854.967198 # Average occupied blocks per requestor +system.cpu.icache.tags.occ_percent::cpu.inst 0.905746 # Average percentage of cache occupancy +system.cpu.icache.tags.occ_percent::total 0.905746 # Average percentage of cache occupancy system.cpu.icache.tags.occ_task_id_blocks::1024 2042 # Occupied blocks per task id -system.cpu.icache.tags.age_task_id_blocks_1024::0 76 # Occupied blocks per task id -system.cpu.icache.tags.age_task_id_blocks_1024::1 46 # Occupied blocks per task id -system.cpu.icache.tags.age_task_id_blocks_1024::3 915 # Occupied blocks per task id -system.cpu.icache.tags.age_task_id_blocks_1024::4 1005 # Occupied blocks per task id +system.cpu.icache.tags.age_task_id_blocks_1024::0 77 # Occupied blocks per task id +system.cpu.icache.tags.age_task_id_blocks_1024::1 45 # Occupied blocks per task id +system.cpu.icache.tags.age_task_id_blocks_1024::2 1 # Occupied blocks per task id +system.cpu.icache.tags.age_task_id_blocks_1024::3 907 # Occupied blocks per task id +system.cpu.icache.tags.age_task_id_blocks_1024::4 1012 # Occupied blocks per task id system.cpu.icache.tags.occ_task_id_percent::1024 0.997070 # Percentage of cache occupancy per task id -system.cpu.icache.tags.tag_accesses 49825373 # Number of tag accesses -system.cpu.icache.tags.data_accesses 49825373 # Number of data accesses -system.cpu.icache.pwrStateResidencyTicks::UNDEFINED 56802974500 # Cumulative time (in ticks) in various power states -system.cpu.icache.ReadReq_hits::cpu.inst 24844377 # number of ReadReq hits -system.cpu.icache.ReadReq_hits::total 24844377 # number of ReadReq hits -system.cpu.icache.demand_hits::cpu.inst 24844377 # number of demand (read+write) hits -system.cpu.icache.demand_hits::total 24844377 # number of demand (read+write) hits -system.cpu.icache.overall_hits::cpu.inst 24844377 # number of overall hits -system.cpu.icache.overall_hits::total 24844377 # number of overall hits -system.cpu.icache.ReadReq_misses::cpu.inst 45540 # number of ReadReq misses -system.cpu.icache.ReadReq_misses::total 45540 # number of ReadReq misses -system.cpu.icache.demand_misses::cpu.inst 45540 # number of demand (read+write) misses -system.cpu.icache.demand_misses::total 45540 # number of demand (read+write) misses -system.cpu.icache.overall_misses::cpu.inst 45540 # number of overall misses -system.cpu.icache.overall_misses::total 45540 # number of overall misses -system.cpu.icache.ReadReq_miss_latency::cpu.inst 905103000 # number of ReadReq miss cycles -system.cpu.icache.ReadReq_miss_latency::total 905103000 # number of ReadReq miss cycles -system.cpu.icache.demand_miss_latency::cpu.inst 905103000 # number of demand (read+write) miss cycles -system.cpu.icache.demand_miss_latency::total 905103000 # number of demand (read+write) miss cycles -system.cpu.icache.overall_miss_latency::cpu.inst 905103000 # number of overall miss cycles -system.cpu.icache.overall_miss_latency::total 905103000 # number of overall miss cycles -system.cpu.icache.ReadReq_accesses::cpu.inst 24889917 # number of ReadReq accesses(hits+misses) -system.cpu.icache.ReadReq_accesses::total 24889917 # number of ReadReq accesses(hits+misses) -system.cpu.icache.demand_accesses::cpu.inst 24889917 # number of demand 
(read+write) accesses -system.cpu.icache.demand_accesses::total 24889917 # number of demand (read+write) accesses -system.cpu.icache.overall_accesses::cpu.inst 24889917 # number of overall (read+write) accesses -system.cpu.icache.overall_accesses::total 24889917 # number of overall (read+write) accesses -system.cpu.icache.ReadReq_miss_rate::cpu.inst 0.001830 # miss rate for ReadReq accesses -system.cpu.icache.ReadReq_miss_rate::total 0.001830 # miss rate for ReadReq accesses -system.cpu.icache.demand_miss_rate::cpu.inst 0.001830 # miss rate for demand accesses -system.cpu.icache.demand_miss_rate::total 0.001830 # miss rate for demand accesses -system.cpu.icache.overall_miss_rate::cpu.inst 0.001830 # miss rate for overall accesses -system.cpu.icache.overall_miss_rate::total 0.001830 # miss rate for overall accesses -system.cpu.icache.ReadReq_avg_miss_latency::cpu.inst 19874.901186 # average ReadReq miss latency -system.cpu.icache.ReadReq_avg_miss_latency::total 19874.901186 # average ReadReq miss latency -system.cpu.icache.demand_avg_miss_latency::cpu.inst 19874.901186 # average overall miss latency -system.cpu.icache.demand_avg_miss_latency::total 19874.901186 # average overall miss latency -system.cpu.icache.overall_avg_miss_latency::cpu.inst 19874.901186 # average overall miss latency -system.cpu.icache.overall_avg_miss_latency::total 19874.901186 # average overall miss latency +system.cpu.icache.tags.tag_accesses 50231262 # Number of tag accesses +system.cpu.icache.tags.data_accesses 50231262 # Number of data accesses +system.cpu.icache.pwrStateResidencyTicks::UNDEFINED 58768125500 # Cumulative time (in ticks) in various power states +system.cpu.icache.ReadReq_hits::cpu.inst 25047260 # number of ReadReq hits +system.cpu.icache.ReadReq_hits::total 25047260 # number of ReadReq hits +system.cpu.icache.demand_hits::cpu.inst 25047260 # number of demand (read+write) hits +system.cpu.icache.demand_hits::total 25047260 # number of demand (read+write) hits +system.cpu.icache.overall_hits::cpu.inst 25047260 # number of overall hits +system.cpu.icache.overall_hits::total 25047260 # number of overall hits +system.cpu.icache.ReadReq_misses::cpu.inst 45581 # number of ReadReq misses +system.cpu.icache.ReadReq_misses::total 45581 # number of ReadReq misses +system.cpu.icache.demand_misses::cpu.inst 45581 # number of demand (read+write) misses +system.cpu.icache.demand_misses::total 45581 # number of demand (read+write) misses +system.cpu.icache.overall_misses::cpu.inst 45581 # number of overall misses +system.cpu.icache.overall_misses::total 45581 # number of overall misses +system.cpu.icache.ReadReq_miss_latency::cpu.inst 906370500 # number of ReadReq miss cycles +system.cpu.icache.ReadReq_miss_latency::total 906370500 # number of ReadReq miss cycles +system.cpu.icache.demand_miss_latency::cpu.inst 906370500 # number of demand (read+write) miss cycles +system.cpu.icache.demand_miss_latency::total 906370500 # number of demand (read+write) miss cycles +system.cpu.icache.overall_miss_latency::cpu.inst 906370500 # number of overall miss cycles +system.cpu.icache.overall_miss_latency::total 906370500 # number of overall miss cycles +system.cpu.icache.ReadReq_accesses::cpu.inst 25092841 # number of ReadReq accesses(hits+misses) +system.cpu.icache.ReadReq_accesses::total 25092841 # number of ReadReq accesses(hits+misses) +system.cpu.icache.demand_accesses::cpu.inst 25092841 # number of demand (read+write) accesses +system.cpu.icache.demand_accesses::total 25092841 # number of demand (read+write) accesses 
+system.cpu.icache.overall_accesses::cpu.inst 25092841 # number of overall (read+write) accesses +system.cpu.icache.overall_accesses::total 25092841 # number of overall (read+write) accesses +system.cpu.icache.ReadReq_miss_rate::cpu.inst 0.001816 # miss rate for ReadReq accesses +system.cpu.icache.ReadReq_miss_rate::total 0.001816 # miss rate for ReadReq accesses +system.cpu.icache.demand_miss_rate::cpu.inst 0.001816 # miss rate for demand accesses +system.cpu.icache.demand_miss_rate::total 0.001816 # miss rate for demand accesses +system.cpu.icache.overall_miss_rate::cpu.inst 0.001816 # miss rate for overall accesses +system.cpu.icache.overall_miss_rate::total 0.001816 # miss rate for overall accesses +system.cpu.icache.ReadReq_avg_miss_latency::cpu.inst 19884.831399 # average ReadReq miss latency +system.cpu.icache.ReadReq_avg_miss_latency::total 19884.831399 # average ReadReq miss latency +system.cpu.icache.demand_avg_miss_latency::cpu.inst 19884.831399 # average overall miss latency +system.cpu.icache.demand_avg_miss_latency::total 19884.831399 # average overall miss latency +system.cpu.icache.overall_avg_miss_latency::cpu.inst 19884.831399 # average overall miss latency +system.cpu.icache.overall_avg_miss_latency::total 19884.831399 # average overall miss latency system.cpu.icache.blocked_cycles::no_mshrs 0 # number of cycles access was blocked system.cpu.icache.blocked_cycles::no_targets 0 # number of cycles access was blocked system.cpu.icache.blocked::no_mshrs 0 # number of cycles access was blocked system.cpu.icache.blocked::no_targets 0 # number of cycles access was blocked system.cpu.icache.avg_blocked_cycles::no_mshrs nan # average number of cycles each access was blocked system.cpu.icache.avg_blocked_cycles::no_targets nan # average number of cycles each access was blocked -system.cpu.icache.writebacks::writebacks 43497 # number of writebacks -system.cpu.icache.writebacks::total 43497 # number of writebacks -system.cpu.icache.ReadReq_mshr_misses::cpu.inst 45540 # number of ReadReq MSHR misses -system.cpu.icache.ReadReq_mshr_misses::total 45540 # number of ReadReq MSHR misses -system.cpu.icache.demand_mshr_misses::cpu.inst 45540 # number of demand (read+write) MSHR misses -system.cpu.icache.demand_mshr_misses::total 45540 # number of demand (read+write) MSHR misses -system.cpu.icache.overall_mshr_misses::cpu.inst 45540 # number of overall MSHR misses -system.cpu.icache.overall_mshr_misses::total 45540 # number of overall MSHR misses -system.cpu.icache.ReadReq_mshr_miss_latency::cpu.inst 859564000 # number of ReadReq MSHR miss cycles -system.cpu.icache.ReadReq_mshr_miss_latency::total 859564000 # number of ReadReq MSHR miss cycles -system.cpu.icache.demand_mshr_miss_latency::cpu.inst 859564000 # number of demand (read+write) MSHR miss cycles -system.cpu.icache.demand_mshr_miss_latency::total 859564000 # number of demand (read+write) MSHR miss cycles -system.cpu.icache.overall_mshr_miss_latency::cpu.inst 859564000 # number of overall MSHR miss cycles -system.cpu.icache.overall_mshr_miss_latency::total 859564000 # number of overall MSHR miss cycles -system.cpu.icache.ReadReq_mshr_miss_rate::cpu.inst 0.001830 # mshr miss rate for ReadReq accesses -system.cpu.icache.ReadReq_mshr_miss_rate::total 0.001830 # mshr miss rate for ReadReq accesses -system.cpu.icache.demand_mshr_miss_rate::cpu.inst 0.001830 # mshr miss rate for demand accesses -system.cpu.icache.demand_mshr_miss_rate::total 0.001830 # mshr miss rate for demand accesses -system.cpu.icache.overall_mshr_miss_rate::cpu.inst 
0.001830 # mshr miss rate for overall accesses -system.cpu.icache.overall_mshr_miss_rate::total 0.001830 # mshr miss rate for overall accesses -system.cpu.icache.ReadReq_avg_mshr_miss_latency::cpu.inst 18874.923144 # average ReadReq mshr miss latency -system.cpu.icache.ReadReq_avg_mshr_miss_latency::total 18874.923144 # average ReadReq mshr miss latency -system.cpu.icache.demand_avg_mshr_miss_latency::cpu.inst 18874.923144 # average overall mshr miss latency -system.cpu.icache.demand_avg_mshr_miss_latency::total 18874.923144 # average overall mshr miss latency -system.cpu.icache.overall_avg_mshr_miss_latency::cpu.inst 18874.923144 # average overall mshr miss latency -system.cpu.icache.overall_avg_mshr_miss_latency::total 18874.923144 # average overall mshr miss latency -system.cpu.l2cache.tags.pwrStateResidencyTicks::UNDEFINED 56802974500 # Cumulative time (in ticks) in various power states -system.cpu.l2cache.tags.replacements 96391 # number of replacements -system.cpu.l2cache.tags.tagsinuse 29870.997301 # Cycle average of tags in use -system.cpu.l2cache.tags.total_refs 163417 # Total number of references to valid blocks. -system.cpu.l2cache.tags.sampled_refs 127542 # Sample count of references to valid blocks. -system.cpu.l2cache.tags.avg_refs 1.281280 # Average number of references to valid blocks. +system.cpu.icache.writebacks::writebacks 43538 # number of writebacks +system.cpu.icache.writebacks::total 43538 # number of writebacks +system.cpu.icache.ReadReq_mshr_misses::cpu.inst 45581 # number of ReadReq MSHR misses +system.cpu.icache.ReadReq_mshr_misses::total 45581 # number of ReadReq MSHR misses +system.cpu.icache.demand_mshr_misses::cpu.inst 45581 # number of demand (read+write) MSHR misses +system.cpu.icache.demand_mshr_misses::total 45581 # number of demand (read+write) MSHR misses +system.cpu.icache.overall_mshr_misses::cpu.inst 45581 # number of overall MSHR misses +system.cpu.icache.overall_mshr_misses::total 45581 # number of overall MSHR misses +system.cpu.icache.ReadReq_mshr_miss_latency::cpu.inst 860790500 # number of ReadReq MSHR miss cycles +system.cpu.icache.ReadReq_mshr_miss_latency::total 860790500 # number of ReadReq MSHR miss cycles +system.cpu.icache.demand_mshr_miss_latency::cpu.inst 860790500 # number of demand (read+write) MSHR miss cycles +system.cpu.icache.demand_mshr_miss_latency::total 860790500 # number of demand (read+write) MSHR miss cycles +system.cpu.icache.overall_mshr_miss_latency::cpu.inst 860790500 # number of overall MSHR miss cycles +system.cpu.icache.overall_mshr_miss_latency::total 860790500 # number of overall MSHR miss cycles +system.cpu.icache.ReadReq_mshr_miss_rate::cpu.inst 0.001816 # mshr miss rate for ReadReq accesses +system.cpu.icache.ReadReq_mshr_miss_rate::total 0.001816 # mshr miss rate for ReadReq accesses +system.cpu.icache.demand_mshr_miss_rate::cpu.inst 0.001816 # mshr miss rate for demand accesses +system.cpu.icache.demand_mshr_miss_rate::total 0.001816 # mshr miss rate for demand accesses +system.cpu.icache.overall_mshr_miss_rate::cpu.inst 0.001816 # mshr miss rate for overall accesses +system.cpu.icache.overall_mshr_miss_rate::total 0.001816 # mshr miss rate for overall accesses +system.cpu.icache.ReadReq_avg_mshr_miss_latency::cpu.inst 18884.853338 # average ReadReq mshr miss latency +system.cpu.icache.ReadReq_avg_mshr_miss_latency::total 18884.853338 # average ReadReq mshr miss latency +system.cpu.icache.demand_avg_mshr_miss_latency::cpu.inst 18884.853338 # average overall mshr miss latency 
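(Editorial aside, not part of the diff: the derived icache statistics updated above are simple functions of the raw counters, so the new reference values can be sanity-checked by hand. The sketch below is illustrative only; every number and stat name is quoted from the + lines of this hunk, and the script belongs to neither gem5 nor the test suite.)

# Sanity check of the updated system.cpu.icache reference values (Python).
accesses   = 25092841    # ReadReq_accesses::total
misses     = 45581       # ReadReq_misses::total (= ReadReq_mshr_misses::total)
miss_ticks = 906370500   # ReadReq_miss_latency::total
mshr_ticks = 860790500   # ReadReq_mshr_miss_latency::total

# miss rate = misses / accesses; average latencies = total latency / misses
assert f"{misses / accesses:.6f}" == "0.001816"        # ReadReq_miss_rate::total
assert f"{miss_ticks / misses:.6f}" == "19884.831399"  # ReadReq_avg_miss_latency::total
assert f"{mshr_ticks / misses:.6f}" == "18884.853338"  # ReadReq_avg_mshr_miss_latency::total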
+system.cpu.icache.demand_avg_mshr_miss_latency::total 18884.853338 # average overall mshr miss latency +system.cpu.icache.overall_avg_mshr_miss_latency::cpu.inst 18884.853338 # average overall mshr miss latency +system.cpu.icache.overall_avg_mshr_miss_latency::total 18884.853338 # average overall mshr miss latency +system.cpu.l2cache.tags.pwrStateResidencyTicks::UNDEFINED 58768125500 # Cumulative time (in ticks) in various power states +system.cpu.l2cache.tags.replacements 96393 # number of replacements +system.cpu.l2cache.tags.tagsinuse 29915.680999 # Cycle average of tags in use +system.cpu.l2cache.tags.total_refs 163475 # Total number of references to valid blocks. +system.cpu.l2cache.tags.sampled_refs 127546 # Sample count of references to valid blocks. +system.cpu.l2cache.tags.avg_refs 1.281694 # Average number of references to valid blocks. system.cpu.l2cache.tags.warmup_cycle 0 # Cycle when the warmup percentage was hit. -system.cpu.l2cache.tags.occ_blocks::writebacks 26781.820547 # Average occupied blocks per requestor -system.cpu.l2cache.tags.occ_blocks::cpu.inst 1433.103835 # Average occupied blocks per requestor -system.cpu.l2cache.tags.occ_blocks::cpu.data 1656.072920 # Average occupied blocks per requestor -system.cpu.l2cache.tags.occ_percent::writebacks 0.817316 # Average percentage of cache occupancy -system.cpu.l2cache.tags.occ_percent::cpu.inst 0.043735 # Average percentage of cache occupancy -system.cpu.l2cache.tags.occ_percent::cpu.data 0.050539 # Average percentage of cache occupancy -system.cpu.l2cache.tags.occ_percent::total 0.911590 # Average percentage of cache occupancy -system.cpu.l2cache.tags.occ_task_id_blocks::1024 31151 # Occupied blocks per task id -system.cpu.l2cache.tags.age_task_id_blocks_1024::0 191 # Occupied blocks per task id +system.cpu.l2cache.tags.occ_blocks::writebacks 26835.960013 # Average occupied blocks per requestor +system.cpu.l2cache.tags.occ_blocks::cpu.inst 1436.225853 # Average occupied blocks per requestor +system.cpu.l2cache.tags.occ_blocks::cpu.data 1643.495133 # Average occupied blocks per requestor +system.cpu.l2cache.tags.occ_percent::writebacks 0.818969 # Average percentage of cache occupancy +system.cpu.l2cache.tags.occ_percent::cpu.inst 0.043830 # Average percentage of cache occupancy +system.cpu.l2cache.tags.occ_percent::cpu.data 0.050155 # Average percentage of cache occupancy +system.cpu.l2cache.tags.occ_percent::total 0.912954 # Average percentage of cache occupancy +system.cpu.l2cache.tags.occ_task_id_blocks::1024 31153 # Occupied blocks per task id +system.cpu.l2cache.tags.age_task_id_blocks_1024::0 193 # Occupied blocks per task id system.cpu.l2cache.tags.age_task_id_blocks_1024::1 1859 # Occupied blocks per task id -system.cpu.l2cache.tags.age_task_id_blocks_1024::2 12725 # Occupied blocks per task id -system.cpu.l2cache.tags.age_task_id_blocks_1024::3 15781 # Occupied blocks per task id -system.cpu.l2cache.tags.age_task_id_blocks_1024::4 595 # Occupied blocks per task id -system.cpu.l2cache.tags.occ_task_id_percent::1024 0.950653 # Percentage of cache occupancy per task id -system.cpu.l2cache.tags.tag_accesses 3420152 # Number of tag accesses -system.cpu.l2cache.tags.data_accesses 3420152 # Number of data accesses -system.cpu.l2cache.pwrStateResidencyTicks::UNDEFINED 56802974500 # Cumulative time (in ticks) in various power states -system.cpu.l2cache.WritebackDirty_hits::writebacks 128389 # number of WritebackDirty hits -system.cpu.l2cache.WritebackDirty_hits::total 128389 # number of WritebackDirty hits 
-system.cpu.l2cache.WritebackClean_hits::writebacks 39908 # number of WritebackClean hits -system.cpu.l2cache.WritebackClean_hits::total 39908 # number of WritebackClean hits -system.cpu.l2cache.ReadExReq_hits::cpu.data 4752 # number of ReadExReq hits -system.cpu.l2cache.ReadExReq_hits::total 4752 # number of ReadExReq hits -system.cpu.l2cache.ReadCleanReq_hits::cpu.inst 41065 # number of ReadCleanReq hits -system.cpu.l2cache.ReadCleanReq_hits::total 41065 # number of ReadCleanReq hits -system.cpu.l2cache.ReadSharedReq_hits::cpu.data 31907 # number of ReadSharedReq hits -system.cpu.l2cache.ReadSharedReq_hits::total 31907 # number of ReadSharedReq hits -system.cpu.l2cache.demand_hits::cpu.inst 41065 # number of demand (read+write) hits -system.cpu.l2cache.demand_hits::cpu.data 36659 # number of demand (read+write) hits -system.cpu.l2cache.demand_hits::total 77724 # number of demand (read+write) hits -system.cpu.l2cache.overall_hits::cpu.inst 41065 # number of overall hits -system.cpu.l2cache.overall_hits::cpu.data 36659 # number of overall hits -system.cpu.l2cache.overall_hits::total 77724 # number of overall hits -system.cpu.l2cache.ReadExReq_misses::cpu.data 102282 # number of ReadExReq misses -system.cpu.l2cache.ReadExReq_misses::total 102282 # number of ReadExReq misses -system.cpu.l2cache.ReadCleanReq_misses::cpu.inst 4475 # number of ReadCleanReq misses -system.cpu.l2cache.ReadCleanReq_misses::total 4475 # number of ReadCleanReq misses +system.cpu.l2cache.tags.age_task_id_blocks_1024::2 12744 # Occupied blocks per task id +system.cpu.l2cache.tags.age_task_id_blocks_1024::3 15761 # Occupied blocks per task id +system.cpu.l2cache.tags.age_task_id_blocks_1024::4 596 # Occupied blocks per task id +system.cpu.l2cache.tags.occ_task_id_percent::1024 0.950714 # Percentage of cache occupancy per task id +system.cpu.l2cache.tags.tag_accesses 3420655 # Number of tag accesses +system.cpu.l2cache.tags.data_accesses 3420655 # Number of data accesses +system.cpu.l2cache.pwrStateResidencyTicks::UNDEFINED 58768125500 # Cumulative time (in ticks) in various power states +system.cpu.l2cache.WritebackDirty_hits::writebacks 128383 # number of WritebackDirty hits +system.cpu.l2cache.WritebackDirty_hits::total 128383 # number of WritebackDirty hits +system.cpu.l2cache.WritebackClean_hits::writebacks 39935 # number of WritebackClean hits +system.cpu.l2cache.WritebackClean_hits::total 39935 # number of WritebackClean hits +system.cpu.l2cache.ReadExReq_hits::cpu.data 4757 # number of ReadExReq hits +system.cpu.l2cache.ReadExReq_hits::total 4757 # number of ReadExReq hits +system.cpu.l2cache.ReadCleanReq_hits::cpu.inst 41105 # number of ReadCleanReq hits +system.cpu.l2cache.ReadCleanReq_hits::total 41105 # number of ReadCleanReq hits +system.cpu.l2cache.ReadSharedReq_hits::cpu.data 31900 # number of ReadSharedReq hits +system.cpu.l2cache.ReadSharedReq_hits::total 31900 # number of ReadSharedReq hits +system.cpu.l2cache.demand_hits::cpu.inst 41105 # number of demand (read+write) hits +system.cpu.l2cache.demand_hits::cpu.data 36657 # number of demand (read+write) hits +system.cpu.l2cache.demand_hits::total 77762 # number of demand (read+write) hits +system.cpu.l2cache.overall_hits::cpu.inst 41105 # number of overall hits +system.cpu.l2cache.overall_hits::cpu.data 36657 # number of overall hits +system.cpu.l2cache.overall_hits::total 77762 # number of overall hits +system.cpu.l2cache.ReadExReq_misses::cpu.data 102280 # number of ReadExReq misses +system.cpu.l2cache.ReadExReq_misses::total 102280 # number of 
ReadExReq misses +system.cpu.l2cache.ReadCleanReq_misses::cpu.inst 4476 # number of ReadCleanReq misses +system.cpu.l2cache.ReadCleanReq_misses::total 4476 # number of ReadCleanReq misses system.cpu.l2cache.ReadSharedReq_misses::cpu.data 21603 # number of ReadSharedReq misses system.cpu.l2cache.ReadSharedReq_misses::total 21603 # number of ReadSharedReq misses -system.cpu.l2cache.demand_misses::cpu.inst 4475 # number of demand (read+write) misses -system.cpu.l2cache.demand_misses::cpu.data 123885 # number of demand (read+write) misses -system.cpu.l2cache.demand_misses::total 128360 # number of demand (read+write) misses -system.cpu.l2cache.overall_misses::cpu.inst 4475 # number of overall misses -system.cpu.l2cache.overall_misses::cpu.data 123885 # number of overall misses -system.cpu.l2cache.overall_misses::total 128360 # number of overall misses -system.cpu.l2cache.ReadExReq_miss_latency::cpu.data 8279623500 # number of ReadExReq miss cycles -system.cpu.l2cache.ReadExReq_miss_latency::total 8279623500 # number of ReadExReq miss cycles -system.cpu.l2cache.ReadCleanReq_miss_latency::cpu.inst 356201500 # number of ReadCleanReq miss cycles -system.cpu.l2cache.ReadCleanReq_miss_latency::total 356201500 # number of ReadCleanReq miss cycles -system.cpu.l2cache.ReadSharedReq_miss_latency::cpu.data 1872087500 # number of ReadSharedReq miss cycles -system.cpu.l2cache.ReadSharedReq_miss_latency::total 1872087500 # number of ReadSharedReq miss cycles -system.cpu.l2cache.demand_miss_latency::cpu.inst 356201500 # number of demand (read+write) miss cycles -system.cpu.l2cache.demand_miss_latency::cpu.data 10151711000 # number of demand (read+write) miss cycles -system.cpu.l2cache.demand_miss_latency::total 10507912500 # number of demand (read+write) miss cycles -system.cpu.l2cache.overall_miss_latency::cpu.inst 356201500 # number of overall miss cycles -system.cpu.l2cache.overall_miss_latency::cpu.data 10151711000 # number of overall miss cycles -system.cpu.l2cache.overall_miss_latency::total 10507912500 # number of overall miss cycles -system.cpu.l2cache.WritebackDirty_accesses::writebacks 128389 # number of WritebackDirty accesses(hits+misses) -system.cpu.l2cache.WritebackDirty_accesses::total 128389 # number of WritebackDirty accesses(hits+misses) -system.cpu.l2cache.WritebackClean_accesses::writebacks 39908 # number of WritebackClean accesses(hits+misses) -system.cpu.l2cache.WritebackClean_accesses::total 39908 # number of WritebackClean accesses(hits+misses) -system.cpu.l2cache.ReadExReq_accesses::cpu.data 107034 # number of ReadExReq accesses(hits+misses) -system.cpu.l2cache.ReadExReq_accesses::total 107034 # number of ReadExReq accesses(hits+misses) -system.cpu.l2cache.ReadCleanReq_accesses::cpu.inst 45540 # number of ReadCleanReq accesses(hits+misses) -system.cpu.l2cache.ReadCleanReq_accesses::total 45540 # number of ReadCleanReq accesses(hits+misses) -system.cpu.l2cache.ReadSharedReq_accesses::cpu.data 53510 # number of ReadSharedReq accesses(hits+misses) -system.cpu.l2cache.ReadSharedReq_accesses::total 53510 # number of ReadSharedReq accesses(hits+misses) -system.cpu.l2cache.demand_accesses::cpu.inst 45540 # number of demand (read+write) accesses -system.cpu.l2cache.demand_accesses::cpu.data 160544 # number of demand (read+write) accesses -system.cpu.l2cache.demand_accesses::total 206084 # number of demand (read+write) accesses -system.cpu.l2cache.overall_accesses::cpu.inst 45540 # number of overall (read+write) accesses -system.cpu.l2cache.overall_accesses::cpu.data 160544 # number of 
overall (read+write) accesses -system.cpu.l2cache.overall_accesses::total 206084 # number of overall (read+write) accesses -system.cpu.l2cache.ReadExReq_miss_rate::cpu.data 0.955603 # miss rate for ReadExReq accesses -system.cpu.l2cache.ReadExReq_miss_rate::total 0.955603 # miss rate for ReadExReq accesses -system.cpu.l2cache.ReadCleanReq_miss_rate::cpu.inst 0.098265 # miss rate for ReadCleanReq accesses -system.cpu.l2cache.ReadCleanReq_miss_rate::total 0.098265 # miss rate for ReadCleanReq accesses -system.cpu.l2cache.ReadSharedReq_miss_rate::cpu.data 0.403719 # miss rate for ReadSharedReq accesses -system.cpu.l2cache.ReadSharedReq_miss_rate::total 0.403719 # miss rate for ReadSharedReq accesses -system.cpu.l2cache.demand_miss_rate::cpu.inst 0.098265 # miss rate for demand accesses -system.cpu.l2cache.demand_miss_rate::cpu.data 0.771658 # miss rate for demand accesses -system.cpu.l2cache.demand_miss_rate::total 0.622853 # miss rate for demand accesses -system.cpu.l2cache.overall_miss_rate::cpu.inst 0.098265 # miss rate for overall accesses -system.cpu.l2cache.overall_miss_rate::cpu.data 0.771658 # miss rate for overall accesses -system.cpu.l2cache.overall_miss_rate::total 0.622853 # miss rate for overall accesses -system.cpu.l2cache.ReadExReq_avg_miss_latency::cpu.data 80948.979293 # average ReadExReq miss latency -system.cpu.l2cache.ReadExReq_avg_miss_latency::total 80948.979293 # average ReadExReq miss latency -system.cpu.l2cache.ReadCleanReq_avg_miss_latency::cpu.inst 79598.100559 # average ReadCleanReq miss latency -system.cpu.l2cache.ReadCleanReq_avg_miss_latency::total 79598.100559 # average ReadCleanReq miss latency -system.cpu.l2cache.ReadSharedReq_avg_miss_latency::cpu.data 86658.681665 # average ReadSharedReq miss latency -system.cpu.l2cache.ReadSharedReq_avg_miss_latency::total 86658.681665 # average ReadSharedReq miss latency -system.cpu.l2cache.demand_avg_miss_latency::cpu.inst 79598.100559 # average overall miss latency -system.cpu.l2cache.demand_avg_miss_latency::cpu.data 81944.634136 # average overall miss latency -system.cpu.l2cache.demand_avg_miss_latency::total 81862.827205 # average overall miss latency -system.cpu.l2cache.overall_avg_miss_latency::cpu.inst 79598.100559 # average overall miss latency -system.cpu.l2cache.overall_avg_miss_latency::cpu.data 81944.634136 # average overall miss latency -system.cpu.l2cache.overall_avg_miss_latency::total 81862.827205 # average overall miss latency +system.cpu.l2cache.demand_misses::cpu.inst 4476 # number of demand (read+write) misses +system.cpu.l2cache.demand_misses::cpu.data 123883 # number of demand (read+write) misses +system.cpu.l2cache.demand_misses::total 128359 # number of demand (read+write) misses +system.cpu.l2cache.overall_misses::cpu.inst 4476 # number of overall misses +system.cpu.l2cache.overall_misses::cpu.data 123883 # number of overall misses +system.cpu.l2cache.overall_misses::total 128359 # number of overall misses +system.cpu.l2cache.ReadExReq_miss_latency::cpu.data 8277452000 # number of ReadExReq miss cycles +system.cpu.l2cache.ReadExReq_miss_latency::total 8277452000 # number of ReadExReq miss cycles +system.cpu.l2cache.ReadCleanReq_miss_latency::cpu.inst 356943000 # number of ReadCleanReq miss cycles +system.cpu.l2cache.ReadCleanReq_miss_latency::total 356943000 # number of ReadCleanReq miss cycles +system.cpu.l2cache.ReadSharedReq_miss_latency::cpu.data 1866770000 # number of ReadSharedReq miss cycles +system.cpu.l2cache.ReadSharedReq_miss_latency::total 1866770000 # number of ReadSharedReq miss 
cycles +system.cpu.l2cache.demand_miss_latency::cpu.inst 356943000 # number of demand (read+write) miss cycles +system.cpu.l2cache.demand_miss_latency::cpu.data 10144222000 # number of demand (read+write) miss cycles +system.cpu.l2cache.demand_miss_latency::total 10501165000 # number of demand (read+write) miss cycles +system.cpu.l2cache.overall_miss_latency::cpu.inst 356943000 # number of overall miss cycles +system.cpu.l2cache.overall_miss_latency::cpu.data 10144222000 # number of overall miss cycles +system.cpu.l2cache.overall_miss_latency::total 10501165000 # number of overall miss cycles +system.cpu.l2cache.WritebackDirty_accesses::writebacks 128383 # number of WritebackDirty accesses(hits+misses) +system.cpu.l2cache.WritebackDirty_accesses::total 128383 # number of WritebackDirty accesses(hits+misses) +system.cpu.l2cache.WritebackClean_accesses::writebacks 39935 # number of WritebackClean accesses(hits+misses) +system.cpu.l2cache.WritebackClean_accesses::total 39935 # number of WritebackClean accesses(hits+misses) +system.cpu.l2cache.ReadExReq_accesses::cpu.data 107037 # number of ReadExReq accesses(hits+misses) +system.cpu.l2cache.ReadExReq_accesses::total 107037 # number of ReadExReq accesses(hits+misses) +system.cpu.l2cache.ReadCleanReq_accesses::cpu.inst 45581 # number of ReadCleanReq accesses(hits+misses) +system.cpu.l2cache.ReadCleanReq_accesses::total 45581 # number of ReadCleanReq accesses(hits+misses) +system.cpu.l2cache.ReadSharedReq_accesses::cpu.data 53503 # number of ReadSharedReq accesses(hits+misses) +system.cpu.l2cache.ReadSharedReq_accesses::total 53503 # number of ReadSharedReq accesses(hits+misses) +system.cpu.l2cache.demand_accesses::cpu.inst 45581 # number of demand (read+write) accesses +system.cpu.l2cache.demand_accesses::cpu.data 160540 # number of demand (read+write) accesses +system.cpu.l2cache.demand_accesses::total 206121 # number of demand (read+write) accesses +system.cpu.l2cache.overall_accesses::cpu.inst 45581 # number of overall (read+write) accesses +system.cpu.l2cache.overall_accesses::cpu.data 160540 # number of overall (read+write) accesses +system.cpu.l2cache.overall_accesses::total 206121 # number of overall (read+write) accesses +system.cpu.l2cache.ReadExReq_miss_rate::cpu.data 0.955557 # miss rate for ReadExReq accesses +system.cpu.l2cache.ReadExReq_miss_rate::total 0.955557 # miss rate for ReadExReq accesses +system.cpu.l2cache.ReadCleanReq_miss_rate::cpu.inst 0.098199 # miss rate for ReadCleanReq accesses +system.cpu.l2cache.ReadCleanReq_miss_rate::total 0.098199 # miss rate for ReadCleanReq accesses +system.cpu.l2cache.ReadSharedReq_miss_rate::cpu.data 0.403772 # miss rate for ReadSharedReq accesses +system.cpu.l2cache.ReadSharedReq_miss_rate::total 0.403772 # miss rate for ReadSharedReq accesses +system.cpu.l2cache.demand_miss_rate::cpu.inst 0.098199 # miss rate for demand accesses +system.cpu.l2cache.demand_miss_rate::cpu.data 0.771664 # miss rate for demand accesses +system.cpu.l2cache.demand_miss_rate::total 0.622736 # miss rate for demand accesses +system.cpu.l2cache.overall_miss_rate::cpu.inst 0.098199 # miss rate for overall accesses +system.cpu.l2cache.overall_miss_rate::cpu.data 0.771664 # miss rate for overall accesses +system.cpu.l2cache.overall_miss_rate::total 0.622736 # miss rate for overall accesses +system.cpu.l2cache.ReadExReq_avg_miss_latency::cpu.data 80929.331248 # average ReadExReq miss latency +system.cpu.l2cache.ReadExReq_avg_miss_latency::total 80929.331248 # average ReadExReq miss latency 
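(Editorial aside: the per-requestor L2 figures in this hunk aggregate in the expected way, which makes a quick consistency check possible when updating reference outputs. All numbers below are copied from the + lines above; the check is illustrative and not part of the test harness.)

# Consistency check of the updated system.cpu.l2cache demand totals (Python).
inst_misses, data_misses = 4476, 123883     # demand_misses::cpu.inst / ::cpu.data
inst_accs,   data_accs   = 45581, 160540    # demand_accesses::cpu.inst / ::cpu.data

assert inst_misses + data_misses == 128359  # demand_misses::total
assert inst_accs + data_accs == 206121      # demand_accesses::total
# demand miss rate = total demand misses / total demand accesses
assert f"{(inst_misses + data_misses) / (inst_accs + data_accs):.6f}" == "0.622736"
# the demand miss latency total is likewise the sum of the per-requestor latencies (ticks)
assert 356943000 + 10144222000 == 10501165000   # demand_miss_latency::total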
+system.cpu.l2cache.ReadCleanReq_avg_miss_latency::cpu.inst 79745.978552 # average ReadCleanReq miss latency +system.cpu.l2cache.ReadCleanReq_avg_miss_latency::total 79745.978552 # average ReadCleanReq miss latency +system.cpu.l2cache.ReadSharedReq_avg_miss_latency::cpu.data 86412.535296 # average ReadSharedReq miss latency +system.cpu.l2cache.ReadSharedReq_avg_miss_latency::total 86412.535296 # average ReadSharedReq miss latency +system.cpu.l2cache.demand_avg_miss_latency::cpu.inst 79745.978552 # average overall miss latency +system.cpu.l2cache.demand_avg_miss_latency::cpu.data 81885.504872 # average overall miss latency +system.cpu.l2cache.demand_avg_miss_latency::total 81810.897561 # average overall miss latency +system.cpu.l2cache.overall_avg_miss_latency::cpu.inst 79745.978552 # average overall miss latency +system.cpu.l2cache.overall_avg_miss_latency::cpu.data 81885.504872 # average overall miss latency +system.cpu.l2cache.overall_avg_miss_latency::total 81810.897561 # average overall miss latency system.cpu.l2cache.blocked_cycles::no_mshrs 0 # number of cycles access was blocked system.cpu.l2cache.blocked_cycles::no_targets 0 # number of cycles access was blocked system.cpu.l2cache.blocked::no_mshrs 0 # number of cycles access was blocked system.cpu.l2cache.blocked::no_targets 0 # number of cycles access was blocked system.cpu.l2cache.avg_blocked_cycles::no_mshrs nan # average number of cycles each access was blocked system.cpu.l2cache.avg_blocked_cycles::no_targets nan # average number of cycles each access was blocked -system.cpu.l2cache.writebacks::writebacks 86215 # number of writebacks -system.cpu.l2cache.writebacks::total 86215 # number of writebacks -system.cpu.l2cache.ReadCleanReq_mshr_hits::cpu.inst 13 # number of ReadCleanReq MSHR hits -system.cpu.l2cache.ReadCleanReq_mshr_hits::total 13 # number of ReadCleanReq MSHR hits -system.cpu.l2cache.ReadSharedReq_mshr_hits::cpu.data 62 # number of ReadSharedReq MSHR hits -system.cpu.l2cache.ReadSharedReq_mshr_hits::total 62 # number of ReadSharedReq MSHR hits -system.cpu.l2cache.demand_mshr_hits::cpu.inst 13 # number of demand (read+write) MSHR hits -system.cpu.l2cache.demand_mshr_hits::cpu.data 62 # number of demand (read+write) MSHR hits -system.cpu.l2cache.demand_mshr_hits::total 75 # number of demand (read+write) MSHR hits -system.cpu.l2cache.overall_mshr_hits::cpu.inst 13 # number of overall MSHR hits -system.cpu.l2cache.overall_mshr_hits::cpu.data 62 # number of overall MSHR hits -system.cpu.l2cache.overall_mshr_hits::total 75 # number of overall MSHR hits +system.cpu.l2cache.writebacks::writebacks 86212 # number of writebacks +system.cpu.l2cache.writebacks::total 86212 # number of writebacks +system.cpu.l2cache.ReadCleanReq_mshr_hits::cpu.inst 12 # number of ReadCleanReq MSHR hits +system.cpu.l2cache.ReadCleanReq_mshr_hits::total 12 # number of ReadCleanReq MSHR hits +system.cpu.l2cache.ReadSharedReq_mshr_hits::cpu.data 60 # number of ReadSharedReq MSHR hits +system.cpu.l2cache.ReadSharedReq_mshr_hits::total 60 # number of ReadSharedReq MSHR hits +system.cpu.l2cache.demand_mshr_hits::cpu.inst 12 # number of demand (read+write) MSHR hits +system.cpu.l2cache.demand_mshr_hits::cpu.data 60 # number of demand (read+write) MSHR hits +system.cpu.l2cache.demand_mshr_hits::total 72 # number of demand (read+write) MSHR hits +system.cpu.l2cache.overall_mshr_hits::cpu.inst 12 # number of overall MSHR hits +system.cpu.l2cache.overall_mshr_hits::cpu.data 60 # number of overall MSHR hits +system.cpu.l2cache.overall_mshr_hits::total 72 # 
number of overall MSHR hits system.cpu.l2cache.CleanEvict_mshr_misses::writebacks 96 # number of CleanEvict MSHR misses system.cpu.l2cache.CleanEvict_mshr_misses::total 96 # number of CleanEvict MSHR misses -system.cpu.l2cache.ReadExReq_mshr_misses::cpu.data 102282 # number of ReadExReq MSHR misses -system.cpu.l2cache.ReadExReq_mshr_misses::total 102282 # number of ReadExReq MSHR misses -system.cpu.l2cache.ReadCleanReq_mshr_misses::cpu.inst 4462 # number of ReadCleanReq MSHR misses -system.cpu.l2cache.ReadCleanReq_mshr_misses::total 4462 # number of ReadCleanReq MSHR misses -system.cpu.l2cache.ReadSharedReq_mshr_misses::cpu.data 21541 # number of ReadSharedReq MSHR misses -system.cpu.l2cache.ReadSharedReq_mshr_misses::total 21541 # number of ReadSharedReq MSHR misses -system.cpu.l2cache.demand_mshr_misses::cpu.inst 4462 # number of demand (read+write) MSHR misses +system.cpu.l2cache.ReadExReq_mshr_misses::cpu.data 102280 # number of ReadExReq MSHR misses +system.cpu.l2cache.ReadExReq_mshr_misses::total 102280 # number of ReadExReq MSHR misses +system.cpu.l2cache.ReadCleanReq_mshr_misses::cpu.inst 4464 # number of ReadCleanReq MSHR misses +system.cpu.l2cache.ReadCleanReq_mshr_misses::total 4464 # number of ReadCleanReq MSHR misses +system.cpu.l2cache.ReadSharedReq_mshr_misses::cpu.data 21543 # number of ReadSharedReq MSHR misses +system.cpu.l2cache.ReadSharedReq_mshr_misses::total 21543 # number of ReadSharedReq MSHR misses +system.cpu.l2cache.demand_mshr_misses::cpu.inst 4464 # number of demand (read+write) MSHR misses system.cpu.l2cache.demand_mshr_misses::cpu.data 123823 # number of demand (read+write) MSHR misses -system.cpu.l2cache.demand_mshr_misses::total 128285 # number of demand (read+write) MSHR misses -system.cpu.l2cache.overall_mshr_misses::cpu.inst 4462 # number of overall MSHR misses +system.cpu.l2cache.demand_mshr_misses::total 128287 # number of demand (read+write) MSHR misses +system.cpu.l2cache.overall_mshr_misses::cpu.inst 4464 # number of overall MSHR misses system.cpu.l2cache.overall_mshr_misses::cpu.data 123823 # number of overall MSHR misses -system.cpu.l2cache.overall_mshr_misses::total 128285 # number of overall MSHR misses -system.cpu.l2cache.ReadExReq_mshr_miss_latency::cpu.data 7256803500 # number of ReadExReq MSHR miss cycles -system.cpu.l2cache.ReadExReq_mshr_miss_latency::total 7256803500 # number of ReadExReq MSHR miss cycles -system.cpu.l2cache.ReadCleanReq_mshr_miss_latency::cpu.inst 310457000 # number of ReadCleanReq MSHR miss cycles -system.cpu.l2cache.ReadCleanReq_mshr_miss_latency::total 310457000 # number of ReadCleanReq MSHR miss cycles -system.cpu.l2cache.ReadSharedReq_mshr_miss_latency::cpu.data 1652012000 # number of ReadSharedReq MSHR miss cycles -system.cpu.l2cache.ReadSharedReq_mshr_miss_latency::total 1652012000 # number of ReadSharedReq MSHR miss cycles -system.cpu.l2cache.demand_mshr_miss_latency::cpu.inst 310457000 # number of demand (read+write) MSHR miss cycles -system.cpu.l2cache.demand_mshr_miss_latency::cpu.data 8908815500 # number of demand (read+write) MSHR miss cycles -system.cpu.l2cache.demand_mshr_miss_latency::total 9219272500 # number of demand (read+write) MSHR miss cycles -system.cpu.l2cache.overall_mshr_miss_latency::cpu.inst 310457000 # number of overall MSHR miss cycles -system.cpu.l2cache.overall_mshr_miss_latency::cpu.data 8908815500 # number of overall MSHR miss cycles -system.cpu.l2cache.overall_mshr_miss_latency::total 9219272500 # number of overall MSHR miss cycles +system.cpu.l2cache.overall_mshr_misses::total 128287 
# number of overall MSHR misses +system.cpu.l2cache.ReadExReq_mshr_miss_latency::cpu.data 7254652000 # number of ReadExReq MSHR miss cycles +system.cpu.l2cache.ReadExReq_mshr_miss_latency::total 7254652000 # number of ReadExReq MSHR miss cycles +system.cpu.l2cache.ReadCleanReq_mshr_miss_latency::cpu.inst 311353500 # number of ReadCleanReq MSHR miss cycles +system.cpu.l2cache.ReadCleanReq_mshr_miss_latency::total 311353500 # number of ReadCleanReq MSHR miss cycles +system.cpu.l2cache.ReadSharedReq_mshr_miss_latency::cpu.data 1646809500 # number of ReadSharedReq MSHR miss cycles +system.cpu.l2cache.ReadSharedReq_mshr_miss_latency::total 1646809500 # number of ReadSharedReq MSHR miss cycles +system.cpu.l2cache.demand_mshr_miss_latency::cpu.inst 311353500 # number of demand (read+write) MSHR miss cycles +system.cpu.l2cache.demand_mshr_miss_latency::cpu.data 8901461500 # number of demand (read+write) MSHR miss cycles +system.cpu.l2cache.demand_mshr_miss_latency::total 9212815000 # number of demand (read+write) MSHR miss cycles +system.cpu.l2cache.overall_mshr_miss_latency::cpu.inst 311353500 # number of overall MSHR miss cycles +system.cpu.l2cache.overall_mshr_miss_latency::cpu.data 8901461500 # number of overall MSHR miss cycles +system.cpu.l2cache.overall_mshr_miss_latency::total 9212815000 # number of overall MSHR miss cycles system.cpu.l2cache.CleanEvict_mshr_miss_rate::writebacks inf # mshr miss rate for CleanEvict accesses system.cpu.l2cache.CleanEvict_mshr_miss_rate::total inf # mshr miss rate for CleanEvict accesses -system.cpu.l2cache.ReadExReq_mshr_miss_rate::cpu.data 0.955603 # mshr miss rate for ReadExReq accesses -system.cpu.l2cache.ReadExReq_mshr_miss_rate::total 0.955603 # mshr miss rate for ReadExReq accesses -system.cpu.l2cache.ReadCleanReq_mshr_miss_rate::cpu.inst 0.097980 # mshr miss rate for ReadCleanReq accesses -system.cpu.l2cache.ReadCleanReq_mshr_miss_rate::total 0.097980 # mshr miss rate for ReadCleanReq accesses -system.cpu.l2cache.ReadSharedReq_mshr_miss_rate::cpu.data 0.402560 # mshr miss rate for ReadSharedReq accesses -system.cpu.l2cache.ReadSharedReq_mshr_miss_rate::total 0.402560 # mshr miss rate for ReadSharedReq accesses -system.cpu.l2cache.demand_mshr_miss_rate::cpu.inst 0.097980 # mshr miss rate for demand accesses -system.cpu.l2cache.demand_mshr_miss_rate::cpu.data 0.771271 # mshr miss rate for demand accesses -system.cpu.l2cache.demand_mshr_miss_rate::total 0.622489 # mshr miss rate for demand accesses -system.cpu.l2cache.overall_mshr_miss_rate::cpu.inst 0.097980 # mshr miss rate for overall accesses -system.cpu.l2cache.overall_mshr_miss_rate::cpu.data 0.771271 # mshr miss rate for overall accesses -system.cpu.l2cache.overall_mshr_miss_rate::total 0.622489 # mshr miss rate for overall accesses -system.cpu.l2cache.ReadExReq_avg_mshr_miss_latency::cpu.data 70948.979293 # average ReadExReq mshr miss latency -system.cpu.l2cache.ReadExReq_avg_mshr_miss_latency::total 70948.979293 # average ReadExReq mshr miss latency -system.cpu.l2cache.ReadCleanReq_avg_mshr_miss_latency::cpu.inst 69577.991932 # average ReadCleanReq mshr miss latency -system.cpu.l2cache.ReadCleanReq_avg_mshr_miss_latency::total 69577.991932 # average ReadCleanReq mshr miss latency -system.cpu.l2cache.ReadSharedReq_avg_mshr_miss_latency::cpu.data 76691.518500 # average ReadSharedReq mshr miss latency -system.cpu.l2cache.ReadSharedReq_avg_mshr_miss_latency::total 76691.518500 # average ReadSharedReq mshr miss latency -system.cpu.l2cache.demand_avg_mshr_miss_latency::cpu.inst 69577.991932 # average 
overall mshr miss latency -system.cpu.l2cache.demand_avg_mshr_miss_latency::cpu.data 71947.986238 # average overall mshr miss latency -system.cpu.l2cache.demand_avg_mshr_miss_latency::total 71865.553260 # average overall mshr miss latency -system.cpu.l2cache.overall_avg_mshr_miss_latency::cpu.inst 69577.991932 # average overall mshr miss latency -system.cpu.l2cache.overall_avg_mshr_miss_latency::cpu.data 71947.986238 # average overall mshr miss latency -system.cpu.l2cache.overall_avg_mshr_miss_latency::total 71865.553260 # average overall mshr miss latency -system.cpu.toL2Bus.snoop_filter.tot_requests 406029 # Total number of requests made to the snoop filter. -system.cpu.toL2Bus.snoop_filter.hit_single_requests 199980 # Number of requests hitting in the snoop filter with a single holder of the requested data. -system.cpu.toL2Bus.snoop_filter.hit_multi_requests 7832 # Number of requests hitting in the snoop filter with multiple (>1) holders of the requested data. -system.cpu.toL2Bus.snoop_filter.tot_snoops 3359 # Total number of snoops made to the snoop filter. -system.cpu.toL2Bus.snoop_filter.hit_single_snoops 3330 # Number of snoops hitting in the snoop filter with a single holder of the requested data. +system.cpu.l2cache.ReadExReq_mshr_miss_rate::cpu.data 0.955557 # mshr miss rate for ReadExReq accesses +system.cpu.l2cache.ReadExReq_mshr_miss_rate::total 0.955557 # mshr miss rate for ReadExReq accesses +system.cpu.l2cache.ReadCleanReq_mshr_miss_rate::cpu.inst 0.097936 # mshr miss rate for ReadCleanReq accesses +system.cpu.l2cache.ReadCleanReq_mshr_miss_rate::total 0.097936 # mshr miss rate for ReadCleanReq accesses +system.cpu.l2cache.ReadSharedReq_mshr_miss_rate::cpu.data 0.402650 # mshr miss rate for ReadSharedReq accesses +system.cpu.l2cache.ReadSharedReq_mshr_miss_rate::total 0.402650 # mshr miss rate for ReadSharedReq accesses +system.cpu.l2cache.demand_mshr_miss_rate::cpu.inst 0.097936 # mshr miss rate for demand accesses +system.cpu.l2cache.demand_mshr_miss_rate::cpu.data 0.771291 # mshr miss rate for demand accesses +system.cpu.l2cache.demand_mshr_miss_rate::total 0.622387 # mshr miss rate for demand accesses +system.cpu.l2cache.overall_mshr_miss_rate::cpu.inst 0.097936 # mshr miss rate for overall accesses +system.cpu.l2cache.overall_mshr_miss_rate::cpu.data 0.771291 # mshr miss rate for overall accesses +system.cpu.l2cache.overall_mshr_miss_rate::total 0.622387 # mshr miss rate for overall accesses +system.cpu.l2cache.ReadExReq_avg_mshr_miss_latency::cpu.data 70929.331248 # average ReadExReq mshr miss latency +system.cpu.l2cache.ReadExReq_avg_mshr_miss_latency::total 70929.331248 # average ReadExReq mshr miss latency +system.cpu.l2cache.ReadCleanReq_avg_mshr_miss_latency::cpu.inst 69747.647849 # average ReadCleanReq mshr miss latency +system.cpu.l2cache.ReadCleanReq_avg_mshr_miss_latency::total 69747.647849 # average ReadCleanReq mshr miss latency +system.cpu.l2cache.ReadSharedReq_avg_mshr_miss_latency::cpu.data 76442.904888 # average ReadSharedReq mshr miss latency +system.cpu.l2cache.ReadSharedReq_avg_mshr_miss_latency::total 76442.904888 # average ReadSharedReq mshr miss latency +system.cpu.l2cache.demand_avg_mshr_miss_latency::cpu.inst 69747.647849 # average overall mshr miss latency +system.cpu.l2cache.demand_avg_mshr_miss_latency::cpu.data 71888.595011 # average overall mshr miss latency +system.cpu.l2cache.demand_avg_mshr_miss_latency::total 71814.096518 # average overall mshr miss latency +system.cpu.l2cache.overall_avg_mshr_miss_latency::cpu.inst 69747.647849 # 
average overall mshr miss latency +system.cpu.l2cache.overall_avg_mshr_miss_latency::cpu.data 71888.595011 # average overall mshr miss latency +system.cpu.l2cache.overall_avg_mshr_miss_latency::total 71814.096518 # average overall mshr miss latency +system.cpu.toL2Bus.snoop_filter.tot_requests 406103 # Total number of requests made to the snoop filter. +system.cpu.toL2Bus.snoop_filter.hit_single_requests 200020 # Number of requests hitting in the snoop filter with a single holder of the requested data. +system.cpu.toL2Bus.snoop_filter.hit_multi_requests 7844 # Number of requests hitting in the snoop filter with multiple (>1) holders of the requested data. +system.cpu.toL2Bus.snoop_filter.tot_snoops 3360 # Total number of snoops made to the snoop filter. +system.cpu.toL2Bus.snoop_filter.hit_single_snoops 3331 # Number of snoops hitting in the snoop filter with a single holder of the requested data. system.cpu.toL2Bus.snoop_filter.hit_multi_snoops 29 # Number of snoops hitting in the snoop filter with multiple (>1) holders of the requested data. -system.cpu.toL2Bus.pwrStateResidencyTicks::UNDEFINED 56802974500 # Cumulative time (in ticks) in various power states -system.cpu.toL2Bus.trans_dist::ReadResp 99049 # Transaction distribution -system.cpu.toL2Bus.trans_dist::WritebackDirty 214604 # Transaction distribution -system.cpu.toL2Bus.trans_dist::WritebackClean 43497 # Transaction distribution -system.cpu.toL2Bus.trans_dist::CleanEvict 38235 # Transaction distribution -system.cpu.toL2Bus.trans_dist::ReadExReq 107034 # Transaction distribution -system.cpu.toL2Bus.trans_dist::ReadExResp 107034 # Transaction distribution -system.cpu.toL2Bus.trans_dist::ReadCleanReq 45540 # Transaction distribution -system.cpu.toL2Bus.trans_dist::ReadSharedReq 53510 # Transaction distribution -system.cpu.toL2Bus.pkt_count_system.cpu.icache.mem_side::system.cpu.l2cache.cpu_side 134576 # Packet count per connected master and slave (bytes) -system.cpu.toL2Bus.pkt_count_system.cpu.dcache.mem_side::system.cpu.l2cache.cpu_side 477536 # Packet count per connected master and slave (bytes) -system.cpu.toL2Bus.pkt_count::total 612112 # Packet count per connected master and slave (bytes) -system.cpu.toL2Bus.pkt_size_system.cpu.icache.mem_side::system.cpu.l2cache.cpu_side 5698304 # Cumulative packet size per connected master and slave (bytes) -system.cpu.toL2Bus.pkt_size_system.cpu.dcache.mem_side::system.cpu.l2cache.cpu_side 18491712 # Cumulative packet size per connected master and slave (bytes) -system.cpu.toL2Bus.pkt_size::total 24190016 # Cumulative packet size per connected master and slave (bytes) -system.cpu.toL2Bus.snoops 96391 # Total snoops (count) -system.cpu.toL2Bus.snoop_fanout::samples 302475 # Request fanout histogram -system.cpu.toL2Bus.snoop_fanout::mean 0.037210 # Request fanout histogram -system.cpu.toL2Bus.snoop_fanout::stdev 0.189781 # Request fanout histogram +system.cpu.toL2Bus.pwrStateResidencyTicks::UNDEFINED 58768125500 # Cumulative time (in ticks) in various power states +system.cpu.toL2Bus.trans_dist::ReadResp 99083 # Transaction distribution +system.cpu.toL2Bus.trans_dist::WritebackDirty 214595 # Transaction distribution +system.cpu.toL2Bus.trans_dist::WritebackClean 43538 # Transaction distribution +system.cpu.toL2Bus.trans_dist::CleanEvict 38242 # Transaction distribution +system.cpu.toL2Bus.trans_dist::ReadExReq 107037 # Transaction distribution +system.cpu.toL2Bus.trans_dist::ReadExResp 107037 # Transaction distribution +system.cpu.toL2Bus.trans_dist::ReadCleanReq 45581 # Transaction 
distribution +system.cpu.toL2Bus.trans_dist::ReadSharedReq 53503 # Transaction distribution +system.cpu.toL2Bus.pkt_count_system.cpu.icache.mem_side::system.cpu.l2cache.cpu_side 134699 # Packet count per connected master and slave (bytes) +system.cpu.toL2Bus.pkt_count_system.cpu.dcache.mem_side::system.cpu.l2cache.cpu_side 477524 # Packet count per connected master and slave (bytes) +system.cpu.toL2Bus.pkt_count::total 612223 # Packet count per connected master and slave (bytes) +system.cpu.toL2Bus.pkt_size_system.cpu.icache.mem_side::system.cpu.l2cache.cpu_side 5703552 # Cumulative packet size per connected master and slave (bytes) +system.cpu.toL2Bus.pkt_size_system.cpu.dcache.mem_side::system.cpu.l2cache.cpu_side 18491072 # Cumulative packet size per connected master and slave (bytes) +system.cpu.toL2Bus.pkt_size::total 24194624 # Cumulative packet size per connected master and slave (bytes) +system.cpu.toL2Bus.snoops 96393 # Total snoops (count) +system.cpu.toL2Bus.snoopTraffic 5517568 # Total snoop traffic (bytes) +system.cpu.toL2Bus.snoop_fanout::samples 302514 # Request fanout histogram +system.cpu.toL2Bus.snoop_fanout::mean 0.037258 # Request fanout histogram +system.cpu.toL2Bus.snoop_fanout::stdev 0.189899 # Request fanout histogram system.cpu.toL2Bus.snoop_fanout::underflows 0 0.00% 0.00% # Request fanout histogram -system.cpu.toL2Bus.snoop_fanout::0 291249 96.29% 96.29% # Request fanout histogram -system.cpu.toL2Bus.snoop_fanout::1 11197 3.70% 99.99% # Request fanout histogram +system.cpu.toL2Bus.snoop_fanout::0 291272 96.28% 96.28% # Request fanout histogram +system.cpu.toL2Bus.snoop_fanout::1 11213 3.71% 99.99% # Request fanout histogram system.cpu.toL2Bus.snoop_fanout::2 29 0.01% 100.00% # Request fanout histogram system.cpu.toL2Bus.snoop_fanout::overflows 0 0.00% 100.00% # Request fanout histogram system.cpu.toL2Bus.snoop_fanout::min_value 0 # Request fanout histogram system.cpu.toL2Bus.snoop_fanout::max_value 2 # Request fanout histogram -system.cpu.toL2Bus.snoop_fanout::total 302475 # Request fanout histogram -system.cpu.toL2Bus.reqLayer0.occupancy 374900500 # Layer occupancy (ticks) -system.cpu.toL2Bus.reqLayer0.utilization 0.7 # Layer utilization (%) -system.cpu.toL2Bus.respLayer0.occupancy 68328959 # Layer occupancy (ticks) +system.cpu.toL2Bus.snoop_fanout::total 302514 # Request fanout histogram +system.cpu.toL2Bus.reqLayer0.occupancy 374972500 # Layer occupancy (ticks) +system.cpu.toL2Bus.reqLayer0.utilization 0.6 # Layer utilization (%) +system.cpu.toL2Bus.respLayer0.occupancy 68384970 # Layer occupancy (ticks) system.cpu.toL2Bus.respLayer0.utilization 0.1 # Layer utilization (%) -system.cpu.toL2Bus.respLayer1.occupancy 240850431 # Layer occupancy (ticks) +system.cpu.toL2Bus.respLayer1.occupancy 240842435 # Layer occupancy (ticks) system.cpu.toL2Bus.respLayer1.utilization 0.4 # Layer utilization (%) -system.membus.pwrStateResidencyTicks::UNDEFINED 56802974500 # Cumulative time (in ticks) in various power states -system.membus.trans_dist::ReadResp 26002 # Transaction distribution -system.membus.trans_dist::WritebackDirty 86215 # Transaction distribution -system.membus.trans_dist::CleanEvict 6912 # Transaction distribution -system.membus.trans_dist::ReadExReq 102282 # Transaction distribution -system.membus.trans_dist::ReadExResp 102282 # Transaction distribution -system.membus.trans_dist::ReadSharedReq 26002 # Transaction distribution -system.membus.pkt_count_system.cpu.l2cache.mem_side::system.physmem.port 349695 # Packet count per connected master and slave (bytes) 
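(Editorial aside: the updated toL2Bus snoop fanout histogram above is internally consistent, its buckets summing to the sample count and its weighted average matching the reported mean. The check below is illustrative; the bucket counts are quoted from the lines above.)

# Consistency check of system.cpu.toL2Bus.snoop_fanout (Python).
buckets = {0: 291272, 1: 11213, 2: 29}   # snoop_fanout::0 / ::1 / ::2
samples = sum(buckets.values())

assert samples == 302514                 # snoop_fanout::samples and ::total
mean = sum(fanout * count for fanout, count in buckets.items()) / samples
assert f"{mean:.6f}" == "0.037258"       # snoop_fanout::mean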
-system.membus.pkt_count::total 349695 # Packet count per connected master and slave (bytes) -system.membus.pkt_size_system.cpu.l2cache.mem_side::system.physmem.port 13727936 # Cumulative packet size per connected master and slave (bytes) -system.membus.pkt_size::total 13727936 # Cumulative packet size per connected master and slave (bytes) +system.membus.pwrStateResidencyTicks::UNDEFINED 58768125500 # Cumulative time (in ticks) in various power states +system.membus.trans_dist::ReadResp 26006 # Transaction distribution +system.membus.trans_dist::WritebackDirty 86212 # Transaction distribution +system.membus.trans_dist::CleanEvict 6916 # Transaction distribution +system.membus.trans_dist::ReadExReq 102280 # Transaction distribution +system.membus.trans_dist::ReadExResp 102280 # Transaction distribution +system.membus.trans_dist::ReadSharedReq 26006 # Transaction distribution +system.membus.pkt_count_system.cpu.l2cache.mem_side::system.physmem.port 349700 # Packet count per connected master and slave (bytes) +system.membus.pkt_count::total 349700 # Packet count per connected master and slave (bytes) +system.membus.pkt_size_system.cpu.l2cache.mem_side::system.physmem.port 13727872 # Cumulative packet size per connected master and slave (bytes) +system.membus.pkt_size::total 13727872 # Cumulative packet size per connected master and slave (bytes) system.membus.snoops 0 # Total snoops (count) -system.membus.snoop_fanout::samples 221411 # Request fanout histogram +system.membus.snoopTraffic 0 # Total snoop traffic (bytes) +system.membus.snoop_fanout::samples 221414 # Request fanout histogram system.membus.snoop_fanout::mean 0 # Request fanout histogram system.membus.snoop_fanout::stdev 0 # Request fanout histogram system.membus.snoop_fanout::underflows 0 0.00% 0.00% # Request fanout histogram -system.membus.snoop_fanout::0 221411 100.00% 100.00% # Request fanout histogram +system.membus.snoop_fanout::0 221414 100.00% 100.00% # Request fanout histogram system.membus.snoop_fanout::1 0 0.00% 100.00% # Request fanout histogram system.membus.snoop_fanout::overflows 0 0.00% 100.00% # Request fanout histogram system.membus.snoop_fanout::min_value 0 # Request fanout histogram system.membus.snoop_fanout::max_value 0 # Request fanout histogram -system.membus.snoop_fanout::total 221411 # Request fanout histogram -system.membus.reqLayer0.occupancy 590704500 # Layer occupancy (ticks) +system.membus.snoop_fanout::total 221414 # Request fanout histogram +system.membus.reqLayer0.occupancy 586752500 # Layer occupancy (ticks) system.membus.reqLayer0.utilization 1.0 # Layer utilization (%) -system.membus.respLayer1.occupancy 676958000 # Layer occupancy (ticks) +system.membus.respLayer1.occupancy 676437000 # Layer occupancy (ticks) system.membus.respLayer1.utilization 1.2 # Layer utilization (%) ---------- End Simulation Statistics ---------- diff --git a/tests/long/se/50.vortex/ref/arm/linux/o3-timing/config.ini b/tests/long/se/50.vortex/ref/arm/linux/o3-timing/config.ini index cbb778c28..8d8e9be85 100644 --- a/tests/long/se/50.vortex/ref/arm/linux/o3-timing/config.ini +++ b/tests/long/se/50.vortex/ref/arm/linux/o3-timing/config.ini @@ -14,6 +14,7 @@ children=clk_domain cpu cpu_clk_domain dvfs_handler membus physmem voltage_domai boot_osflags=a cache_line_size=64 clk_domain=system.clk_domain +default_p_state=UNDEFINED eventq_index=0 exit_on_work_items=false init_param=0 @@ -27,6 +28,10 @@ memories=system.physmem mmap_using_noreserve=false multi_thread=false num_work_ids=16 +p_state_clk_gate_bins=20 
+p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null readfile= symbolfile= thermal_components= @@ -72,6 +77,7 @@ cpu_id=0 decodeToFetchDelay=1 decodeToRenameDelay=2 decodeWidth=3 +default_p_state=UNDEFINED dispatchWidth=6 do_checkpoint_insts=true do_quiesce=true @@ -110,6 +116,10 @@ numPhysIntRegs=128 numROBEntries=40 numRobs=1 numThreads=1 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null profile=0 progress_interval=0 renameToDecodeDelay=1 @@ -166,12 +176,17 @@ addr_ranges=0:18446744073709551615 assoc=2 clk_domain=system.cpu_clk_domain clusivity=mostly_incl +default_p_state=UNDEFINED demand_mshr_reserve=1 eventq_index=0 hit_latency=2 is_read_only=false max_miss_count=0 mshrs=6 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null prefetch_on_access=false prefetcher=Null response_latency=2 @@ -190,8 +205,13 @@ type=LRU assoc=2 block_size=64 clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 hit_latency=2 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sequential_access=false size=32768 @@ -214,9 +234,14 @@ walker=system.cpu.dstage2_mmu.stage2_tlb.walker [system.cpu.dstage2_mmu.stage2_tlb.walker] type=ArmTableWalker clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 is_stage2=true num_squash_per_cycle=2 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sys=system [system.cpu.dtb] @@ -230,9 +255,14 @@ walker=system.cpu.dtb.walker [system.cpu.dtb.walker] type=ArmTableWalker clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 is_stage2=false num_squash_per_cycle=2 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sys=system port=system.cpu.toL2Bus.slave[3] @@ -508,12 +538,17 @@ addr_ranges=0:18446744073709551615 assoc=2 clk_domain=system.cpu_clk_domain clusivity=mostly_incl +default_p_state=UNDEFINED demand_mshr_reserve=1 eventq_index=0 hit_latency=1 is_read_only=true max_miss_count=0 mshrs=2 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null prefetch_on_access=false prefetcher=Null response_latency=1 @@ -532,8 +567,13 @@ type=LRU assoc=2 block_size=64 clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 hit_latency=1 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sequential_access=false size=32768 @@ -591,9 +631,14 @@ walker=system.cpu.istage2_mmu.stage2_tlb.walker [system.cpu.istage2_mmu.stage2_tlb.walker] type=ArmTableWalker clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 is_stage2=true num_squash_per_cycle=2 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sys=system [system.cpu.itb] @@ -607,9 +652,14 @@ walker=system.cpu.itb.walker [system.cpu.itb.walker] type=ArmTableWalker clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 is_stage2=false num_squash_per_cycle=2 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sys=system port=system.cpu.toL2Bus.slave[2] @@ -620,12 +670,17 @@ addr_ranges=0:18446744073709551615 assoc=16 clk_domain=system.cpu_clk_domain clusivity=mostly_excl +default_p_state=UNDEFINED 
demand_mshr_reserve=1 eventq_index=0 hit_latency=12 is_read_only=false max_miss_count=0 mshrs=16 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null prefetch_on_access=true prefetcher=system.cpu.l2cache.prefetcher response_latency=12 @@ -643,6 +698,7 @@ mem_side=system.membus.slave[1] type=StridePrefetcher cache_snoop=false clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED degree=8 eventq_index=0 latency=1 @@ -653,6 +709,10 @@ on_inst=true on_miss=false on_read=true on_write=true +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null queue_filter=true queue_size=32 queue_squash=true @@ -669,8 +729,13 @@ type=RandomRepl assoc=16 block_size=64 clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 hit_latency=12 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sequential_access=false size=1048576 @@ -678,10 +743,15 @@ size=1048576 type=CoherentXBar children=snoop_filter clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 forward_latency=0 frontend_latency=1 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 point_of_coherency=false +power_model=Null response_latency=1 snoop_filter=system.cpu.toL2Bus.snoop_filter snoop_response_latency=1 @@ -712,7 +782,7 @@ env= errout=cerr euid=100 eventq_index=0 -executable=/dist/m5/cpu2000/binaries/arm/linux/vortex +executable=/arm/projectscratch/randd/systems/dist/cpu2000/binaries/arm/linux/vortex gid=100 input=cin kvmInSE=false @@ -744,10 +814,15 @@ transition_latency=100000000 [system.membus] type=CoherentXBar clk_domain=system.clk_domain +default_p_state=UNDEFINED eventq_index=0 forward_latency=4 frontend_latency=3 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 point_of_coherency=true +power_model=Null response_latency=2 snoop_filter=Null snoop_response_latency=4 @@ -791,6 +866,7 @@ burst_length=8 channels=1 clk_domain=system.clk_domain conf_table_reported=true +default_p_state=UNDEFINED device_bus_width=8 device_rowbuffer_size=1024 device_size=536870912 @@ -802,7 +878,11 @@ max_accesses_per_row=16 mem_sched_policy=frfcfs min_writes_per_switch=16 null=false +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 page_policy=open_adaptive +power_model=Null range=0:134217727 ranks_per_channel=2 read_buffer_size=32 diff --git a/tests/long/se/50.vortex/ref/arm/linux/o3-timing/simerr b/tests/long/se/50.vortex/ref/arm/linux/o3-timing/simerr index 341b479f7..bbcd9d751 100755 --- a/tests/long/se/50.vortex/ref/arm/linux/o3-timing/simerr +++ b/tests/long/se/50.vortex/ref/arm/linux/o3-timing/simerr @@ -1,2 +1,3 @@ warn: DRAM device capacity (8192 Mbytes) does not match the address range assigned (128 Mbytes) warn: Sockets disabled, not accepting gdb connections +warn: ClockedObject: More than one power state change request encountered within the same simulation tick diff --git a/tests/long/se/50.vortex/ref/arm/linux/o3-timing/simout b/tests/long/se/50.vortex/ref/arm/linux/o3-timing/simout index dab41dff0..7e748e0bc 100755 --- a/tests/long/se/50.vortex/ref/arm/linux/o3-timing/simout +++ b/tests/long/se/50.vortex/ref/arm/linux/o3-timing/simout @@ -3,10 +3,10 @@ Redirecting stderr to build/ARM/tests/opt/long/se/50.vortex/arm/linux/o3-timing/ gem5 Simulator System. 
http://gem5.org gem5 is copyrighted software; use the --copyright option for details. -gem5 compiled Mar 16 2016 15:51:04 -gem5 started Mar 16 2016 17:20:18 -gem5 executing on dinar2c11, pid 17075 -command line: build/ARM/gem5.opt -d build/ARM/tests/opt/long/se/50.vortex/arm/linux/o3-timing -re /home/stever/gem5-public/tests/run.py build/ARM/tests/opt/long/se/50.vortex/arm/linux/o3-timing +gem5 compiled Jul 21 2016 14:37:41 +gem5 started Jul 21 2016 14:38:23 +gem5 executing on e108600-lin, pid 23088 +command line: /work/curdun01/gem5-external.hg/build/ARM/gem5.opt -d build/ARM/tests/opt/long/se/50.vortex/arm/linux/o3-timing -re /work/curdun01/gem5-external.hg/tests/testing/../run.py long/se/50.vortex/arm/linux/o3-timing Global frequency set at 1000000000000 ticks per second info: Entering event queue @ 0. Starting simulation... diff --git a/tests/long/se/50.vortex/ref/arm/linux/o3-timing/stats.txt b/tests/long/se/50.vortex/ref/arm/linux/o3-timing/stats.txt index 778d6ee7e..27ec3468d 100644 --- a/tests/long/se/50.vortex/ref/arm/linux/o3-timing/stats.txt +++ b/tests/long/se/50.vortex/ref/arm/linux/o3-timing/stats.txt @@ -4,11 +4,11 @@ sim_seconds 0.033525 # Nu sim_ticks 33524756000 # Number of ticks simulated final_tick 33524756000 # Number of ticks from beginning of simulation (restored from checkpoints and never reset) sim_freq 1000000000000 # Frequency of simulated ticks -host_inst_rate 198459 # Simulator instruction rate (inst/s) -host_op_rate 253806 # Simulator op (including micro ops) rate (op/s) -host_tick_rate 93830272 # Simulator tick rate (ticks/s) -host_mem_usage 324968 # Number of bytes of host memory used -host_seconds 357.29 # Real time elapsed on the host +host_inst_rate 98614 # Simulator instruction rate (inst/s) +host_op_rate 126116 # Simulator op (including micro ops) rate (op/s) +host_tick_rate 46624375 # Simulator tick rate (ticks/s) +host_mem_usage 277828 # Number of bytes of host memory used +host_seconds 719.04 # Real time elapsed on the host sim_insts 70907652 # Number of instructions simulated sim_ops 90682607 # Number of ops (including micro ops) simulated system.voltage_domain.voltage 1 # Voltage in Volts @@ -1186,6 +1186,7 @@ system.cpu.toL2Bus.pkt_size_system.cpu.icache.mem_side::system.cpu.l2cache.cpu_s system.cpu.toL2Bus.pkt_size_system.cpu.dcache.mem_side::system.cpu.l2cache.cpu_side 62278272 # Cumulative packet size per connected master and slave (bytes) system.cpu.toL2Bus.pkt_size::total 103910912 # Cumulative packet size per connected master and slave (bytes) system.cpu.toL2Bus.snoops 318692 # Total snoops (count) +system.cpu.toL2Bus.snoopTraffic 6218112 # Total snoop traffic (bytes) system.cpu.toL2Bus.snoop_fanout::samples 1131024 # Request fanout histogram system.cpu.toL2Bus.snoop_fanout::mean 0.140178 # Request fanout histogram system.cpu.toL2Bus.snoop_fanout::stdev 0.373630 # Request fanout histogram @@ -1216,6 +1217,7 @@ system.membus.pkt_count::total 431450 # Pa system.membus.pkt_size_system.cpu.l2cache.mem_side::system.physmem.port 16014592 # Cumulative packet size per connected master and slave (bytes) system.membus.pkt_size::total 16014592 # Cumulative packet size per connected master and slave (bytes) system.membus.snoops 0 # Total snoops (count) +system.membus.snoopTraffic 0 # Total snoop traffic (bytes) system.membus.snoop_fanout::samples 278362 # Request fanout histogram system.membus.snoop_fanout::mean 0 # Request fanout histogram system.membus.snoop_fanout::stdev 0 # Request fanout histogram diff --git 
a/tests/long/se/60.bzip2/ref/alpha/tru64/minor-timing/config.ini b/tests/long/se/60.bzip2/ref/alpha/tru64/minor-timing/config.ini index dc295a8fa..10131fd38 100644 --- a/tests/long/se/60.bzip2/ref/alpha/tru64/minor-timing/config.ini +++ b/tests/long/se/60.bzip2/ref/alpha/tru64/minor-timing/config.ini @@ -14,7 +14,9 @@ children=clk_domain cpu cpu_clk_domain dvfs_handler membus physmem voltage_domai boot_osflags=a cache_line_size=64 clk_domain=system.clk_domain +default_p_state=UNDEFINED eventq_index=0 +exit_on_work_items=false init_param=0 kernel= kernel_addr_check=true @@ -24,9 +26,16 @@ mem_mode=timing mem_ranges= memories=system.physmem mmap_using_noreserve=false +multi_thread=false num_work_ids=16 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null readfile= symbolfile= +thermal_components= +thermal_model=Null work_begin_ckpt_count=0 work_begin_cpu_id_exit=-1 work_begin_exit_count=0 @@ -55,6 +64,7 @@ decodeCycleInput=true decodeInputBufferSize=3 decodeInputWidth=2 decodeToExecuteForwardDelay=1 +default_p_state=UNDEFINED do_checkpoint_insts=true do_quiesce=true do_statistics_insts=true @@ -97,12 +107,17 @@ max_insts_any_thread=0 max_loads_all_threads=0 max_loads_any_thread=0 numThreads=1 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null profile=0 progress_interval=0 simpoint_start_insts= socket_id=0 switched_out=false system=system +threadPolicy=RoundRobin tracer=system.cpu.tracer workload=system.cpu.workload dcache_port=system.cpu.dcache.cpu_side @@ -118,11 +133,18 @@ choicePredictorSize=8192 eventq_index=0 globalCtrBits=2 globalPredictorSize=8192 +indirectHashGHR=true +indirectHashTargets=true +indirectPathLength=3 +indirectSets=256 +indirectTagSize=16 +indirectWays=2 instShiftAmt=2 localCtrBits=2 localHistoryTableSize=2048 localPredictorSize=2048 numThreads=1 +useIndirect=true [system.cpu.dcache] type=Cache @@ -130,13 +152,18 @@ children=tags addr_ranges=0:18446744073709551615 assoc=2 clk_domain=system.cpu_clk_domain +clusivity=mostly_incl +default_p_state=UNDEFINED demand_mshr_reserve=1 eventq_index=0 -forward_snoops=true hit_latency=2 is_read_only=false max_miss_count=0 mshrs=4 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null prefetch_on_access=false prefetcher=Null response_latency=2 @@ -146,6 +173,7 @@ system=system tags=system.cpu.dcache.tags tgts_per_mshr=20 write_buffers=8 +writeback_clean=false cpu_side=system.cpu.dcache_port mem_side=system.cpu.toL2Bus.slave[1] @@ -154,8 +182,13 @@ type=LRU assoc=2 block_size=64 clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 hit_latency=2 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sequential_access=false size=262144 @@ -553,13 +586,18 @@ children=tags addr_ranges=0:18446744073709551615 assoc=2 clk_domain=system.cpu_clk_domain +clusivity=mostly_incl +default_p_state=UNDEFINED demand_mshr_reserve=1 eventq_index=0 -forward_snoops=true hit_latency=2 is_read_only=true max_miss_count=0 mshrs=4 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null prefetch_on_access=false prefetcher=Null response_latency=2 @@ -569,6 +607,7 @@ system=system tags=system.cpu.icache.tags tgts_per_mshr=20 write_buffers=8 +writeback_clean=true cpu_side=system.cpu.icache_port mem_side=system.cpu.toL2Bus.slave[0] @@ -577,8 +616,13 @@ type=LRU assoc=2 block_size=64 
clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 hit_latency=2 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sequential_access=false size=131072 @@ -602,13 +646,18 @@ children=tags addr_ranges=0:18446744073709551615 assoc=8 clk_domain=system.cpu_clk_domain +clusivity=mostly_incl +default_p_state=UNDEFINED demand_mshr_reserve=1 eventq_index=0 -forward_snoops=true hit_latency=20 is_read_only=false max_miss_count=0 mshrs=20 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null prefetch_on_access=false prefetcher=Null response_latency=20 @@ -618,6 +667,7 @@ system=system tags=system.cpu.l2cache.tags tgts_per_mshr=12 write_buffers=8 +writeback_clean=false cpu_side=system.cpu.toL2Bus.master[0] mem_side=system.membus.slave[1] @@ -626,19 +676,31 @@ type=LRU assoc=8 block_size=64 clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 hit_latency=20 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sequential_access=false size=2097152 [system.cpu.toL2Bus] type=CoherentXBar +children=snoop_filter clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 forward_latency=0 frontend_latency=1 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +point_of_coherency=false +power_model=Null response_latency=1 -snoop_filter=Null +snoop_filter=system.cpu.toL2Bus.snoop_filter snoop_response_latency=1 system=system use_default_range=false @@ -646,6 +708,13 @@ width=32 master=system.cpu.l2cache.cpu_side slave=system.cpu.icache.mem_side system.cpu.dcache.mem_side +[system.cpu.toL2Bus.snoop_filter] +type=SnoopFilter +eventq_index=0 +lookup_latency=0 +max_capacity=8388608 +system=system + [system.cpu.tracer] type=ExeTracer eventq_index=0 @@ -660,7 +729,7 @@ env= errout=cerr euid=100 eventq_index=0 -executable=/scratch/nilay/GEM5/dist/m5/cpu2000/binaries/alpha/tru64/bzip2 +executable=/arm/projectscratch/randd/systems/dist/cpu2000/binaries/alpha/tru64/bzip2 gid=100 input=cin kvmInSE=false @@ -692,9 +761,15 @@ transition_latency=100000000 [system.membus] type=CoherentXBar clk_domain=system.clk_domain +default_p_state=UNDEFINED eventq_index=0 forward_latency=4 frontend_latency=3 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +point_of_coherency=true +power_model=Null response_latency=2 snoop_filter=Null snoop_response_latency=4 @@ -738,6 +813,7 @@ burst_length=8 channels=1 clk_domain=system.clk_domain conf_table_reported=true +default_p_state=UNDEFINED device_bus_width=8 device_rowbuffer_size=1024 device_size=536870912 @@ -749,7 +825,11 @@ max_accesses_per_row=16 mem_sched_policy=frfcfs min_writes_per_switch=16 null=false +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 page_policy=open_adaptive +power_model=Null range=0:134217727 ranks_per_channel=2 read_buffer_size=32 diff --git a/tests/long/se/60.bzip2/ref/alpha/tru64/minor-timing/simerr b/tests/long/se/60.bzip2/ref/alpha/tru64/minor-timing/simerr index de77515a1..e0bca4e4e 100755 --- a/tests/long/se/60.bzip2/ref/alpha/tru64/minor-timing/simerr +++ b/tests/long/se/60.bzip2/ref/alpha/tru64/minor-timing/simerr @@ -1,4 +1,6 @@ +warn: DRAM device capacity (8192 Mbytes) does not match the address range assigned (128 Mbytes) warn: Sockets disabled, not accepting gdb connections +warn: ClockedObject: More than one power state 
change request encountered within the same simulation tick warn: Prefetch instructions in Alpha do not do anything warn: Prefetch instructions in Alpha do not do anything warn: Prefetch instructions in Alpha do not do anything diff --git a/tests/long/se/60.bzip2/ref/alpha/tru64/minor-timing/simout b/tests/long/se/60.bzip2/ref/alpha/tru64/minor-timing/simout index f1d88cff2..cd35cd53a 100644..100755 --- a/tests/long/se/60.bzip2/ref/alpha/tru64/minor-timing/simout +++ b/tests/long/se/60.bzip2/ref/alpha/tru64/minor-timing/simout @@ -3,10 +3,11 @@ Redirecting stderr to build/ALPHA/tests/opt/long/se/60.bzip2/alpha/tru64/minor-t gem5 Simulator System. http://gem5.org gem5 is copyrighted software; use the --copyright option for details. -gem5 compiled May 7 2014 10:41:53 -gem5 started May 7 2014 12:11:11 -gem5 executing on cz3212c2d7 -command line: build/ALPHA/gem5.opt -d build/ALPHA/tests/opt/long/se/60.bzip2/alpha/tru64/minor-timing -re tests/run.py build/ALPHA/tests/opt/long/se/60.bzip2/alpha/tru64/minor-timing +gem5 compiled Jul 19 2016 12:23:51 +gem5 started Jul 21 2016 14:09:29 +gem5 executing on e108600-lin, pid 4307 +command line: /work/curdun01/gem5-external.hg/build/ALPHA/gem5.opt -d build/ALPHA/tests/opt/long/se/60.bzip2/alpha/tru64/minor-timing -re /work/curdun01/gem5-external.hg/tests/testing/../run.py long/se/60.bzip2/alpha/tru64/minor-timing + Global frequency set at 1000000000000 ticks per second info: Entering event queue @ 0. Starting simulation... info: Increasing stack size by one page. @@ -25,4 +26,4 @@ Uncompressing Data Uncompressed data 1048576 bytes in length Uncompressed data compared correctly Tested 1MB buffer: OK! -Exiting @ tick 1184839137500 because target called exit() +Exiting @ tick 1219570622500 because target called exit() diff --git a/tests/long/se/60.bzip2/ref/alpha/tru64/minor-timing/stats.txt b/tests/long/se/60.bzip2/ref/alpha/tru64/minor-timing/stats.txt index e74f79662..096e1a113 100644 --- a/tests/long/se/60.bzip2/ref/alpha/tru64/minor-timing/stats.txt +++ b/tests/long/se/60.bzip2/ref/alpha/tru64/minor-timing/stats.txt @@ -1,106 +1,106 @@ ---------- Begin Simulation Statistics ---------- -sim_seconds 1.208778 # Number of seconds simulated -sim_ticks 1208777694500 # Number of ticks simulated -final_tick 1208777694500 # Number of ticks from beginning of simulation (restored from checkpoints and never reset) +sim_seconds 1.219571 # Number of seconds simulated +sim_ticks 1219570622500 # Number of ticks simulated +final_tick 1219570622500 # Number of ticks from beginning of simulation (restored from checkpoints and never reset) sim_freq 1000000000000 # Frequency of simulated ticks -host_inst_rate 530685 # Simulator instruction rate (inst/s) -host_op_rate 530685 # Simulator op (including micro ops) rate (op/s) -host_tick_rate 351230785 # Simulator tick rate (ticks/s) -host_mem_usage 297332 # Number of bytes of host memory used -host_seconds 3441.55 # Real time elapsed on the host +host_inst_rate 313924 # Simulator instruction rate (inst/s) +host_op_rate 313924 # Simulator op (including micro ops) rate (op/s) +host_tick_rate 209623743 # Simulator tick rate (ticks/s) +host_mem_usage 249764 # Number of bytes of host memory used +host_seconds 5817.90 # Real time elapsed on the host sim_insts 1826378509 # Number of instructions simulated sim_ops 1826378509 # Number of ops (including micro ops) simulated system.voltage_domain.voltage 1 # Voltage in Volts system.clk_domain.clock 1000 # Clock period in ticks -system.physmem.pwrStateResidencyTicks::UNDEFINED 
1208777694500 # Cumulative time (in ticks) in various power states -system.physmem.bytes_read::cpu.inst 61312 # Number of bytes read from this memory -system.physmem.bytes_read::cpu.data 124970112 # Number of bytes read from this memory -system.physmem.bytes_read::total 125031424 # Number of bytes read from this memory -system.physmem.bytes_inst_read::cpu.inst 61312 # Number of instructions bytes read from this memory -system.physmem.bytes_inst_read::total 61312 # Number of instructions bytes read from this memory -system.physmem.bytes_written::writebacks 65416896 # Number of bytes written to this memory -system.physmem.bytes_written::total 65416896 # Number of bytes written to this memory -system.physmem.num_reads::cpu.inst 958 # Number of read requests responded to by this memory -system.physmem.num_reads::cpu.data 1952658 # Number of read requests responded to by this memory -system.physmem.num_reads::total 1953616 # Number of read requests responded to by this memory -system.physmem.num_writes::writebacks 1022139 # Number of write requests responded to by this memory -system.physmem.num_writes::total 1022139 # Number of write requests responded to by this memory -system.physmem.bw_read::cpu.inst 50722 # Total read bandwidth from this memory (bytes/s) -system.physmem.bw_read::cpu.data 103385521 # Total read bandwidth from this memory (bytes/s) -system.physmem.bw_read::total 103436244 # Total read bandwidth from this memory (bytes/s) -system.physmem.bw_inst_read::cpu.inst 50722 # Instruction read bandwidth from this memory (bytes/s) -system.physmem.bw_inst_read::total 50722 # Instruction read bandwidth from this memory (bytes/s) -system.physmem.bw_write::writebacks 54118219 # Write bandwidth from this memory (bytes/s) -system.physmem.bw_write::total 54118219 # Write bandwidth from this memory (bytes/s) -system.physmem.bw_total::writebacks 54118219 # Total bandwidth to/from this memory (bytes/s) -system.physmem.bw_total::cpu.inst 50722 # Total bandwidth to/from this memory (bytes/s) -system.physmem.bw_total::cpu.data 103385521 # Total bandwidth to/from this memory (bytes/s) -system.physmem.bw_total::total 157554463 # Total bandwidth to/from this memory (bytes/s) -system.physmem.readReqs 1953616 # Number of read requests accepted -system.physmem.writeReqs 1022139 # Number of write requests accepted -system.physmem.readBursts 1953616 # Number of DRAM read bursts, including those serviced by the write queue -system.physmem.writeBursts 1022139 # Number of DRAM write bursts, including those merged in the write queue -system.physmem.bytesReadDRAM 124948416 # Total number of bytes read from DRAM -system.physmem.bytesReadWrQ 83008 # Total number of bytes read from write queue -system.physmem.bytesWritten 65415616 # Total number of bytes written to DRAM -system.physmem.bytesReadSys 125031424 # Total read bytes from the system interface side -system.physmem.bytesWrittenSys 65416896 # Total written bytes from the system interface side -system.physmem.servicedByWrQ 1297 # Number of DRAM read bursts serviced by the write queue +system.physmem.pwrStateResidencyTicks::UNDEFINED 1219570622500 # Cumulative time (in ticks) in various power states +system.physmem.bytes_read::cpu.inst 61632 # Number of bytes read from this memory +system.physmem.bytes_read::cpu.data 124970496 # Number of bytes read from this memory +system.physmem.bytes_read::total 125032128 # Number of bytes read from this memory +system.physmem.bytes_inst_read::cpu.inst 61632 # Number of instructions bytes read from this memory 
+system.physmem.bytes_inst_read::total 61632 # Number of instructions bytes read from this memory +system.physmem.bytes_written::writebacks 65417280 # Number of bytes written to this memory +system.physmem.bytes_written::total 65417280 # Number of bytes written to this memory +system.physmem.num_reads::cpu.inst 963 # Number of read requests responded to by this memory +system.physmem.num_reads::cpu.data 1952664 # Number of read requests responded to by this memory +system.physmem.num_reads::total 1953627 # Number of read requests responded to by this memory +system.physmem.num_writes::writebacks 1022145 # Number of write requests responded to by this memory +system.physmem.num_writes::total 1022145 # Number of write requests responded to by this memory +system.physmem.bw_read::cpu.inst 50536 # Total read bandwidth from this memory (bytes/s) +system.physmem.bw_read::cpu.data 102470897 # Total read bandwidth from this memory (bytes/s) +system.physmem.bw_read::total 102521433 # Total read bandwidth from this memory (bytes/s) +system.physmem.bw_inst_read::cpu.inst 50536 # Instruction read bandwidth from this memory (bytes/s) +system.physmem.bw_inst_read::total 50536 # Instruction read bandwidth from this memory (bytes/s) +system.physmem.bw_write::writebacks 53639600 # Write bandwidth from this memory (bytes/s) +system.physmem.bw_write::total 53639600 # Write bandwidth from this memory (bytes/s) +system.physmem.bw_total::writebacks 53639600 # Total bandwidth to/from this memory (bytes/s) +system.physmem.bw_total::cpu.inst 50536 # Total bandwidth to/from this memory (bytes/s) +system.physmem.bw_total::cpu.data 102470897 # Total bandwidth to/from this memory (bytes/s) +system.physmem.bw_total::total 156161033 # Total bandwidth to/from this memory (bytes/s) +system.physmem.readReqs 1953627 # Number of read requests accepted +system.physmem.writeReqs 1022145 # Number of write requests accepted +system.physmem.readBursts 1953627 # Number of DRAM read bursts, including those serviced by the write queue +system.physmem.writeBursts 1022145 # Number of DRAM write bursts, including those merged in the write queue +system.physmem.bytesReadDRAM 124950016 # Total number of bytes read from DRAM +system.physmem.bytesReadWrQ 82112 # Total number of bytes read from write queue +system.physmem.bytesWritten 65416064 # Total number of bytes written to DRAM +system.physmem.bytesReadSys 125032128 # Total read bytes from the system interface side +system.physmem.bytesWrittenSys 65417280 # Total written bytes from the system interface side +system.physmem.servicedByWrQ 1283 # Number of DRAM read bursts serviced by the write queue system.physmem.mergedWrBursts 0 # Number of DRAM write bursts merged with an existing one system.physmem.neitherReadNorWriteReqs 0 # Number of requests that are neither read nor write -system.physmem.perBankRdBursts::0 118316 # Per bank write bursts -system.physmem.perBankRdBursts::1 113525 # Per bank write bursts -system.physmem.perBankRdBursts::2 115740 # Per bank write bursts -system.physmem.perBankRdBursts::3 117258 # Per bank write bursts -system.physmem.perBankRdBursts::4 117310 # Per bank write bursts -system.physmem.perBankRdBursts::5 117126 # Per bank write bursts -system.physmem.perBankRdBursts::6 119402 # Per bank write bursts -system.physmem.perBankRdBursts::7 124113 # Per bank write bursts -system.physmem.perBankRdBursts::8 126650 # Per bank write bursts +system.physmem.perBankRdBursts::0 118315 # Per bank write bursts +system.physmem.perBankRdBursts::1 113533 # Per bank write 
bursts +system.physmem.perBankRdBursts::2 115749 # Per bank write bursts +system.physmem.perBankRdBursts::3 117256 # Per bank write bursts +system.physmem.perBankRdBursts::4 117296 # Per bank write bursts +system.physmem.perBankRdBursts::5 117124 # Per bank write bursts +system.physmem.perBankRdBursts::6 119398 # Per bank write bursts +system.physmem.perBankRdBursts::7 124125 # Per bank write bursts +system.physmem.perBankRdBursts::8 126652 # Per bank write bursts system.physmem.perBankRdBursts::9 129582 # Per bank write bursts -system.physmem.perBankRdBursts::10 128169 # Per bank write bursts -system.physmem.perBankRdBursts::11 129917 # Per bank write bursts -system.physmem.perBankRdBursts::12 125580 # Per bank write bursts -system.physmem.perBankRdBursts::13 124837 # Per bank write bursts -system.physmem.perBankRdBursts::14 122150 # Per bank write bursts -system.physmem.perBankRdBursts::15 122644 # Per bank write bursts -system.physmem.perBankWrBursts::0 61421 # Per bank write bursts -system.physmem.perBankWrBursts::1 61661 # Per bank write bursts -system.physmem.perBankWrBursts::2 60724 # Per bank write bursts -system.physmem.perBankWrBursts::3 61398 # Per bank write bursts -system.physmem.perBankWrBursts::4 61819 # Per bank write bursts -system.physmem.perBankWrBursts::5 63309 # Per bank write bursts -system.physmem.perBankWrBursts::6 64356 # Per bank write bursts -system.physmem.perBankWrBursts::7 65855 # Per bank write bursts -system.physmem.perBankWrBursts::8 65577 # Per bank write bursts -system.physmem.perBankWrBursts::9 66031 # Per bank write bursts -system.physmem.perBankWrBursts::10 65643 # Per bank write bursts -system.physmem.perBankWrBursts::11 65945 # Per bank write bursts -system.physmem.perBankWrBursts::12 64508 # Per bank write bursts -system.physmem.perBankWrBursts::13 64526 # Per bank write bursts +system.physmem.perBankRdBursts::10 128170 # Per bank write bursts +system.physmem.perBankRdBursts::11 129930 # Per bank write bursts +system.physmem.perBankRdBursts::12 125581 # Per bank write bursts +system.physmem.perBankRdBursts::13 124839 # Per bank write bursts +system.physmem.perBankRdBursts::14 122149 # Per bank write bursts +system.physmem.perBankRdBursts::15 122645 # Per bank write bursts +system.physmem.perBankWrBursts::0 61422 # Per bank write bursts +system.physmem.perBankWrBursts::1 61664 # Per bank write bursts +system.physmem.perBankWrBursts::2 60725 # Per bank write bursts +system.physmem.perBankWrBursts::3 61395 # Per bank write bursts +system.physmem.perBankWrBursts::4 61816 # Per bank write bursts +system.physmem.perBankWrBursts::5 63307 # Per bank write bursts +system.physmem.perBankWrBursts::6 64357 # Per bank write bursts +system.physmem.perBankWrBursts::7 65854 # Per bank write bursts +system.physmem.perBankWrBursts::8 65580 # Per bank write bursts +system.physmem.perBankWrBursts::9 66032 # Per bank write bursts +system.physmem.perBankWrBursts::10 65645 # Per bank write bursts +system.physmem.perBankWrBursts::11 65946 # Per bank write bursts +system.physmem.perBankWrBursts::12 64510 # Per bank write bursts +system.physmem.perBankWrBursts::13 64527 # Per bank write bursts system.physmem.perBankWrBursts::14 64900 # Per bank write bursts system.physmem.perBankWrBursts::15 64446 # Per bank write bursts system.physmem.numRdRetry 0 # Number of times read queue was full causing retry system.physmem.numWrRetry 0 # Number of times write queue was full causing retry -system.physmem.totGap 1208777578000 # Total gap between requests +system.physmem.totGap 
1219570506500 # Total gap between requests system.physmem.readPktSize::0 0 # Read request sizes (log2) system.physmem.readPktSize::1 0 # Read request sizes (log2) system.physmem.readPktSize::2 0 # Read request sizes (log2) system.physmem.readPktSize::3 0 # Read request sizes (log2) system.physmem.readPktSize::4 0 # Read request sizes (log2) system.physmem.readPktSize::5 0 # Read request sizes (log2) -system.physmem.readPktSize::6 1953616 # Read request sizes (log2) +system.physmem.readPktSize::6 1953627 # Read request sizes (log2) system.physmem.writePktSize::0 0 # Write request sizes (log2) system.physmem.writePktSize::1 0 # Write request sizes (log2) system.physmem.writePktSize::2 0 # Write request sizes (log2) system.physmem.writePktSize::3 0 # Write request sizes (log2) system.physmem.writePktSize::4 0 # Write request sizes (log2) system.physmem.writePktSize::5 0 # Write request sizes (log2) -system.physmem.writePktSize::6 1022139 # Write request sizes (log2) -system.physmem.rdQLenPdf::0 1830097 # What read queue length does an incoming req see -system.physmem.rdQLenPdf::1 122205 # What read queue length does an incoming req see -system.physmem.rdQLenPdf::2 17 # What read queue length does an incoming req see +system.physmem.writePktSize::6 1022145 # Write request sizes (log2) +system.physmem.rdQLenPdf::0 1833407 # What read queue length does an incoming req see +system.physmem.rdQLenPdf::1 118928 # What read queue length does an incoming req see +system.physmem.rdQLenPdf::2 9 # What read queue length does an incoming req see system.physmem.rdQLenPdf::3 0 # What read queue length does an incoming req see system.physmem.rdQLenPdf::4 0 # What read queue length does an incoming req see system.physmem.rdQLenPdf::5 0 # What read queue length does an incoming req see @@ -145,35 +145,35 @@ system.physmem.wrQLenPdf::11 1 # Wh system.physmem.wrQLenPdf::12 1 # What write queue length does an incoming req see system.physmem.wrQLenPdf::13 1 # What write queue length does an incoming req see system.physmem.wrQLenPdf::14 1 # What write queue length does an incoming req see -system.physmem.wrQLenPdf::15 30602 # What write queue length does an incoming req see -system.physmem.wrQLenPdf::16 32045 # What write queue length does an incoming req see -system.physmem.wrQLenPdf::17 55307 # What write queue length does an incoming req see -system.physmem.wrQLenPdf::18 59695 # What write queue length does an incoming req see -system.physmem.wrQLenPdf::19 60116 # What write queue length does an incoming req see -system.physmem.wrQLenPdf::20 60223 # What write queue length does an incoming req see -system.physmem.wrQLenPdf::21 60190 # What write queue length does an incoming req see -system.physmem.wrQLenPdf::22 60196 # What write queue length does an incoming req see -system.physmem.wrQLenPdf::23 60182 # What write queue length does an incoming req see -system.physmem.wrQLenPdf::24 60140 # What write queue length does an incoming req see -system.physmem.wrQLenPdf::25 60199 # What write queue length does an incoming req see -system.physmem.wrQLenPdf::26 60169 # What write queue length does an incoming req see -system.physmem.wrQLenPdf::27 60684 # What write queue length does an incoming req see -system.physmem.wrQLenPdf::28 61042 # What write queue length does an incoming req see -system.physmem.wrQLenPdf::29 60657 # What write queue length does an incoming req see -system.physmem.wrQLenPdf::30 61101 # What write queue length does an incoming req see -system.physmem.wrQLenPdf::31 59828 # What write queue length 
does an incoming req see -system.physmem.wrQLenPdf::32 59617 # What write queue length does an incoming req see -system.physmem.wrQLenPdf::33 96 # What write queue length does an incoming req see -system.physmem.wrQLenPdf::34 18 # What write queue length does an incoming req see -system.physmem.wrQLenPdf::35 7 # What write queue length does an incoming req see -system.physmem.wrQLenPdf::36 3 # What write queue length does an incoming req see -system.physmem.wrQLenPdf::37 1 # What write queue length does an incoming req see -system.physmem.wrQLenPdf::38 1 # What write queue length does an incoming req see -system.physmem.wrQLenPdf::39 1 # What write queue length does an incoming req see -system.physmem.wrQLenPdf::40 1 # What write queue length does an incoming req see -system.physmem.wrQLenPdf::41 1 # What write queue length does an incoming req see -system.physmem.wrQLenPdf::42 1 # What write queue length does an incoming req see -system.physmem.wrQLenPdf::43 1 # What write queue length does an incoming req see +system.physmem.wrQLenPdf::15 30664 # What write queue length does an incoming req see +system.physmem.wrQLenPdf::16 32017 # What write queue length does an incoming req see +system.physmem.wrQLenPdf::17 55394 # What write queue length does an incoming req see +system.physmem.wrQLenPdf::18 59725 # What write queue length does an incoming req see +system.physmem.wrQLenPdf::19 60150 # What write queue length does an incoming req see +system.physmem.wrQLenPdf::20 60160 # What write queue length does an incoming req see +system.physmem.wrQLenPdf::21 60171 # What write queue length does an incoming req see +system.physmem.wrQLenPdf::22 60164 # What write queue length does an incoming req see +system.physmem.wrQLenPdf::23 60165 # What write queue length does an incoming req see +system.physmem.wrQLenPdf::24 60205 # What write queue length does an incoming req see +system.physmem.wrQLenPdf::25 60270 # What write queue length does an incoming req see +system.physmem.wrQLenPdf::26 60241 # What write queue length does an incoming req see +system.physmem.wrQLenPdf::27 60697 # What write queue length does an incoming req see +system.physmem.wrQLenPdf::28 61009 # What write queue length does an incoming req see +system.physmem.wrQLenPdf::29 60531 # What write queue length does an incoming req see +system.physmem.wrQLenPdf::30 61008 # What write queue length does an incoming req see +system.physmem.wrQLenPdf::31 59822 # What write queue length does an incoming req see +system.physmem.wrQLenPdf::32 59630 # What write queue length does an incoming req see +system.physmem.wrQLenPdf::33 89 # What write queue length does an incoming req see +system.physmem.wrQLenPdf::34 16 # What write queue length does an incoming req see +system.physmem.wrQLenPdf::35 2 # What write queue length does an incoming req see +system.physmem.wrQLenPdf::36 0 # What write queue length does an incoming req see +system.physmem.wrQLenPdf::37 0 # What write queue length does an incoming req see +system.physmem.wrQLenPdf::38 0 # What write queue length does an incoming req see +system.physmem.wrQLenPdf::39 0 # What write queue length does an incoming req see +system.physmem.wrQLenPdf::40 0 # What write queue length does an incoming req see +system.physmem.wrQLenPdf::41 0 # What write queue length does an incoming req see +system.physmem.wrQLenPdf::42 0 # What write queue length does an incoming req see +system.physmem.wrQLenPdf::43 0 # What write queue length does an incoming req see system.physmem.wrQLenPdf::44 0 # What write 
queue length does an incoming req see system.physmem.wrQLenPdf::45 0 # What write queue length does an incoming req see system.physmem.wrQLenPdf::46 0 # What write queue length does an incoming req see @@ -194,31 +194,31 @@ system.physmem.wrQLenPdf::60 0 # Wh system.physmem.wrQLenPdf::61 0 # What write queue length does an incoming req see system.physmem.wrQLenPdf::62 0 # What write queue length does an incoming req see system.physmem.wrQLenPdf::63 0 # What write queue length does an incoming req see -system.physmem.bytesPerActivate::samples 1831457 # Bytes accessed per row activation -system.physmem.bytesPerActivate::mean 103.940817 # Bytes accessed per row activation -system.physmem.bytesPerActivate::gmean 81.136003 # Bytes accessed per row activation -system.physmem.bytesPerActivate::stdev 130.529919 # Bytes accessed per row activation -system.physmem.bytesPerActivate::0-127 1452947 79.33% 79.33% # Bytes accessed per row activation -system.physmem.bytesPerActivate::128-255 261995 14.31% 93.64% # Bytes accessed per row activation -system.physmem.bytesPerActivate::256-383 48664 2.66% 96.30% # Bytes accessed per row activation -system.physmem.bytesPerActivate::384-511 20593 1.12% 97.42% # Bytes accessed per row activation -system.physmem.bytesPerActivate::512-639 13175 0.72% 98.14% # Bytes accessed per row activation -system.physmem.bytesPerActivate::640-767 7238 0.40% 98.53% # Bytes accessed per row activation -system.physmem.bytesPerActivate::768-895 5438 0.30% 98.83% # Bytes accessed per row activation -system.physmem.bytesPerActivate::896-1023 4580 0.25% 99.08% # Bytes accessed per row activation -system.physmem.bytesPerActivate::1024-1151 16827 0.92% 100.00% # Bytes accessed per row activation -system.physmem.bytesPerActivate::total 1831457 # Bytes accessed per row activation -system.physmem.rdPerTurnAround::samples 59614 # Reads before turning the bus around for writes -system.physmem.rdPerTurnAround::mean 32.747643 # Reads before turning the bus around for writes -system.physmem.rdPerTurnAround::stdev 146.947369 # Reads before turning the bus around for writes -system.physmem.rdPerTurnAround::0-511 59453 99.73% 99.73% # Reads before turning the bus around for writes -system.physmem.rdPerTurnAround::512-1023 115 0.19% 99.92% # Reads before turning the bus around for writes -system.physmem.rdPerTurnAround::1024-1535 9 0.02% 99.94% # Reads before turning the bus around for writes -system.physmem.rdPerTurnAround::1536-2047 9 0.02% 99.95% # Reads before turning the bus around for writes -system.physmem.rdPerTurnAround::2048-2559 8 0.01% 99.97% # Reads before turning the bus around for writes -system.physmem.rdPerTurnAround::2560-3071 3 0.01% 99.97% # Reads before turning the bus around for writes -system.physmem.rdPerTurnAround::3072-3583 3 0.01% 99.98% # Reads before turning the bus around for writes -system.physmem.rdPerTurnAround::3584-4095 3 0.01% 99.98% # Reads before turning the bus around for writes +system.physmem.bytesPerActivate::samples 1832533 # Bytes accessed per row activation +system.physmem.bytesPerActivate::mean 103.880589 # Bytes accessed per row activation +system.physmem.bytesPerActivate::gmean 81.106196 # Bytes accessed per row activation +system.physmem.bytesPerActivate::stdev 130.417770 # Bytes accessed per row activation +system.physmem.bytesPerActivate::0-127 1454670 79.38% 79.38% # Bytes accessed per row activation +system.physmem.bytesPerActivate::128-255 261169 14.25% 93.63% # Bytes accessed per row activation +system.physmem.bytesPerActivate::256-383 48917 
2.67% 96.30% # Bytes accessed per row activation +system.physmem.bytesPerActivate::384-511 20611 1.12% 97.43% # Bytes accessed per row activation +system.physmem.bytesPerActivate::512-639 13239 0.72% 98.15% # Bytes accessed per row activation +system.physmem.bytesPerActivate::640-767 7059 0.39% 98.53% # Bytes accessed per row activation +system.physmem.bytesPerActivate::768-895 5499 0.30% 98.83% # Bytes accessed per row activation +system.physmem.bytesPerActivate::896-1023 4584 0.25% 99.08% # Bytes accessed per row activation +system.physmem.bytesPerActivate::1024-1151 16785 0.92% 100.00% # Bytes accessed per row activation +system.physmem.bytesPerActivate::total 1832533 # Bytes accessed per row activation +system.physmem.rdPerTurnAround::samples 59623 # Reads before turning the bus around for writes +system.physmem.rdPerTurnAround::mean 32.744209 # Reads before turning the bus around for writes +system.physmem.rdPerTurnAround::stdev 148.154914 # Reads before turning the bus around for writes +system.physmem.rdPerTurnAround::0-511 59464 99.73% 99.73% # Reads before turning the bus around for writes +system.physmem.rdPerTurnAround::512-1023 114 0.19% 99.92% # Reads before turning the bus around for writes +system.physmem.rdPerTurnAround::1024-1535 10 0.02% 99.94% # Reads before turning the bus around for writes +system.physmem.rdPerTurnAround::1536-2047 6 0.01% 99.95% # Reads before turning the bus around for writes +system.physmem.rdPerTurnAround::2048-2559 6 0.01% 99.96% # Reads before turning the bus around for writes +system.physmem.rdPerTurnAround::2560-3071 5 0.01% 99.97% # Reads before turning the bus around for writes +system.physmem.rdPerTurnAround::3072-3583 3 0.01% 99.97% # Reads before turning the bus around for writes +system.physmem.rdPerTurnAround::3584-4095 4 0.01% 99.98% # Reads before turning the bus around for writes system.physmem.rdPerTurnAround::4096-4607 2 0.00% 99.98% # Reads before turning the bus around for writes system.physmem.rdPerTurnAround::4608-5119 2 0.00% 99.99% # Reads before turning the bus around for writes system.physmem.rdPerTurnAround::6656-7167 1 0.00% 99.99% # Reads before turning the bus around for writes @@ -226,109 +226,107 @@ system.physmem.rdPerTurnAround::8704-9215 1 0.00% 99.99% # R system.physmem.rdPerTurnAround::9216-9727 1 0.00% 99.99% # Reads before turning the bus around for writes system.physmem.rdPerTurnAround::10752-11263 1 0.00% 99.99% # Reads before turning the bus around for writes system.physmem.rdPerTurnAround::11776-12287 1 0.00% 100.00% # Reads before turning the bus around for writes -system.physmem.rdPerTurnAround::12288-12799 1 0.00% 100.00% # Reads before turning the bus around for writes +system.physmem.rdPerTurnAround::12800-13311 1 0.00% 100.00% # Reads before turning the bus around for writes system.physmem.rdPerTurnAround::14848-15359 1 0.00% 100.00% # Reads before turning the bus around for writes -system.physmem.rdPerTurnAround::total 59614 # Reads before turning the bus around for writes -system.physmem.wrPerTurnAround::samples 59614 # Writes before turning the bus around for reads -system.physmem.wrPerTurnAround::mean 17.145620 # Writes before turning the bus around for reads -system.physmem.wrPerTurnAround::gmean 17.109391 # Writes before turning the bus around for reads -system.physmem.wrPerTurnAround::stdev 1.119268 # Writes before turning the bus around for reads -system.physmem.wrPerTurnAround::16 27453 46.05% 46.05% # Writes before turning the bus around for reads -system.physmem.wrPerTurnAround::17 1268 
2.13% 48.18% # Writes before turning the bus around for reads -system.physmem.wrPerTurnAround::18 26337 44.18% 92.36% # Writes before turning the bus around for reads -system.physmem.wrPerTurnAround::19 4007 6.72% 99.08% # Writes before turning the bus around for reads -system.physmem.wrPerTurnAround::20 455 0.76% 99.84% # Writes before turning the bus around for reads -system.physmem.wrPerTurnAround::21 71 0.12% 99.96% # Writes before turning the bus around for reads -system.physmem.wrPerTurnAround::22 15 0.03% 99.99% # Writes before turning the bus around for reads -system.physmem.wrPerTurnAround::23 6 0.01% 100.00% # Writes before turning the bus around for reads -system.physmem.wrPerTurnAround::24 1 0.00% 100.00% # Writes before turning the bus around for reads -system.physmem.wrPerTurnAround::33 1 0.00% 100.00% # Writes before turning the bus around for reads -system.physmem.wrPerTurnAround::total 59614 # Writes before turning the bus around for reads -system.physmem.totQLat 36537628750 # Total ticks spent queuing -system.physmem.totMemAccLat 73143610000 # Total ticks spent from burst creation until serviced by the DRAM -system.physmem.totBusLat 9761595000 # Total ticks spent in databus transfers -system.physmem.avgQLat 18714.99 # Average queueing delay per DRAM burst +system.physmem.rdPerTurnAround::total 59623 # Reads before turning the bus around for writes +system.physmem.wrPerTurnAround::samples 59623 # Writes before turning the bus around for reads +system.physmem.wrPerTurnAround::mean 17.143149 # Writes before turning the bus around for reads +system.physmem.wrPerTurnAround::gmean 17.107238 # Writes before turning the bus around for reads +system.physmem.wrPerTurnAround::stdev 1.113236 # Writes before turning the bus around for reads +system.physmem.wrPerTurnAround::16 27459 46.05% 46.05% # Writes before turning the bus around for reads +system.physmem.wrPerTurnAround::17 1251 2.10% 48.15% # Writes before turning the bus around for reads +system.physmem.wrPerTurnAround::18 26456 44.37% 92.52% # Writes before turning the bus around for reads +system.physmem.wrPerTurnAround::19 3936 6.60% 99.13% # Writes before turning the bus around for reads +system.physmem.wrPerTurnAround::20 436 0.73% 99.86% # Writes before turning the bus around for reads +system.physmem.wrPerTurnAround::21 70 0.12% 99.97% # Writes before turning the bus around for reads +system.physmem.wrPerTurnAround::22 12 0.02% 99.99% # Writes before turning the bus around for reads +system.physmem.wrPerTurnAround::23 3 0.01% 100.00% # Writes before turning the bus around for reads +system.physmem.wrPerTurnAround::total 59623 # Writes before turning the bus around for reads +system.physmem.totQLat 36415699500 # Total ticks spent queuing +system.physmem.totMemAccLat 73022149500 # Total ticks spent from burst creation until serviced by the DRAM +system.physmem.totBusLat 9761720000 # Total ticks spent in databus transfers +system.physmem.avgQLat 18652.30 # Average queueing delay per DRAM burst system.physmem.avgBusLat 5000.00 # Average bus latency per DRAM burst -system.physmem.avgMemAccLat 37464.99 # Average memory access latency per DRAM burst -system.physmem.avgRdBW 103.37 # Average DRAM read bandwidth in MiByte/s -system.physmem.avgWrBW 54.12 # Average achieved write bandwidth in MiByte/s -system.physmem.avgRdBWSys 103.44 # Average system read bandwidth in MiByte/s -system.physmem.avgWrBWSys 54.12 # Average system write bandwidth in MiByte/s +system.physmem.avgMemAccLat 37402.30 # Average memory access latency per DRAM 
burst +system.physmem.avgRdBW 102.45 # Average DRAM read bandwidth in MiByte/s +system.physmem.avgWrBW 53.64 # Average achieved write bandwidth in MiByte/s +system.physmem.avgRdBWSys 102.52 # Average system read bandwidth in MiByte/s +system.physmem.avgWrBWSys 53.64 # Average system write bandwidth in MiByte/s system.physmem.peakBW 12800.00 # Theoretical peak bandwidth in MiByte/s -system.physmem.busUtil 1.23 # Data bus utilization in percentage -system.physmem.busUtilRead 0.81 # Data bus utilization in percentage for reads +system.physmem.busUtil 1.22 # Data bus utilization in percentage +system.physmem.busUtilRead 0.80 # Data bus utilization in percentage for reads system.physmem.busUtilWrite 0.42 # Data bus utilization in percentage for writes system.physmem.avgRdQLen 1.02 # Average read queue length when enqueuing -system.physmem.avgWrQLen 24.80 # Average write queue length when enqueuing -system.physmem.readRowHits 723773 # Number of row buffer hits during reads -system.physmem.writeRowHits 419204 # Number of row buffer hits during writes -system.physmem.readRowHitRate 37.07 # Row buffer hit rate for reads -system.physmem.writeRowHitRate 41.01 # Row buffer hit rate for writes -system.physmem.avgGap 406208.70 # Average gap between requests -system.physmem.pageHitRate 38.43 # Row buffer hit rate, read and write combined -system.physmem_0.actEnergy 6714376200 # Energy for activate commands per rank (pJ) -system.physmem_0.preEnergy 3663598125 # Energy for precharge commands per rank (pJ) -system.physmem_0.readEnergy 7353738600 # Energy for read commands per rank (pJ) -system.physmem_0.writeEnergy 3243518640 # Energy for write commands per rank (pJ) -system.physmem_0.refreshEnergy 78951397200 # Energy for refresh commands per rank (pJ) -system.physmem_0.actBackEnergy 415074736440 # Energy for active background per rank (pJ) -system.physmem_0.preBackEnergy 361165338750 # Energy for precharge background per rank (pJ) -system.physmem_0.totalEnergy 876166703955 # Total energy per rank (pJ) -system.physmem_0.averagePower 724.837554 # Core power per rank (mW) -system.physmem_0.memoryStateTime::IDLE 598070170000 # Time in different power states -system.physmem_0.memoryStateTime::REF 40363700000 # Time in different power states +system.physmem.avgWrQLen 24.66 # Average write queue length when enqueuing +system.physmem.readRowHits 723035 # Number of row buffer hits during reads +system.physmem.writeRowHits 418897 # Number of row buffer hits during writes +system.physmem.readRowHitRate 37.03 # Row buffer hit rate for reads +system.physmem.writeRowHitRate 40.98 # Row buffer hit rate for writes +system.physmem.avgGap 409833.32 # Average gap between requests +system.physmem.pageHitRate 38.39 # Row buffer hit rate, read and write combined +system.physmem_0.actEnergy 6719093640 # Energy for activate commands per rank (pJ) +system.physmem_0.preEnergy 3666172125 # Energy for precharge commands per rank (pJ) +system.physmem_0.readEnergy 7353785400 # Energy for read commands per rank (pJ) +system.physmem_0.writeEnergy 3243499200 # Energy for write commands per rank (pJ) +system.physmem_0.refreshEnergy 79656261360 # Energy for refresh commands per rank (pJ) +system.physmem_0.actBackEnergy 415707006375 # Energy for active background per rank (pJ) +system.physmem_0.preBackEnergy 367085761500 # Energy for precharge background per rank (pJ) +system.physmem_0.totalEnergy 883431579600 # Total energy per rank (pJ) +system.physmem_0.averagePower 724.380520 # Core power per rank (mW) 
+system.physmem_0.memoryStateTime::IDLE 607907659750 # Time in different power states +system.physmem_0.memoryStateTime::REF 40724060000 # Time in different power states system.physmem_0.memoryStateTime::PRE_PDN 0 # Time in different power states -system.physmem_0.memoryStateTime::ACT 570342873000 # Time in different power states +system.physmem_0.memoryStateTime::ACT 570937965250 # Time in different power states system.physmem_0.memoryStateTime::ACT_PDN 0 # Time in different power states -system.physmem_1.actEnergy 7131423600 # Energy for activate commands per rank (pJ) -system.physmem_1.preEnergy 3891153750 # Energy for precharge commands per rank (pJ) -system.physmem_1.readEnergy 7874224800 # Energy for read commands per rank (pJ) -system.physmem_1.writeEnergy 3379812480 # Energy for write commands per rank (pJ) -system.physmem_1.refreshEnergy 78951397200 # Energy for refresh commands per rank (pJ) -system.physmem_1.actBackEnergy 426560774805 # Energy for active background per rank (pJ) -system.physmem_1.preBackEnergy 351089866500 # Energy for precharge background per rank (pJ) -system.physmem_1.totalEnergy 878878653135 # Total energy per rank (pJ) -system.physmem_1.averagePower 727.081103 # Core power per rank (mW) -system.physmem_1.memoryStateTime::IDLE 581228871000 # Time in different power states -system.physmem_1.memoryStateTime::REF 40363700000 # Time in different power states +system.physmem_1.actEnergy 7134833160 # Energy for activate commands per rank (pJ) +system.physmem_1.preEnergy 3893014125 # Energy for precharge commands per rank (pJ) +system.physmem_1.readEnergy 7874240400 # Energy for read commands per rank (pJ) +system.physmem_1.writeEnergy 3379877280 # Energy for write commands per rank (pJ) +system.physmem_1.refreshEnergy 79656261360 # Energy for refresh commands per rank (pJ) +system.physmem_1.actBackEnergy 426752022060 # Energy for active background per rank (pJ) +system.physmem_1.preBackEnergy 357397152750 # Energy for precharge background per rank (pJ) +system.physmem_1.totalEnergy 886087401135 # Total energy per rank (pJ) +system.physmem_1.averagePower 726.558192 # Core power per rank (mW) +system.physmem_1.memoryStateTime::IDLE 591710247250 # Time in different power states +system.physmem_1.memoryStateTime::REF 40724060000 # Time in different power states system.physmem_1.memoryStateTime::PRE_PDN 0 # Time in different power states -system.physmem_1.memoryStateTime::ACT 587184084000 # Time in different power states +system.physmem_1.memoryStateTime::ACT 587134092250 # Time in different power states system.physmem_1.memoryStateTime::ACT_PDN 0 # Time in different power states -system.pwrStateResidencyTicks::UNDEFINED 1208777694500 # Cumulative time (in ticks) in various power states -system.cpu.branchPred.lookups 246097965 # Number of BP lookups -system.cpu.branchPred.condPredicted 186356162 # Number of conditional branches predicted -system.cpu.branchPred.condIncorrect 15588061 # Number of conditional branches incorrect -system.cpu.branchPred.BTBLookups 167640085 # Number of BTB lookups -system.cpu.branchPred.BTBHits 165196337 # Number of BTB hits +system.pwrStateResidencyTicks::UNDEFINED 1219570622500 # Cumulative time (in ticks) in various power states +system.cpu.branchPred.lookups 246937199 # Number of BP lookups +system.cpu.branchPred.condPredicted 186891611 # Number of conditional branches predicted +system.cpu.branchPred.condIncorrect 15587043 # Number of conditional branches incorrect +system.cpu.branchPred.BTBLookups 168278704 # Number of BTB lookups 
+system.cpu.branchPred.BTBHits 165579614 # Number of BTB hits system.cpu.branchPred.BTBCorrect 0 # Number of correct BTB predictions (this stat may not work properly. -system.cpu.branchPred.BTBHitPct 98.542265 # BTB Hit Percentage -system.cpu.branchPred.usedRAS 18413332 # Number of times the RAS was used to get a target. -system.cpu.branchPred.RASInCorrect 104391 # Number of incorrect RAS predictions. -system.cpu.branchPred.indirectLookups 297 # Number of indirect predictor lookups. -system.cpu.branchPred.indirectHits 67 # Number of indirect target hits. -system.cpu.branchPred.indirectMisses 230 # Number of indirect misses. -system.cpu.branchPredindirectMispredicted 98 # Number of mispredicted indirect branches. +system.cpu.branchPred.BTBHitPct 98.396060 # BTB Hit Percentage +system.cpu.branchPred.usedRAS 18556464 # Number of times the RAS was used to get a target. +system.cpu.branchPred.RASInCorrect 106119 # Number of incorrect RAS predictions. +system.cpu.branchPred.indirectLookups 314 # Number of indirect predictor lookups. +system.cpu.branchPred.indirectHits 63 # Number of indirect target hits. +system.cpu.branchPred.indirectMisses 251 # Number of indirect misses. +system.cpu.branchPredindirectMispredicted 101 # Number of mispredicted indirect branches. system.cpu_clk_domain.clock 500 # Clock period in ticks system.cpu.dtb.fetch_hits 0 # ITB hits system.cpu.dtb.fetch_misses 0 # ITB misses system.cpu.dtb.fetch_acv 0 # ITB acv system.cpu.dtb.fetch_accesses 0 # ITB accesses -system.cpu.dtb.read_hits 452860657 # DTB read hits -system.cpu.dtb.read_misses 4979867 # DTB read misses +system.cpu.dtb.read_hits 453406129 # DTB read hits +system.cpu.dtb.read_misses 5001511 # DTB read misses system.cpu.dtb.read_acv 0 # DTB read access violations -system.cpu.dtb.read_accesses 457840524 # DTB read accesses -system.cpu.dtb.write_hits 161378231 # DTB write hits -system.cpu.dtb.write_misses 1709431 # DTB write misses +system.cpu.dtb.read_accesses 458407640 # DTB read accesses +system.cpu.dtb.write_hits 161376524 # DTB write hits +system.cpu.dtb.write_misses 1709205 # DTB write misses system.cpu.dtb.write_acv 0 # DTB write access violations -system.cpu.dtb.write_accesses 163087662 # DTB write accesses -system.cpu.dtb.data_hits 614238888 # DTB hits -system.cpu.dtb.data_misses 6689298 # DTB misses +system.cpu.dtb.write_accesses 163085729 # DTB write accesses +system.cpu.dtb.data_hits 614782653 # DTB hits +system.cpu.dtb.data_misses 6710716 # DTB misses system.cpu.dtb.data_acv 0 # DTB access violations -system.cpu.dtb.data_accesses 620928186 # DTB accesses -system.cpu.itb.fetch_hits 597989612 # ITB hits +system.cpu.dtb.data_accesses 621493369 # DTB accesses +system.cpu.itb.fetch_hits 600073027 # ITB hits system.cpu.itb.fetch_misses 19 # ITB misses system.cpu.itb.fetch_acv 0 # ITB acv -system.cpu.itb.fetch_accesses 597989631 # ITB accesses +system.cpu.itb.fetch_accesses 600073046 # ITB accesses system.cpu.itb.read_hits 0 # DTB read hits system.cpu.itb.read_misses 0 # DTB read misses system.cpu.itb.read_acv 0 # DTB read access violations @@ -342,16 +340,16 @@ system.cpu.itb.data_misses 0 # DT system.cpu.itb.data_acv 0 # DTB access violations system.cpu.itb.data_accesses 0 # DTB accesses system.cpu.workload.num_syscalls 29 # Number of system calls -system.cpu.pwrStateResidencyTicks::ON 1208777694500 # Cumulative time (in ticks) in various power states -system.cpu.numCycles 2417555389 # number of cpu cycles simulated +system.cpu.pwrStateResidencyTicks::ON 1219570622500 # Cumulative time (in ticks) in various 
power states +system.cpu.numCycles 2439141245 # number of cpu cycles simulated system.cpu.numWorkItemsStarted 0 # number of work items this cpu started system.cpu.numWorkItemsCompleted 0 # number of work items this cpu completed system.cpu.committedInsts 1826378509 # Number of instructions committed system.cpu.committedOps 1826378509 # Number of ops (including micro ops) committed -system.cpu.discardedOps 51811935 # Number of ops (including micro ops) which were discarded before commit +system.cpu.discardedOps 55113124 # Number of ops (including micro ops) which were discarded before commit system.cpu.numFetchSuspends 0 # Number of times Execute suspended instruction fetching -system.cpu.cpi 1.323688 # CPI: cycles per instruction -system.cpu.ipc 0.755465 # IPC: instructions per cycle +system.cpu.cpi 1.335507 # CPI: cycles per instruction +system.cpu.ipc 0.748779 # IPC: instructions per cycle system.cpu.op_class_0::No_OpClass 83736345 4.58% 4.58% # Class of committed instruction system.cpu.op_class_0::IntAlu 1129914150 61.87% 66.45% # Class of committed instruction system.cpu.op_class_0::IntMult 75 0.00% 66.45% # Class of committed instruction @@ -387,176 +385,176 @@ system.cpu.op_class_0::MemWrite 162429806 8.89% 100.00% # Cl system.cpu.op_class_0::IprAccess 0 0.00% 100.00% # Class of committed instruction system.cpu.op_class_0::InstPrefetch 0 0.00% 100.00% # Class of committed instruction system.cpu.op_class_0::total 1826378509 # Class of committed instruction -system.cpu.tickCycles 2075251932 # Number of cycles that the object actually ticked -system.cpu.idleCycles 342303457 # Total number of cycles that the object has spent stopped -system.cpu.dcache.tags.pwrStateResidencyTicks::UNDEFINED 1208777694500 # Cumulative time (in ticks) in various power states -system.cpu.dcache.tags.replacements 9121974 # number of replacements -system.cpu.dcache.tags.tagsinuse 4080.726355 # Cycle average of tags in use -system.cpu.dcache.tags.total_refs 601538856 # Total number of references to valid blocks. -system.cpu.dcache.tags.sampled_refs 9126070 # Sample count of references to valid blocks. -system.cpu.dcache.tags.avg_refs 65.914337 # Average number of references to valid blocks. -system.cpu.dcache.tags.warmup_cycle 16821281500 # Cycle when the warmup percentage was hit. -system.cpu.dcache.tags.occ_blocks::cpu.data 4080.726355 # Average occupied blocks per requestor -system.cpu.dcache.tags.occ_percent::cpu.data 0.996271 # Average percentage of cache occupancy -system.cpu.dcache.tags.occ_percent::total 0.996271 # Average percentage of cache occupancy +system.cpu.tickCycles 2082121954 # Number of cycles that the object actually ticked +system.cpu.idleCycles 357019291 # Total number of cycles that the object has spent stopped +system.cpu.dcache.tags.pwrStateResidencyTicks::UNDEFINED 1219570622500 # Cumulative time (in ticks) in various power states +system.cpu.dcache.tags.replacements 9121976 # number of replacements +system.cpu.dcache.tags.tagsinuse 4080.816467 # Cycle average of tags in use +system.cpu.dcache.tags.total_refs 602780801 # Total number of references to valid blocks. +system.cpu.dcache.tags.sampled_refs 9126072 # Sample count of references to valid blocks. +system.cpu.dcache.tags.avg_refs 66.050410 # Average number of references to valid blocks. +system.cpu.dcache.tags.warmup_cycle 16880243500 # Cycle when the warmup percentage was hit. 
+system.cpu.dcache.tags.occ_blocks::cpu.data 4080.816467 # Average occupied blocks per requestor +system.cpu.dcache.tags.occ_percent::cpu.data 0.996293 # Average percentage of cache occupancy +system.cpu.dcache.tags.occ_percent::total 0.996293 # Average percentage of cache occupancy system.cpu.dcache.tags.occ_task_id_blocks::1024 4096 # Occupied blocks per task id system.cpu.dcache.tags.age_task_id_blocks_1024::0 56 # Occupied blocks per task id -system.cpu.dcache.tags.age_task_id_blocks_1024::1 1562 # Occupied blocks per task id -system.cpu.dcache.tags.age_task_id_blocks_1024::2 2407 # Occupied blocks per task id -system.cpu.dcache.tags.age_task_id_blocks_1024::3 71 # Occupied blocks per task id +system.cpu.dcache.tags.age_task_id_blocks_1024::1 1561 # Occupied blocks per task id +system.cpu.dcache.tags.age_task_id_blocks_1024::2 2409 # Occupied blocks per task id +system.cpu.dcache.tags.age_task_id_blocks_1024::3 70 # Occupied blocks per task id system.cpu.dcache.tags.occ_task_id_percent::1024 1 # Percentage of cache occupancy per task id -system.cpu.dcache.tags.tag_accesses 1231275880 # Number of tag accesses -system.cpu.dcache.tags.data_accesses 1231275880 # Number of data accesses -system.cpu.dcache.pwrStateResidencyTicks::UNDEFINED 1208777694500 # Cumulative time (in ticks) in various power states -system.cpu.dcache.ReadReq_hits::cpu.data 443056865 # number of ReadReq hits -system.cpu.dcache.ReadReq_hits::total 443056865 # number of ReadReq hits -system.cpu.dcache.WriteReq_hits::cpu.data 158481991 # number of WriteReq hits -system.cpu.dcache.WriteReq_hits::total 158481991 # number of WriteReq hits -system.cpu.dcache.demand_hits::cpu.data 601538856 # number of demand (read+write) hits -system.cpu.dcache.demand_hits::total 601538856 # number of demand (read+write) hits -system.cpu.dcache.overall_hits::cpu.data 601538856 # number of overall hits -system.cpu.dcache.overall_hits::total 601538856 # number of overall hits -system.cpu.dcache.ReadReq_misses::cpu.data 7289538 # number of ReadReq misses -system.cpu.dcache.ReadReq_misses::total 7289538 # number of ReadReq misses -system.cpu.dcache.WriteReq_misses::cpu.data 2246511 # number of WriteReq misses -system.cpu.dcache.WriteReq_misses::total 2246511 # number of WriteReq misses -system.cpu.dcache.demand_misses::cpu.data 9536049 # number of demand (read+write) misses -system.cpu.dcache.demand_misses::total 9536049 # number of demand (read+write) misses -system.cpu.dcache.overall_misses::cpu.data 9536049 # number of overall misses -system.cpu.dcache.overall_misses::total 9536049 # number of overall misses -system.cpu.dcache.ReadReq_miss_latency::cpu.data 185480529000 # number of ReadReq miss cycles -system.cpu.dcache.ReadReq_miss_latency::total 185480529000 # number of ReadReq miss cycles -system.cpu.dcache.WriteReq_miss_latency::cpu.data 108417025500 # number of WriteReq miss cycles -system.cpu.dcache.WriteReq_miss_latency::total 108417025500 # number of WriteReq miss cycles -system.cpu.dcache.demand_miss_latency::cpu.data 293897554500 # number of demand (read+write) miss cycles -system.cpu.dcache.demand_miss_latency::total 293897554500 # number of demand (read+write) miss cycles -system.cpu.dcache.overall_miss_latency::cpu.data 293897554500 # number of overall miss cycles -system.cpu.dcache.overall_miss_latency::total 293897554500 # number of overall miss cycles -system.cpu.dcache.ReadReq_accesses::cpu.data 450346403 # number of ReadReq accesses(hits+misses) -system.cpu.dcache.ReadReq_accesses::total 450346403 # number of ReadReq 
accesses(hits+misses) +system.cpu.dcache.tags.tag_accesses 1233657814 # Number of tag accesses +system.cpu.dcache.tags.data_accesses 1233657814 # Number of data accesses +system.cpu.dcache.pwrStateResidencyTicks::UNDEFINED 1219570622500 # Cumulative time (in ticks) in various power states +system.cpu.dcache.ReadReq_hits::cpu.data 444298266 # number of ReadReq hits +system.cpu.dcache.ReadReq_hits::total 444298266 # number of ReadReq hits +system.cpu.dcache.WriteReq_hits::cpu.data 158482535 # number of WriteReq hits +system.cpu.dcache.WriteReq_hits::total 158482535 # number of WriteReq hits +system.cpu.dcache.demand_hits::cpu.data 602780801 # number of demand (read+write) hits +system.cpu.dcache.demand_hits::total 602780801 # number of demand (read+write) hits +system.cpu.dcache.overall_hits::cpu.data 602780801 # number of overall hits +system.cpu.dcache.overall_hits::total 602780801 # number of overall hits +system.cpu.dcache.ReadReq_misses::cpu.data 7239103 # number of ReadReq misses +system.cpu.dcache.ReadReq_misses::total 7239103 # number of ReadReq misses +system.cpu.dcache.WriteReq_misses::cpu.data 2245967 # number of WriteReq misses +system.cpu.dcache.WriteReq_misses::total 2245967 # number of WriteReq misses +system.cpu.dcache.demand_misses::cpu.data 9485070 # number of demand (read+write) misses +system.cpu.dcache.demand_misses::total 9485070 # number of demand (read+write) misses +system.cpu.dcache.overall_misses::cpu.data 9485070 # number of overall misses +system.cpu.dcache.overall_misses::total 9485070 # number of overall misses +system.cpu.dcache.ReadReq_miss_latency::cpu.data 184068939500 # number of ReadReq miss cycles +system.cpu.dcache.ReadReq_miss_latency::total 184068939500 # number of ReadReq miss cycles +system.cpu.dcache.WriteReq_miss_latency::cpu.data 108510867000 # number of WriteReq miss cycles +system.cpu.dcache.WriteReq_miss_latency::total 108510867000 # number of WriteReq miss cycles +system.cpu.dcache.demand_miss_latency::cpu.data 292579806500 # number of demand (read+write) miss cycles +system.cpu.dcache.demand_miss_latency::total 292579806500 # number of demand (read+write) miss cycles +system.cpu.dcache.overall_miss_latency::cpu.data 292579806500 # number of overall miss cycles +system.cpu.dcache.overall_miss_latency::total 292579806500 # number of overall miss cycles +system.cpu.dcache.ReadReq_accesses::cpu.data 451537369 # number of ReadReq accesses(hits+misses) +system.cpu.dcache.ReadReq_accesses::total 451537369 # number of ReadReq accesses(hits+misses) system.cpu.dcache.WriteReq_accesses::cpu.data 160728502 # number of WriteReq accesses(hits+misses) system.cpu.dcache.WriteReq_accesses::total 160728502 # number of WriteReq accesses(hits+misses) -system.cpu.dcache.demand_accesses::cpu.data 611074905 # number of demand (read+write) accesses -system.cpu.dcache.demand_accesses::total 611074905 # number of demand (read+write) accesses -system.cpu.dcache.overall_accesses::cpu.data 611074905 # number of overall (read+write) accesses -system.cpu.dcache.overall_accesses::total 611074905 # number of overall (read+write) accesses -system.cpu.dcache.ReadReq_miss_rate::cpu.data 0.016187 # miss rate for ReadReq accesses -system.cpu.dcache.ReadReq_miss_rate::total 0.016187 # miss rate for ReadReq accesses -system.cpu.dcache.WriteReq_miss_rate::cpu.data 0.013977 # miss rate for WriteReq accesses -system.cpu.dcache.WriteReq_miss_rate::total 0.013977 # miss rate for WriteReq accesses -system.cpu.dcache.demand_miss_rate::cpu.data 0.015605 # miss rate for demand accesses 
-system.cpu.dcache.demand_miss_rate::total 0.015605 # miss rate for demand accesses
-system.cpu.dcache.overall_miss_rate::cpu.data 0.015605 # miss rate for overall accesses
-system.cpu.dcache.overall_miss_rate::total 0.015605 # miss rate for overall accesses
-system.cpu.dcache.ReadReq_avg_miss_latency::cpu.data 25444.757816 # average ReadReq miss latency
-system.cpu.dcache.ReadReq_avg_miss_latency::total 25444.757816 # average ReadReq miss latency
-system.cpu.dcache.WriteReq_avg_miss_latency::cpu.data 48260.180119 # average WriteReq miss latency
-system.cpu.dcache.WriteReq_avg_miss_latency::total 48260.180119 # average WriteReq miss latency
-system.cpu.dcache.demand_avg_miss_latency::cpu.data 30819.635522 # average overall miss latency
-system.cpu.dcache.demand_avg_miss_latency::total 30819.635522 # average overall miss latency
-system.cpu.dcache.overall_avg_miss_latency::cpu.data 30819.635522 # average overall miss latency
-system.cpu.dcache.overall_avg_miss_latency::total 30819.635522 # average overall miss latency
+system.cpu.dcache.demand_accesses::cpu.data 612265871 # number of demand (read+write) accesses
+system.cpu.dcache.demand_accesses::total 612265871 # number of demand (read+write) accesses
+system.cpu.dcache.overall_accesses::cpu.data 612265871 # number of overall (read+write) accesses
+system.cpu.dcache.overall_accesses::total 612265871 # number of overall (read+write) accesses
+system.cpu.dcache.ReadReq_miss_rate::cpu.data 0.016032 # miss rate for ReadReq accesses
+system.cpu.dcache.ReadReq_miss_rate::total 0.016032 # miss rate for ReadReq accesses
+system.cpu.dcache.WriteReq_miss_rate::cpu.data 0.013974 # miss rate for WriteReq accesses
+system.cpu.dcache.WriteReq_miss_rate::total 0.013974 # miss rate for WriteReq accesses
+system.cpu.dcache.demand_miss_rate::cpu.data 0.015492 # miss rate for demand accesses
+system.cpu.dcache.demand_miss_rate::total 0.015492 # miss rate for demand accesses
+system.cpu.dcache.overall_miss_rate::cpu.data 0.015492 # miss rate for overall accesses
+system.cpu.dcache.overall_miss_rate::total 0.015492 # miss rate for overall accesses
+system.cpu.dcache.ReadReq_avg_miss_latency::cpu.data 25427.036955 # average ReadReq miss latency
+system.cpu.dcache.ReadReq_avg_miss_latency::total 25427.036955 # average ReadReq miss latency
+system.cpu.dcache.WriteReq_avg_miss_latency::cpu.data 48313.651536 # average WriteReq miss latency
+system.cpu.dcache.WriteReq_avg_miss_latency::total 48313.651536 # average WriteReq miss latency
+system.cpu.dcache.demand_avg_miss_latency::cpu.data 30846.351846 # average overall miss latency
+system.cpu.dcache.demand_avg_miss_latency::total 30846.351846 # average overall miss latency
+system.cpu.dcache.overall_avg_miss_latency::cpu.data 30846.351846 # average overall miss latency
+system.cpu.dcache.overall_avg_miss_latency::total 30846.351846 # average overall miss latency
system.cpu.dcache.blocked_cycles::no_mshrs 0 # number of cycles access was blocked
system.cpu.dcache.blocked_cycles::no_targets 0 # number of cycles access was blocked
system.cpu.dcache.blocked::no_mshrs 0 # number of cycles access was blocked
system.cpu.dcache.blocked::no_targets 0 # number of cycles access was blocked
system.cpu.dcache.avg_blocked_cycles::no_mshrs nan # average number of cycles each access was blocked
system.cpu.dcache.avg_blocked_cycles::no_targets nan # average number of cycles each access was blocked
-system.cpu.dcache.writebacks::writebacks 3686603 # number of writebacks
-system.cpu.dcache.writebacks::total 3686603 # number of writebacks
-system.cpu.dcache.ReadReq_mshr_hits::cpu.data 50808 # number of ReadReq MSHR hits
-system.cpu.dcache.ReadReq_mshr_hits::total 50808 # number of ReadReq MSHR hits
-system.cpu.dcache.WriteReq_mshr_hits::cpu.data 359171 # number of WriteReq MSHR hits
-system.cpu.dcache.WriteReq_mshr_hits::total 359171 # number of WriteReq MSHR hits
-system.cpu.dcache.demand_mshr_hits::cpu.data 409979 # number of demand (read+write) MSHR hits
-system.cpu.dcache.demand_mshr_hits::total 409979 # number of demand (read+write) MSHR hits
-system.cpu.dcache.overall_mshr_hits::cpu.data 409979 # number of overall MSHR hits
-system.cpu.dcache.overall_mshr_hits::total 409979 # number of overall MSHR hits
-system.cpu.dcache.ReadReq_mshr_misses::cpu.data 7238730 # number of ReadReq MSHR misses
-system.cpu.dcache.ReadReq_mshr_misses::total 7238730 # number of ReadReq MSHR misses
-system.cpu.dcache.WriteReq_mshr_misses::cpu.data 1887340 # number of WriteReq MSHR misses
-system.cpu.dcache.WriteReq_mshr_misses::total 1887340 # number of WriteReq MSHR misses
-system.cpu.dcache.demand_mshr_misses::cpu.data 9126070 # number of demand (read+write) MSHR misses
-system.cpu.dcache.demand_mshr_misses::total 9126070 # number of demand (read+write) MSHR misses
-system.cpu.dcache.overall_mshr_misses::cpu.data 9126070 # number of overall MSHR misses
-system.cpu.dcache.overall_mshr_misses::total 9126070 # number of overall MSHR misses
-system.cpu.dcache.ReadReq_mshr_miss_latency::cpu.data 177011068000 # number of ReadReq MSHR miss cycles
-system.cpu.dcache.ReadReq_mshr_miss_latency::total 177011068000 # number of ReadReq MSHR miss cycles
-system.cpu.dcache.WriteReq_mshr_miss_latency::cpu.data 83258719000 # number of WriteReq MSHR miss cycles
-system.cpu.dcache.WriteReq_mshr_miss_latency::total 83258719000 # number of WriteReq MSHR miss cycles
-system.cpu.dcache.demand_mshr_miss_latency::cpu.data 260269787000 # number of demand (read+write) MSHR miss cycles
-system.cpu.dcache.demand_mshr_miss_latency::total 260269787000 # number of demand (read+write) MSHR miss cycles
-system.cpu.dcache.overall_mshr_miss_latency::cpu.data 260269787000 # number of overall MSHR miss cycles
-system.cpu.dcache.overall_mshr_miss_latency::total 260269787000 # number of overall MSHR miss cycles
-system.cpu.dcache.ReadReq_mshr_miss_rate::cpu.data 0.016074 # mshr miss rate for ReadReq accesses
-system.cpu.dcache.ReadReq_mshr_miss_rate::total 0.016074 # mshr miss rate for ReadReq accesses
+system.cpu.dcache.writebacks::writebacks 3686661 # number of writebacks
+system.cpu.dcache.writebacks::total 3686661 # number of writebacks
+system.cpu.dcache.ReadReq_mshr_hits::cpu.data 370 # number of ReadReq MSHR hits
+system.cpu.dcache.ReadReq_mshr_hits::total 370 # number of ReadReq MSHR hits
+system.cpu.dcache.WriteReq_mshr_hits::cpu.data 358628 # number of WriteReq MSHR hits
+system.cpu.dcache.WriteReq_mshr_hits::total 358628 # number of WriteReq MSHR hits
+system.cpu.dcache.demand_mshr_hits::cpu.data 358998 # number of demand (read+write) MSHR hits
+system.cpu.dcache.demand_mshr_hits::total 358998 # number of demand (read+write) MSHR hits
+system.cpu.dcache.overall_mshr_hits::cpu.data 358998 # number of overall MSHR hits
+system.cpu.dcache.overall_mshr_hits::total 358998 # number of overall MSHR hits
+system.cpu.dcache.ReadReq_mshr_misses::cpu.data 7238733 # number of ReadReq MSHR misses
+system.cpu.dcache.ReadReq_mshr_misses::total 7238733 # number of ReadReq MSHR misses
+system.cpu.dcache.WriteReq_mshr_misses::cpu.data 1887339 # number of WriteReq MSHR misses
+system.cpu.dcache.WriteReq_mshr_misses::total 1887339 # number of WriteReq MSHR misses
+system.cpu.dcache.demand_mshr_misses::cpu.data 9126072 # number of demand (read+write) MSHR misses
+system.cpu.dcache.demand_mshr_misses::total 9126072 # number of demand (read+write) MSHR misses
+system.cpu.dcache.overall_mshr_misses::cpu.data 9126072 # number of overall MSHR misses
+system.cpu.dcache.overall_mshr_misses::total 9126072 # number of overall MSHR misses
+system.cpu.dcache.ReadReq_mshr_miss_latency::cpu.data 176823131500 # number of ReadReq MSHR miss cycles
+system.cpu.dcache.ReadReq_mshr_miss_latency::total 176823131500 # number of ReadReq MSHR miss cycles
+system.cpu.dcache.WriteReq_mshr_miss_latency::cpu.data 83341929000 # number of WriteReq MSHR miss cycles
+system.cpu.dcache.WriteReq_mshr_miss_latency::total 83341929000 # number of WriteReq MSHR miss cycles
+system.cpu.dcache.demand_mshr_miss_latency::cpu.data 260165060500 # number of demand (read+write) MSHR miss cycles
+system.cpu.dcache.demand_mshr_miss_latency::total 260165060500 # number of demand (read+write) MSHR miss cycles
+system.cpu.dcache.overall_mshr_miss_latency::cpu.data 260165060500 # number of overall MSHR miss cycles
+system.cpu.dcache.overall_mshr_miss_latency::total 260165060500 # number of overall MSHR miss cycles
+system.cpu.dcache.ReadReq_mshr_miss_rate::cpu.data 0.016031 # mshr miss rate for ReadReq accesses
+system.cpu.dcache.ReadReq_mshr_miss_rate::total 0.016031 # mshr miss rate for ReadReq accesses
system.cpu.dcache.WriteReq_mshr_miss_rate::cpu.data 0.011742 # mshr miss rate for WriteReq accesses
system.cpu.dcache.WriteReq_mshr_miss_rate::total 0.011742 # mshr miss rate for WriteReq accesses
-system.cpu.dcache.demand_mshr_miss_rate::cpu.data 0.014934 # mshr miss rate for demand accesses
-system.cpu.dcache.demand_mshr_miss_rate::total 0.014934 # mshr miss rate for demand accesses
-system.cpu.dcache.overall_mshr_miss_rate::cpu.data 0.014934 # mshr miss rate for overall accesses
-system.cpu.dcache.overall_mshr_miss_rate::total 0.014934 # mshr miss rate for overall accesses
-system.cpu.dcache.ReadReq_avg_mshr_miss_latency::cpu.data 24453.332007 # average ReadReq mshr miss latency
-system.cpu.dcache.ReadReq_avg_mshr_miss_latency::total 24453.332007 # average ReadReq mshr miss latency
-system.cpu.dcache.WriteReq_avg_mshr_miss_latency::cpu.data 44114.319095 # average WriteReq mshr miss latency
-system.cpu.dcache.WriteReq_avg_mshr_miss_latency::total 44114.319095 # average WriteReq mshr miss latency
-system.cpu.dcache.demand_avg_mshr_miss_latency::cpu.data 28519.372194 # average overall mshr miss latency
-system.cpu.dcache.demand_avg_mshr_miss_latency::total 28519.372194 # average overall mshr miss latency
-system.cpu.dcache.overall_avg_mshr_miss_latency::cpu.data 28519.372194 # average overall mshr miss latency
-system.cpu.dcache.overall_avg_mshr_miss_latency::total 28519.372194 # average overall mshr miss latency
-system.cpu.icache.tags.pwrStateResidencyTicks::UNDEFINED 1208777694500 # Cumulative time (in ticks) in various power states
+system.cpu.dcache.demand_mshr_miss_rate::cpu.data 0.014905 # mshr miss rate for demand accesses
+system.cpu.dcache.demand_mshr_miss_rate::total 0.014905 # mshr miss rate for demand accesses
+system.cpu.dcache.overall_mshr_miss_rate::cpu.data 0.014905 # mshr miss rate for overall accesses
+system.cpu.dcache.overall_mshr_miss_rate::total 0.014905 # mshr miss rate for overall accesses
+system.cpu.dcache.ReadReq_avg_mshr_miss_latency::cpu.data 24427.359249 # average ReadReq mshr miss latency
+system.cpu.dcache.ReadReq_avg_mshr_miss_latency::total 24427.359249 # average ReadReq mshr miss latency
+system.cpu.dcache.WriteReq_avg_mshr_miss_latency::cpu.data 44158.430997 # average WriteReq mshr miss latency
+system.cpu.dcache.WriteReq_avg_mshr_miss_latency::total 44158.430997 # average WriteReq mshr miss latency
+system.cpu.dcache.demand_avg_mshr_miss_latency::cpu.data 28507.890416 # average overall mshr miss latency
+system.cpu.dcache.demand_avg_mshr_miss_latency::total 28507.890416 # average overall mshr miss latency
+system.cpu.dcache.overall_avg_mshr_miss_latency::cpu.data 28507.890416 # average overall mshr miss latency
+system.cpu.dcache.overall_avg_mshr_miss_latency::total 28507.890416 # average overall mshr miss latency
+system.cpu.icache.tags.pwrStateResidencyTicks::UNDEFINED 1219570622500 # Cumulative time (in ticks) in various power states
system.cpu.icache.tags.replacements 3 # number of replacements
-system.cpu.icache.tags.tagsinuse 750.173547 # Cycle average of tags in use
-system.cpu.icache.tags.total_refs 597988654 # Total number of references to valid blocks.
-system.cpu.icache.tags.sampled_refs 958 # Sample count of references to valid blocks.
-system.cpu.icache.tags.avg_refs 624205.275574 # Average number of references to valid blocks.
+system.cpu.icache.tags.tagsinuse 752.953880 # Cycle average of tags in use
+system.cpu.icache.tags.total_refs 600072064 # Total number of references to valid blocks.
+system.cpu.icache.tags.sampled_refs 963 # Sample count of references to valid blocks.
+system.cpu.icache.tags.avg_refs 623127.792316 # Average number of references to valid blocks.
system.cpu.icache.tags.warmup_cycle 0 # Cycle when the warmup percentage was hit.
-system.cpu.icache.tags.occ_blocks::cpu.inst 750.173547 # Average occupied blocks per requestor
-system.cpu.icache.tags.occ_percent::cpu.inst 0.366296 # Average percentage of cache occupancy
-system.cpu.icache.tags.occ_percent::total 0.366296 # Average percentage of cache occupancy
-system.cpu.icache.tags.occ_task_id_blocks::1024 955 # Occupied blocks per task id
-system.cpu.icache.tags.age_task_id_blocks_1024::0 81 # Occupied blocks per task id
-system.cpu.icache.tags.age_task_id_blocks_1024::4 874 # Occupied blocks per task id
-system.cpu.icache.tags.occ_task_id_percent::1024 0.466309 # Percentage of cache occupancy per task id
-system.cpu.icache.tags.tag_accesses 1195980182 # Number of tag accesses
-system.cpu.icache.tags.data_accesses 1195980182 # Number of data accesses
-system.cpu.icache.pwrStateResidencyTicks::UNDEFINED 1208777694500 # Cumulative time (in ticks) in various power states
-system.cpu.icache.ReadReq_hits::cpu.inst 597988654 # number of ReadReq hits
-system.cpu.icache.ReadReq_hits::total 597988654 # number of ReadReq hits
-system.cpu.icache.demand_hits::cpu.inst 597988654 # number of demand (read+write) hits
-system.cpu.icache.demand_hits::total 597988654 # number of demand (read+write) hits
-system.cpu.icache.overall_hits::cpu.inst 597988654 # number of overall hits
-system.cpu.icache.overall_hits::total 597988654 # number of overall hits
-system.cpu.icache.ReadReq_misses::cpu.inst 958 # number of ReadReq misses
-system.cpu.icache.ReadReq_misses::total 958 # number of ReadReq misses
-system.cpu.icache.demand_misses::cpu.inst 958 # number of demand (read+write) misses
-system.cpu.icache.demand_misses::total 958 # number of demand (read+write) misses
-system.cpu.icache.overall_misses::cpu.inst 958 # number of overall misses
-system.cpu.icache.overall_misses::total 958 # number of overall misses
-system.cpu.icache.ReadReq_miss_latency::cpu.inst 76338000 # number of ReadReq miss cycles
-system.cpu.icache.ReadReq_miss_latency::total 76338000 # number of ReadReq miss cycles
-system.cpu.icache.demand_miss_latency::cpu.inst 76338000 # number of demand (read+write) miss cycles
-system.cpu.icache.demand_miss_latency::total 76338000 # number of demand (read+write) miss cycles
-system.cpu.icache.overall_miss_latency::cpu.inst 76338000 # number of overall miss cycles
-system.cpu.icache.overall_miss_latency::total 76338000 # number of overall miss cycles
-system.cpu.icache.ReadReq_accesses::cpu.inst 597989612 # number of ReadReq accesses(hits+misses)
-system.cpu.icache.ReadReq_accesses::total 597989612 # number of ReadReq accesses(hits+misses)
-system.cpu.icache.demand_accesses::cpu.inst 597989612 # number of demand (read+write) accesses
-system.cpu.icache.demand_accesses::total 597989612 # number of demand (read+write) accesses
-system.cpu.icache.overall_accesses::cpu.inst 597989612 # number of overall (read+write) accesses
-system.cpu.icache.overall_accesses::total 597989612 # number of overall (read+write) accesses
+system.cpu.icache.tags.occ_blocks::cpu.inst 752.953880 # Average occupied blocks per requestor
+system.cpu.icache.tags.occ_percent::cpu.inst 0.367653 # Average percentage of cache occupancy
+system.cpu.icache.tags.occ_percent::total 0.367653 # Average percentage of cache occupancy
+system.cpu.icache.tags.occ_task_id_blocks::1024 960 # Occupied blocks per task id
+system.cpu.icache.tags.age_task_id_blocks_1024::0 82 # Occupied blocks per task id
+system.cpu.icache.tags.age_task_id_blocks_1024::4 878 # Occupied blocks per task id
+system.cpu.icache.tags.occ_task_id_percent::1024 0.468750 # Percentage of cache occupancy per task id
+system.cpu.icache.tags.tag_accesses 1200147017 # Number of tag accesses
+system.cpu.icache.tags.data_accesses 1200147017 # Number of data accesses
+system.cpu.icache.pwrStateResidencyTicks::UNDEFINED 1219570622500 # Cumulative time (in ticks) in various power states
+system.cpu.icache.ReadReq_hits::cpu.inst 600072064 # number of ReadReq hits
+system.cpu.icache.ReadReq_hits::total 600072064 # number of ReadReq hits
+system.cpu.icache.demand_hits::cpu.inst 600072064 # number of demand (read+write) hits
+system.cpu.icache.demand_hits::total 600072064 # number of demand (read+write) hits
+system.cpu.icache.overall_hits::cpu.inst 600072064 # number of overall hits
+system.cpu.icache.overall_hits::total 600072064 # number of overall hits
+system.cpu.icache.ReadReq_misses::cpu.inst 963 # number of ReadReq misses
+system.cpu.icache.ReadReq_misses::total 963 # number of ReadReq misses
+system.cpu.icache.demand_misses::cpu.inst 963 # number of demand (read+write) misses
+system.cpu.icache.demand_misses::total 963 # number of demand (read+write) misses
+system.cpu.icache.overall_misses::cpu.inst 963 # number of overall misses
+system.cpu.icache.overall_misses::total 963 # number of overall misses
+system.cpu.icache.ReadReq_miss_latency::cpu.inst 76328500 # number of ReadReq miss cycles
+system.cpu.icache.ReadReq_miss_latency::total 76328500 # number of ReadReq miss cycles
+system.cpu.icache.demand_miss_latency::cpu.inst 76328500 # number of demand (read+write) miss cycles
+system.cpu.icache.demand_miss_latency::total 76328500 # number of demand (read+write) miss cycles
+system.cpu.icache.overall_miss_latency::cpu.inst 76328500 # number of overall miss cycles
+system.cpu.icache.overall_miss_latency::total 76328500 # number of overall miss cycles
+system.cpu.icache.ReadReq_accesses::cpu.inst 600073027 # number of ReadReq accesses(hits+misses)
+system.cpu.icache.ReadReq_accesses::total 600073027 # number of ReadReq accesses(hits+misses)
+system.cpu.icache.demand_accesses::cpu.inst 600073027 # number of demand (read+write) accesses
+system.cpu.icache.demand_accesses::total 600073027 # number of demand (read+write) accesses
+system.cpu.icache.overall_accesses::cpu.inst 600073027 # number of overall (read+write) accesses
+system.cpu.icache.overall_accesses::total 600073027 # number of overall (read+write) accesses
system.cpu.icache.ReadReq_miss_rate::cpu.inst 0.000002 # miss rate for ReadReq accesses
system.cpu.icache.ReadReq_miss_rate::total 0.000002 # miss rate for ReadReq accesses
system.cpu.icache.demand_miss_rate::cpu.inst 0.000002 # miss rate for demand accesses
system.cpu.icache.demand_miss_rate::total 0.000002 # miss rate for demand accesses
system.cpu.icache.overall_miss_rate::cpu.inst 0.000002 # miss rate for overall accesses
system.cpu.icache.overall_miss_rate::total 0.000002 # miss rate for overall accesses
-system.cpu.icache.ReadReq_avg_miss_latency::cpu.inst 79684.759916 # average ReadReq miss latency
-system.cpu.icache.ReadReq_avg_miss_latency::total 79684.759916 # average ReadReq miss latency
-system.cpu.icache.demand_avg_miss_latency::cpu.inst 79684.759916 # average overall miss latency
-system.cpu.icache.demand_avg_miss_latency::total 79684.759916 # average overall miss latency
-system.cpu.icache.overall_avg_miss_latency::cpu.inst 79684.759916 # average overall miss latency
-system.cpu.icache.overall_avg_miss_latency::total 79684.759916 # average overall miss latency
+system.cpu.icache.ReadReq_avg_miss_latency::cpu.inst 79261.163032 # average ReadReq miss latency
+system.cpu.icache.ReadReq_avg_miss_latency::total 79261.163032 # average ReadReq miss latency
+system.cpu.icache.demand_avg_miss_latency::cpu.inst 79261.163032 # average overall miss latency
+system.cpu.icache.demand_avg_miss_latency::total 79261.163032 # average overall miss latency
+system.cpu.icache.overall_avg_miss_latency::cpu.inst 79261.163032 # average overall miss latency
+system.cpu.icache.overall_avg_miss_latency::total 79261.163032 # average overall miss latency
system.cpu.icache.blocked_cycles::no_mshrs 0 # number of cycles access was blocked
system.cpu.icache.blocked_cycles::no_targets 0 # number of cycles access was blocked
system.cpu.icache.blocked::no_mshrs 0 # number of cycles access was blocked
@@ -565,254 +563,256 @@ system.cpu.icache.avg_blocked_cycles::no_mshrs nan
system.cpu.icache.avg_blocked_cycles::no_targets nan # average number of cycles each access was blocked
system.cpu.icache.writebacks::writebacks 3 # number of writebacks
system.cpu.icache.writebacks::total 3 # number of writebacks
-system.cpu.icache.ReadReq_mshr_misses::cpu.inst 958 # number of ReadReq MSHR misses
-system.cpu.icache.ReadReq_mshr_misses::total 958 # number of ReadReq MSHR misses
-system.cpu.icache.demand_mshr_misses::cpu.inst 958 # number of demand (read+write) MSHR misses
-system.cpu.icache.demand_mshr_misses::total 958 # number of demand (read+write) MSHR misses
-system.cpu.icache.overall_mshr_misses::cpu.inst 958 # number of overall MSHR misses
-system.cpu.icache.overall_mshr_misses::total 958 # number of overall MSHR misses
-system.cpu.icache.ReadReq_mshr_miss_latency::cpu.inst 75380000 # number of ReadReq MSHR miss cycles
-system.cpu.icache.ReadReq_mshr_miss_latency::total 75380000 # number of ReadReq MSHR miss cycles
-system.cpu.icache.demand_mshr_miss_latency::cpu.inst 75380000 # number of demand (read+write) MSHR miss cycles
-system.cpu.icache.demand_mshr_miss_latency::total 75380000 # number of demand (read+write) MSHR miss cycles
-system.cpu.icache.overall_mshr_miss_latency::cpu.inst 75380000 # number of overall MSHR miss cycles
-system.cpu.icache.overall_mshr_miss_latency::total 75380000 # number of overall MSHR miss cycles
+system.cpu.icache.ReadReq_mshr_misses::cpu.inst 963 # number of ReadReq MSHR misses
+system.cpu.icache.ReadReq_mshr_misses::total 963 # number of ReadReq MSHR misses
+system.cpu.icache.demand_mshr_misses::cpu.inst 963 # number of demand (read+write) MSHR misses
+system.cpu.icache.demand_mshr_misses::total 963 # number of demand (read+write) MSHR misses
+system.cpu.icache.overall_mshr_misses::cpu.inst 963 # number of overall MSHR misses
+system.cpu.icache.overall_mshr_misses::total 963 # number of overall MSHR misses
+system.cpu.icache.ReadReq_mshr_miss_latency::cpu.inst 75365500 # number of ReadReq MSHR miss cycles
+system.cpu.icache.ReadReq_mshr_miss_latency::total 75365500 # number of ReadReq MSHR miss cycles
+system.cpu.icache.demand_mshr_miss_latency::cpu.inst 75365500 # number of demand (read+write) MSHR miss cycles
+system.cpu.icache.demand_mshr_miss_latency::total 75365500 # number of demand (read+write) MSHR miss cycles
+system.cpu.icache.overall_mshr_miss_latency::cpu.inst 75365500 # number of overall MSHR miss cycles
+system.cpu.icache.overall_mshr_miss_latency::total 75365500 # number of overall MSHR miss cycles
system.cpu.icache.ReadReq_mshr_miss_rate::cpu.inst 0.000002 # mshr miss rate for ReadReq accesses
system.cpu.icache.ReadReq_mshr_miss_rate::total 0.000002 # mshr miss rate for ReadReq accesses
system.cpu.icache.demand_mshr_miss_rate::cpu.inst 0.000002 # mshr miss rate for demand accesses
system.cpu.icache.demand_mshr_miss_rate::total 0.000002 # mshr miss rate for demand accesses
system.cpu.icache.overall_mshr_miss_rate::cpu.inst 0.000002 # mshr miss rate for overall accesses
system.cpu.icache.overall_mshr_miss_rate::total 0.000002 # mshr miss rate for overall accesses
-system.cpu.icache.ReadReq_avg_mshr_miss_latency::cpu.inst 78684.759916 # average ReadReq mshr miss latency
-system.cpu.icache.ReadReq_avg_mshr_miss_latency::total 78684.759916 # average ReadReq mshr miss latency
-system.cpu.icache.demand_avg_mshr_miss_latency::cpu.inst 78684.759916 # average overall mshr miss latency
-system.cpu.icache.demand_avg_mshr_miss_latency::total 78684.759916 # average overall mshr miss latency
-system.cpu.icache.overall_avg_mshr_miss_latency::cpu.inst 78684.759916 # average overall mshr miss latency
-system.cpu.icache.overall_avg_mshr_miss_latency::total 78684.759916 # average overall mshr miss latency
-system.cpu.l2cache.tags.pwrStateResidencyTicks::UNDEFINED 1208777694500 # Cumulative time (in ticks) in various power states
-system.cpu.l2cache.tags.replacements 1920891 # number of replacements
-system.cpu.l2cache.tags.tagsinuse 30765.315888 # Cycle average of tags in use
-system.cpu.l2cache.tags.total_refs 14409692 # Total number of references to valid blocks.
-system.cpu.l2cache.tags.sampled_refs 1950696 # Sample count of references to valid blocks.
-system.cpu.l2cache.tags.avg_refs 7.386949 # Average number of references to valid blocks.
-system.cpu.l2cache.tags.warmup_cycle 89219766000 # Cycle when the warmup percentage was hit.
-system.cpu.l2cache.tags.occ_blocks::writebacks 14798.392410 # Average occupied blocks per requestor
-system.cpu.l2cache.tags.occ_blocks::cpu.inst 42.817395 # Average occupied blocks per requestor
-system.cpu.l2cache.tags.occ_blocks::cpu.data 15924.106083 # Average occupied blocks per requestor
-system.cpu.l2cache.tags.occ_percent::writebacks 0.451611 # Average percentage of cache occupancy
+system.cpu.icache.ReadReq_avg_mshr_miss_latency::cpu.inst 78261.163032 # average ReadReq mshr miss latency
+system.cpu.icache.ReadReq_avg_mshr_miss_latency::total 78261.163032 # average ReadReq mshr miss latency
+system.cpu.icache.demand_avg_mshr_miss_latency::cpu.inst 78261.163032 # average overall mshr miss latency
+system.cpu.icache.demand_avg_mshr_miss_latency::total 78261.163032 # average overall mshr miss latency
+system.cpu.icache.overall_avg_mshr_miss_latency::cpu.inst 78261.163032 # average overall mshr miss latency
+system.cpu.icache.overall_avg_mshr_miss_latency::total 78261.163032 # average overall mshr miss latency
+system.cpu.l2cache.tags.pwrStateResidencyTicks::UNDEFINED 1219570622500 # Cumulative time (in ticks) in various power states
+system.cpu.l2cache.tags.replacements 1920902 # number of replacements
+system.cpu.l2cache.tags.tagsinuse 30774.220213 # Cycle average of tags in use
+system.cpu.l2cache.tags.total_refs 14409691 # Total number of references to valid blocks.
+system.cpu.l2cache.tags.sampled_refs 1950707 # Sample count of references to valid blocks.
+system.cpu.l2cache.tags.avg_refs 7.386907 # Average number of references to valid blocks.
+system.cpu.l2cache.tags.warmup_cycle 89512155000 # Cycle when the warmup percentage was hit.
+system.cpu.l2cache.tags.occ_blocks::writebacks 14829.947034 # Average occupied blocks per requestor
+system.cpu.l2cache.tags.occ_blocks::cpu.inst 42.825587 # Average occupied blocks per requestor
+system.cpu.l2cache.tags.occ_blocks::cpu.data 15901.447592 # Average occupied blocks per requestor
+system.cpu.l2cache.tags.occ_percent::writebacks 0.452574 # Average percentage of cache occupancy
system.cpu.l2cache.tags.occ_percent::cpu.inst 0.001307 # Average percentage of cache occupancy
-system.cpu.l2cache.tags.occ_percent::cpu.data 0.485965 # Average percentage of cache occupancy
-system.cpu.l2cache.tags.occ_percent::total 0.938883 # Average percentage of cache occupancy
+system.cpu.l2cache.tags.occ_percent::cpu.data 0.485274 # Average percentage of cache occupancy
+system.cpu.l2cache.tags.occ_percent::total 0.939155 # Average percentage of cache occupancy
system.cpu.l2cache.tags.occ_task_id_blocks::1024 29805 # Occupied blocks per task id
-system.cpu.l2cache.tags.age_task_id_blocks_1024::0 155 # Occupied blocks per task id
+system.cpu.l2cache.tags.age_task_id_blocks_1024::0 156 # Occupied blocks per task id
system.cpu.l2cache.tags.age_task_id_blocks_1024::1 36 # Occupied blocks per task id
-system.cpu.l2cache.tags.age_task_id_blocks_1024::2 1217 # Occupied blocks per task id
-system.cpu.l2cache.tags.age_task_id_blocks_1024::3 12865 # Occupied blocks per task id
-system.cpu.l2cache.tags.age_task_id_blocks_1024::4 15532 # Occupied blocks per task id
+system.cpu.l2cache.tags.age_task_id_blocks_1024::2 1218 # Occupied blocks per task id
+system.cpu.l2cache.tags.age_task_id_blocks_1024::3 12864 # Occupied blocks per task id
+system.cpu.l2cache.tags.age_task_id_blocks_1024::4 15531 # Occupied blocks per task id
system.cpu.l2cache.tags.occ_task_id_percent::1024 0.909576 # Percentage of cache occupancy per task id
-system.cpu.l2cache.tags.tag_accesses 149830076 # Number of tag accesses
-system.cpu.l2cache.tags.data_accesses 149830076 # Number of data accesses
-system.cpu.l2cache.pwrStateResidencyTicks::UNDEFINED 1208777694500 # Cumulative time (in ticks) in various power states
-system.cpu.l2cache.WritebackDirty_hits::writebacks 3686603 # number of WritebackDirty hits
-system.cpu.l2cache.WritebackDirty_hits::total 3686603 # number of WritebackDirty hits
+system.cpu.l2cache.tags.tag_accesses 149830158 # Number of tag accesses
+system.cpu.l2cache.tags.data_accesses 149830158 # Number of data accesses
+system.cpu.l2cache.pwrStateResidencyTicks::UNDEFINED 1219570622500 # Cumulative time (in ticks) in various power states
+system.cpu.l2cache.WritebackDirty_hits::writebacks 3686661 # number of WritebackDirty hits
+system.cpu.l2cache.WritebackDirty_hits::total 3686661 # number of WritebackDirty hits
system.cpu.l2cache.WritebackClean_hits::writebacks 3 # number of WritebackClean hits
system.cpu.l2cache.WritebackClean_hits::total 3 # number of WritebackClean hits
-system.cpu.l2cache.ReadExReq_hits::cpu.data 1106830 # number of ReadExReq hits
-system.cpu.l2cache.ReadExReq_hits::total 1106830 # number of ReadExReq hits
-system.cpu.l2cache.ReadSharedReq_hits::cpu.data 6066582 # number of ReadSharedReq hits
-system.cpu.l2cache.ReadSharedReq_hits::total 6066582 # number of ReadSharedReq hits
-system.cpu.l2cache.demand_hits::cpu.data 7173412 # number of demand (read+write) hits
-system.cpu.l2cache.demand_hits::total 7173412 # number of demand (read+write) hits
-system.cpu.l2cache.overall_hits::cpu.data 7173412 # number of overall hits
-system.cpu.l2cache.overall_hits::total 7173412 # number of overall hits
-system.cpu.l2cache.ReadExReq_misses::cpu.data 780510 # number of ReadExReq misses
-system.cpu.l2cache.ReadExReq_misses::total 780510 # number of ReadExReq misses
-system.cpu.l2cache.ReadCleanReq_misses::cpu.inst 958 # number of ReadCleanReq misses
-system.cpu.l2cache.ReadCleanReq_misses::total 958 # number of ReadCleanReq misses
-system.cpu.l2cache.ReadSharedReq_misses::cpu.data 1172148 # number of ReadSharedReq misses
-system.cpu.l2cache.ReadSharedReq_misses::total 1172148 # number of ReadSharedReq misses
-system.cpu.l2cache.demand_misses::cpu.inst 958 # number of demand (read+write) misses
-system.cpu.l2cache.demand_misses::cpu.data 1952658 # number of demand (read+write) misses
-system.cpu.l2cache.demand_misses::total 1953616 # number of demand (read+write) misses
-system.cpu.l2cache.overall_misses::cpu.inst 958 # number of overall misses
-system.cpu.l2cache.overall_misses::cpu.data 1952658 # number of overall misses
-system.cpu.l2cache.overall_misses::total 1953616 # number of overall misses
-system.cpu.l2cache.ReadExReq_miss_latency::cpu.data 68734828000 # number of ReadExReq miss cycles
-system.cpu.l2cache.ReadExReq_miss_latency::total 68734828000 # number of ReadExReq miss cycles
-system.cpu.l2cache.ReadCleanReq_miss_latency::cpu.inst 73941000 # number of ReadCleanReq miss cycles
-system.cpu.l2cache.ReadCleanReq_miss_latency::total 73941000 # number of ReadCleanReq miss cycles
-system.cpu.l2cache.ReadSharedReq_miss_latency::cpu.data 102426227000 # number of ReadSharedReq miss cycles
-system.cpu.l2cache.ReadSharedReq_miss_latency::total 102426227000 # number of ReadSharedReq miss cycles
-system.cpu.l2cache.demand_miss_latency::cpu.inst 73941000 # number of demand (read+write) miss cycles
-system.cpu.l2cache.demand_miss_latency::cpu.data 171161055000 # number of demand (read+write) miss cycles
-system.cpu.l2cache.demand_miss_latency::total 171234996000 # number of demand (read+write) miss cycles
-system.cpu.l2cache.overall_miss_latency::cpu.inst 73941000 # number of overall miss cycles
-system.cpu.l2cache.overall_miss_latency::cpu.data 171161055000 # number of overall miss cycles
-system.cpu.l2cache.overall_miss_latency::total 171234996000 # number of overall miss cycles
-system.cpu.l2cache.WritebackDirty_accesses::writebacks 3686603 # number of WritebackDirty accesses(hits+misses)
-system.cpu.l2cache.WritebackDirty_accesses::total 3686603 # number of WritebackDirty accesses(hits+misses)
+system.cpu.l2cache.ReadExReq_hits::cpu.data 1106827 # number of ReadExReq hits
+system.cpu.l2cache.ReadExReq_hits::total 1106827 # number of ReadExReq hits
+system.cpu.l2cache.ReadSharedReq_hits::cpu.data 6066581 # number of ReadSharedReq hits
+system.cpu.l2cache.ReadSharedReq_hits::total 6066581 # number of ReadSharedReq hits
+system.cpu.l2cache.demand_hits::cpu.data 7173408 # number of demand (read+write) hits
+system.cpu.l2cache.demand_hits::total 7173408 # number of demand (read+write) hits
+system.cpu.l2cache.overall_hits::cpu.data 7173408 # number of overall hits
+system.cpu.l2cache.overall_hits::total 7173408 # number of overall hits
+system.cpu.l2cache.ReadExReq_misses::cpu.data 780512 # number of ReadExReq misses
+system.cpu.l2cache.ReadExReq_misses::total 780512 # number of ReadExReq misses
+system.cpu.l2cache.ReadCleanReq_misses::cpu.inst 963 # number of ReadCleanReq misses
+system.cpu.l2cache.ReadCleanReq_misses::total 963 # number of ReadCleanReq misses
+system.cpu.l2cache.ReadSharedReq_misses::cpu.data 1172152 # number of ReadSharedReq misses
+system.cpu.l2cache.ReadSharedReq_misses::total 1172152 # number of ReadSharedReq misses
+system.cpu.l2cache.demand_misses::cpu.inst 963 # number of demand (read+write) misses
+system.cpu.l2cache.demand_misses::cpu.data 1952664 # number of demand (read+write) misses
+system.cpu.l2cache.demand_misses::total 1953627 # number of demand (read+write) misses
+system.cpu.l2cache.overall_misses::cpu.inst 963 # number of overall misses
+system.cpu.l2cache.overall_misses::cpu.data 1952664 # number of overall misses
+system.cpu.l2cache.overall_misses::total 1953627 # number of overall misses
+system.cpu.l2cache.ReadExReq_miss_latency::cpu.data 68817926000 # number of ReadExReq miss cycles
+system.cpu.l2cache.ReadExReq_miss_latency::total 68817926000 # number of ReadExReq miss cycles
+system.cpu.l2cache.ReadCleanReq_miss_latency::cpu.inst 73918500 # number of ReadCleanReq miss cycles
+system.cpu.l2cache.ReadCleanReq_miss_latency::total 73918500 # number of ReadCleanReq miss cycles
+system.cpu.l2cache.ReadSharedReq_miss_latency::cpu.data 102249953000 # number of ReadSharedReq miss cycles
+system.cpu.l2cache.ReadSharedReq_miss_latency::total 102249953000 # number of ReadSharedReq miss cycles
+system.cpu.l2cache.demand_miss_latency::cpu.inst 73918500 # number of demand (read+write) miss cycles
+system.cpu.l2cache.demand_miss_latency::cpu.data 171067879000 # number of demand (read+write) miss cycles
+system.cpu.l2cache.demand_miss_latency::total 171141797500 # number of demand (read+write) miss cycles
+system.cpu.l2cache.overall_miss_latency::cpu.inst 73918500 # number of overall miss cycles
+system.cpu.l2cache.overall_miss_latency::cpu.data 171067879000 # number of overall miss cycles
+system.cpu.l2cache.overall_miss_latency::total 171141797500 # number of overall miss cycles
+system.cpu.l2cache.WritebackDirty_accesses::writebacks 3686661 # number of WritebackDirty accesses(hits+misses)
+system.cpu.l2cache.WritebackDirty_accesses::total 3686661 # number of WritebackDirty accesses(hits+misses)
system.cpu.l2cache.WritebackClean_accesses::writebacks 3 # number of WritebackClean accesses(hits+misses)
system.cpu.l2cache.WritebackClean_accesses::total 3 # number of WritebackClean accesses(hits+misses)
-system.cpu.l2cache.ReadExReq_accesses::cpu.data 1887340 # number of ReadExReq accesses(hits+misses)
-system.cpu.l2cache.ReadExReq_accesses::total 1887340 # number of ReadExReq accesses(hits+misses)
-system.cpu.l2cache.ReadCleanReq_accesses::cpu.inst 958 # number of ReadCleanReq accesses(hits+misses)
-system.cpu.l2cache.ReadCleanReq_accesses::total 958 # number of ReadCleanReq accesses(hits+misses)
-system.cpu.l2cache.ReadSharedReq_accesses::cpu.data 7238730 # number of ReadSharedReq accesses(hits+misses)
-system.cpu.l2cache.ReadSharedReq_accesses::total 7238730 # number of ReadSharedReq accesses(hits+misses)
-system.cpu.l2cache.demand_accesses::cpu.inst 958 # number of demand (read+write) accesses
-system.cpu.l2cache.demand_accesses::cpu.data 9126070 # number of demand (read+write) accesses
-system.cpu.l2cache.demand_accesses::total 9127028 # number of demand (read+write) accesses
-system.cpu.l2cache.overall_accesses::cpu.inst 958 # number of overall (read+write) accesses
-system.cpu.l2cache.overall_accesses::cpu.data 9126070 # number of overall (read+write) accesses
-system.cpu.l2cache.overall_accesses::total 9127028 # number of overall (read+write) accesses
-system.cpu.l2cache.ReadExReq_miss_rate::cpu.data 0.413550 # miss rate for ReadExReq accesses
-system.cpu.l2cache.ReadExReq_miss_rate::total 0.413550 # miss rate for ReadExReq accesses
+system.cpu.l2cache.ReadExReq_accesses::cpu.data 1887339 # number of ReadExReq accesses(hits+misses)
+system.cpu.l2cache.ReadExReq_accesses::total 1887339 # number of ReadExReq accesses(hits+misses)
+system.cpu.l2cache.ReadCleanReq_accesses::cpu.inst 963 # number of ReadCleanReq accesses(hits+misses)
+system.cpu.l2cache.ReadCleanReq_accesses::total 963 # number of ReadCleanReq accesses(hits+misses)
+system.cpu.l2cache.ReadSharedReq_accesses::cpu.data 7238733 # number of ReadSharedReq accesses(hits+misses)
+system.cpu.l2cache.ReadSharedReq_accesses::total 7238733 # number of ReadSharedReq accesses(hits+misses)
+system.cpu.l2cache.demand_accesses::cpu.inst 963 # number of demand (read+write) accesses
+system.cpu.l2cache.demand_accesses::cpu.data 9126072 # number of demand (read+write) accesses
+system.cpu.l2cache.demand_accesses::total 9127035 # number of demand (read+write) accesses
+system.cpu.l2cache.overall_accesses::cpu.inst 963 # number of overall (read+write) accesses
+system.cpu.l2cache.overall_accesses::cpu.data 9126072 # number of overall (read+write) accesses
+system.cpu.l2cache.overall_accesses::total 9127035 # number of overall (read+write) accesses
+system.cpu.l2cache.ReadExReq_miss_rate::cpu.data 0.413552 # miss rate for ReadExReq accesses
+system.cpu.l2cache.ReadExReq_miss_rate::total 0.413552 # miss rate for ReadExReq accesses
system.cpu.l2cache.ReadCleanReq_miss_rate::cpu.inst 1 # miss rate for ReadCleanReq accesses
system.cpu.l2cache.ReadCleanReq_miss_rate::total 1 # miss rate for ReadCleanReq accesses
-system.cpu.l2cache.ReadSharedReq_miss_rate::cpu.data 0.161927 # miss rate for ReadSharedReq accesses
-system.cpu.l2cache.ReadSharedReq_miss_rate::total 0.161927 # miss rate for ReadSharedReq accesses
+system.cpu.l2cache.ReadSharedReq_miss_rate::cpu.data 0.161928 # miss rate for ReadSharedReq accesses
+system.cpu.l2cache.ReadSharedReq_miss_rate::total 0.161928 # miss rate for ReadSharedReq accesses
system.cpu.l2cache.demand_miss_rate::cpu.inst 1 # miss rate for demand accesses
system.cpu.l2cache.demand_miss_rate::cpu.data 0.213965 # miss rate for demand accesses
-system.cpu.l2cache.demand_miss_rate::total 0.214047 # miss rate for demand accesses
+system.cpu.l2cache.demand_miss_rate::total 0.214048 # miss rate for demand accesses
system.cpu.l2cache.overall_miss_rate::cpu.inst 1 # miss rate for overall accesses
system.cpu.l2cache.overall_miss_rate::cpu.data 0.213965 # miss rate for overall accesses
-system.cpu.l2cache.overall_miss_rate::total 0.214047 # miss rate for overall accesses
-system.cpu.l2cache.ReadExReq_avg_miss_latency::cpu.data 88063.994055 # average ReadExReq miss latency
-system.cpu.l2cache.ReadExReq_avg_miss_latency::total 88063.994055 # average ReadExReq miss latency
-system.cpu.l2cache.ReadCleanReq_avg_miss_latency::cpu.inst 77182.672234 # average ReadCleanReq miss latency
-system.cpu.l2cache.ReadCleanReq_avg_miss_latency::total 77182.672234 # average ReadCleanReq miss latency
-system.cpu.l2cache.ReadSharedReq_avg_miss_latency::cpu.data 87383.356880 # average ReadSharedReq miss latency
-system.cpu.l2cache.ReadSharedReq_avg_miss_latency::total 87383.356880 # average ReadSharedReq miss latency
-system.cpu.l2cache.demand_avg_miss_latency::cpu.inst 77182.672234 # average overall miss latency
-system.cpu.l2cache.demand_avg_miss_latency::cpu.data 87655.418921 # average overall miss latency
-system.cpu.l2cache.demand_avg_miss_latency::total 87650.283372 # average overall miss latency
-system.cpu.l2cache.overall_avg_miss_latency::cpu.inst 77182.672234 # average overall miss latency
-system.cpu.l2cache.overall_avg_miss_latency::cpu.data 87655.418921 # average overall miss latency
-system.cpu.l2cache.overall_avg_miss_latency::total 87650.283372 # average overall miss latency
+system.cpu.l2cache.overall_miss_rate::total 0.214048 # miss rate for overall accesses
+system.cpu.l2cache.ReadExReq_avg_miss_latency::cpu.data 88170.234410 # average ReadExReq miss latency
+system.cpu.l2cache.ReadExReq_avg_miss_latency::total 88170.234410 # average ReadExReq miss latency
+system.cpu.l2cache.ReadCleanReq_avg_miss_latency::cpu.inst 76758.566978 # average ReadCleanReq miss latency
+system.cpu.l2cache.ReadCleanReq_avg_miss_latency::total 76758.566978 # average ReadCleanReq miss latency
+system.cpu.l2cache.ReadSharedReq_avg_miss_latency::cpu.data 87232.673749 # average ReadSharedReq miss latency
+system.cpu.l2cache.ReadSharedReq_avg_miss_latency::total 87232.673749 # average ReadSharedReq miss latency
+system.cpu.l2cache.demand_avg_miss_latency::cpu.inst 76758.566978 # average overall miss latency
+system.cpu.l2cache.demand_avg_miss_latency::cpu.data 87607.432205 # average overall miss latency
+system.cpu.l2cache.demand_avg_miss_latency::total 87602.084482 # average overall miss latency
+system.cpu.l2cache.overall_avg_miss_latency::cpu.inst 76758.566978 # average overall miss latency
+system.cpu.l2cache.overall_avg_miss_latency::cpu.data 87607.432205 # average overall miss latency
+system.cpu.l2cache.overall_avg_miss_latency::total 87602.084482 # average overall miss latency
system.cpu.l2cache.blocked_cycles::no_mshrs 0 # number of cycles access was blocked
system.cpu.l2cache.blocked_cycles::no_targets 0 # number of cycles access was blocked
system.cpu.l2cache.blocked::no_mshrs 0 # number of cycles access was blocked
system.cpu.l2cache.blocked::no_targets 0 # number of cycles access was blocked
system.cpu.l2cache.avg_blocked_cycles::no_mshrs nan # average number of cycles each access was blocked
system.cpu.l2cache.avg_blocked_cycles::no_targets nan # average number of cycles each access was blocked
-system.cpu.l2cache.writebacks::writebacks 1022139 # number of writebacks
-system.cpu.l2cache.writebacks::total 1022139 # number of writebacks
+system.cpu.l2cache.writebacks::writebacks 1022145 # number of writebacks
+system.cpu.l2cache.writebacks::total 1022145 # number of writebacks
system.cpu.l2cache.CleanEvict_mshr_misses::writebacks 242 # number of CleanEvict MSHR misses
system.cpu.l2cache.CleanEvict_mshr_misses::total 242 # number of CleanEvict MSHR misses
-system.cpu.l2cache.ReadExReq_mshr_misses::cpu.data 780510 # number of ReadExReq MSHR misses
-system.cpu.l2cache.ReadExReq_mshr_misses::total 780510 # number of ReadExReq MSHR misses
-system.cpu.l2cache.ReadCleanReq_mshr_misses::cpu.inst 958 # number of ReadCleanReq MSHR misses
-system.cpu.l2cache.ReadCleanReq_mshr_misses::total 958 # number of ReadCleanReq MSHR misses
-system.cpu.l2cache.ReadSharedReq_mshr_misses::cpu.data 1172148 # number of ReadSharedReq MSHR misses
-system.cpu.l2cache.ReadSharedReq_mshr_misses::total 1172148 # number of ReadSharedReq MSHR misses
-system.cpu.l2cache.demand_mshr_misses::cpu.inst 958 # number of demand (read+write) MSHR misses
-system.cpu.l2cache.demand_mshr_misses::cpu.data 1952658 # number of demand (read+write) MSHR misses
-system.cpu.l2cache.demand_mshr_misses::total 1953616 # number of demand (read+write) MSHR misses
-system.cpu.l2cache.overall_mshr_misses::cpu.inst 958 # number of overall MSHR misses
-system.cpu.l2cache.overall_mshr_misses::cpu.data 1952658 # number of overall MSHR misses
-system.cpu.l2cache.overall_mshr_misses::total 1953616 # number of overall MSHR misses
-system.cpu.l2cache.ReadExReq_mshr_miss_latency::cpu.data 60929728000 # number of ReadExReq MSHR miss cycles
-system.cpu.l2cache.ReadExReq_mshr_miss_latency::total 60929728000 # number of ReadExReq MSHR miss cycles
-system.cpu.l2cache.ReadCleanReq_mshr_miss_latency::cpu.inst 64361000 # number of ReadCleanReq MSHR miss cycles
-system.cpu.l2cache.ReadCleanReq_mshr_miss_latency::total 64361000 # number of ReadCleanReq MSHR miss cycles
-system.cpu.l2cache.ReadSharedReq_mshr_miss_latency::cpu.data 90704747000 # number of ReadSharedReq MSHR miss cycles
-system.cpu.l2cache.ReadSharedReq_mshr_miss_latency::total 90704747000 # number of ReadSharedReq MSHR miss cycles
-system.cpu.l2cache.demand_mshr_miss_latency::cpu.inst 64361000 # number of demand (read+write) MSHR miss cycles
-system.cpu.l2cache.demand_mshr_miss_latency::cpu.data 151634475000 # number of demand (read+write) MSHR miss cycles
-system.cpu.l2cache.demand_mshr_miss_latency::total 151698836000 # number of demand (read+write) MSHR miss cycles
-system.cpu.l2cache.overall_mshr_miss_latency::cpu.inst 64361000 # number of overall MSHR miss cycles
-system.cpu.l2cache.overall_mshr_miss_latency::cpu.data 151634475000 # number of overall MSHR miss cycles
-system.cpu.l2cache.overall_mshr_miss_latency::total 151698836000 # number of overall MSHR miss cycles
+system.cpu.l2cache.ReadExReq_mshr_misses::cpu.data 780512 # number of ReadExReq MSHR misses
+system.cpu.l2cache.ReadExReq_mshr_misses::total 780512 # number of ReadExReq MSHR misses
+system.cpu.l2cache.ReadCleanReq_mshr_misses::cpu.inst 963 # number of ReadCleanReq MSHR misses
+system.cpu.l2cache.ReadCleanReq_mshr_misses::total 963 # number of ReadCleanReq MSHR misses
+system.cpu.l2cache.ReadSharedReq_mshr_misses::cpu.data 1172152 # number of ReadSharedReq MSHR misses
+system.cpu.l2cache.ReadSharedReq_mshr_misses::total 1172152 # number of ReadSharedReq MSHR misses
+system.cpu.l2cache.demand_mshr_misses::cpu.inst 963 # number of demand (read+write) MSHR misses
+system.cpu.l2cache.demand_mshr_misses::cpu.data 1952664 # number of demand (read+write) MSHR misses
+system.cpu.l2cache.demand_mshr_misses::total 1953627 # number of demand (read+write) MSHR misses
+system.cpu.l2cache.overall_mshr_misses::cpu.inst 963 # number of overall MSHR misses
+system.cpu.l2cache.overall_mshr_misses::cpu.data 1952664 # number of overall MSHR misses
+system.cpu.l2cache.overall_mshr_misses::total 1953627 # number of overall MSHR misses
+system.cpu.l2cache.ReadExReq_mshr_miss_latency::cpu.data 61012806000 # number of ReadExReq MSHR miss cycles
+system.cpu.l2cache.ReadExReq_mshr_miss_latency::total 61012806000 # number of ReadExReq MSHR miss cycles
+system.cpu.l2cache.ReadCleanReq_mshr_miss_latency::cpu.inst 64288500 # number of ReadCleanReq MSHR miss cycles
+system.cpu.l2cache.ReadCleanReq_mshr_miss_latency::total 64288500 # number of ReadCleanReq MSHR miss cycles
+system.cpu.l2cache.ReadSharedReq_mshr_miss_latency::cpu.data 90528433000 # number of ReadSharedReq MSHR miss cycles
+system.cpu.l2cache.ReadSharedReq_mshr_miss_latency::total 90528433000 # number of ReadSharedReq MSHR miss cycles
+system.cpu.l2cache.demand_mshr_miss_latency::cpu.inst 64288500 # number of demand (read+write) MSHR miss cycles
+system.cpu.l2cache.demand_mshr_miss_latency::cpu.data 151541239000 # number of demand (read+write) MSHR miss cycles
+system.cpu.l2cache.demand_mshr_miss_latency::total 151605527500 # number of demand (read+write) MSHR miss cycles
+system.cpu.l2cache.overall_mshr_miss_latency::cpu.inst 64288500 # number of overall MSHR miss cycles
+system.cpu.l2cache.overall_mshr_miss_latency::cpu.data 151541239000 # number of overall MSHR miss cycles
+system.cpu.l2cache.overall_mshr_miss_latency::total 151605527500 # number of overall MSHR miss cycles
system.cpu.l2cache.CleanEvict_mshr_miss_rate::writebacks inf # mshr miss rate for CleanEvict accesses
system.cpu.l2cache.CleanEvict_mshr_miss_rate::total inf # mshr miss rate for CleanEvict accesses
-system.cpu.l2cache.ReadExReq_mshr_miss_rate::cpu.data 0.413550 # mshr miss rate for ReadExReq accesses
-system.cpu.l2cache.ReadExReq_mshr_miss_rate::total 0.413550 # mshr miss rate for ReadExReq accesses
+system.cpu.l2cache.ReadExReq_mshr_miss_rate::cpu.data 0.413552 # mshr miss rate for ReadExReq accesses
+system.cpu.l2cache.ReadExReq_mshr_miss_rate::total 0.413552 # mshr miss rate for ReadExReq accesses
system.cpu.l2cache.ReadCleanReq_mshr_miss_rate::cpu.inst 1 # mshr miss rate for ReadCleanReq accesses
system.cpu.l2cache.ReadCleanReq_mshr_miss_rate::total 1 # mshr miss rate for ReadCleanReq accesses
-system.cpu.l2cache.ReadSharedReq_mshr_miss_rate::cpu.data 0.161927 # mshr miss rate for ReadSharedReq accesses
-system.cpu.l2cache.ReadSharedReq_mshr_miss_rate::total 0.161927 # mshr miss rate for ReadSharedReq accesses
+system.cpu.l2cache.ReadSharedReq_mshr_miss_rate::cpu.data 0.161928 # mshr miss rate for ReadSharedReq accesses
+system.cpu.l2cache.ReadSharedReq_mshr_miss_rate::total 0.161928 # mshr miss rate for ReadSharedReq accesses
system.cpu.l2cache.demand_mshr_miss_rate::cpu.inst 1 # mshr miss rate for demand accesses
system.cpu.l2cache.demand_mshr_miss_rate::cpu.data 0.213965 # mshr miss rate for demand accesses
-system.cpu.l2cache.demand_mshr_miss_rate::total 0.214047 # mshr miss rate for demand accesses
+system.cpu.l2cache.demand_mshr_miss_rate::total 0.214048 # mshr miss rate for demand accesses
system.cpu.l2cache.overall_mshr_miss_rate::cpu.inst 1 # mshr miss rate for overall accesses
system.cpu.l2cache.overall_mshr_miss_rate::cpu.data 0.213965 # mshr miss rate for overall accesses
-system.cpu.l2cache.overall_mshr_miss_rate::total 0.214047 # mshr miss rate for overall accesses
-system.cpu.l2cache.ReadExReq_avg_mshr_miss_latency::cpu.data 78063.994055 # average ReadExReq mshr miss latency
-system.cpu.l2cache.ReadExReq_avg_mshr_miss_latency::total 78063.994055 # average ReadExReq mshr miss latency
-system.cpu.l2cache.ReadCleanReq_avg_mshr_miss_latency::cpu.inst 67182.672234 # average ReadCleanReq mshr miss latency
-system.cpu.l2cache.ReadCleanReq_avg_mshr_miss_latency::total 67182.672234 # average ReadCleanReq mshr miss latency
-system.cpu.l2cache.ReadSharedReq_avg_mshr_miss_latency::cpu.data 77383.356880 # average ReadSharedReq mshr miss latency
-system.cpu.l2cache.ReadSharedReq_avg_mshr_miss_latency::total 77383.356880 # average ReadSharedReq mshr miss latency
-system.cpu.l2cache.demand_avg_mshr_miss_latency::cpu.inst 67182.672234 # average overall mshr miss latency
-system.cpu.l2cache.demand_avg_mshr_miss_latency::cpu.data 77655.418921 # average overall mshr miss latency
-system.cpu.l2cache.demand_avg_mshr_miss_latency::total 77650.283372 # average overall mshr miss latency
-system.cpu.l2cache.overall_avg_mshr_miss_latency::cpu.inst 67182.672234 # average overall mshr miss latency
-system.cpu.l2cache.overall_avg_mshr_miss_latency::cpu.data 77655.418921 # average overall mshr miss latency
-system.cpu.l2cache.overall_avg_mshr_miss_latency::total 77650.283372 # average overall mshr miss latency
-system.cpu.toL2Bus.snoop_filter.tot_requests 18249005 # Total number of requests made to the snoop filter.
-system.cpu.toL2Bus.snoop_filter.hit_single_requests 9121977 # Number of requests hitting in the snoop filter with a single holder of the requested data.
+system.cpu.l2cache.overall_mshr_miss_rate::total 0.214048 # mshr miss rate for overall accesses
+system.cpu.l2cache.ReadExReq_avg_mshr_miss_latency::cpu.data 78170.234410 # average ReadExReq mshr miss latency
+system.cpu.l2cache.ReadExReq_avg_mshr_miss_latency::total 78170.234410 # average ReadExReq mshr miss latency
+system.cpu.l2cache.ReadCleanReq_avg_mshr_miss_latency::cpu.inst 66758.566978 # average ReadCleanReq mshr miss latency
+system.cpu.l2cache.ReadCleanReq_avg_mshr_miss_latency::total 66758.566978 # average ReadCleanReq mshr miss latency
+system.cpu.l2cache.ReadSharedReq_avg_mshr_miss_latency::cpu.data 77232.673749 # average ReadSharedReq mshr miss latency
+system.cpu.l2cache.ReadSharedReq_avg_mshr_miss_latency::total 77232.673749 # average ReadSharedReq mshr miss latency
+system.cpu.l2cache.demand_avg_mshr_miss_latency::cpu.inst 66758.566978 # average overall mshr miss latency
+system.cpu.l2cache.demand_avg_mshr_miss_latency::cpu.data 77607.432205 # average overall mshr miss latency
+system.cpu.l2cache.demand_avg_mshr_miss_latency::total 77602.084482 # average overall mshr miss latency
+system.cpu.l2cache.overall_avg_mshr_miss_latency::cpu.inst 66758.566978 # average overall mshr miss latency
+system.cpu.l2cache.overall_avg_mshr_miss_latency::cpu.data 77607.432205 # average overall mshr miss latency
+system.cpu.l2cache.overall_avg_mshr_miss_latency::total 77602.084482 # average overall mshr miss latency
+system.cpu.toL2Bus.snoop_filter.tot_requests 18249014 # Total number of requests made to the snoop filter.
+system.cpu.toL2Bus.snoop_filter.hit_single_requests 9121979 # Number of requests hitting in the snoop filter with a single holder of the requested data.
system.cpu.toL2Bus.snoop_filter.hit_multi_requests 0 # Number of requests hitting in the snoop filter with multiple (>1) holders of the requested data.
-system.cpu.toL2Bus.snoop_filter.tot_snoops 1268 # Total number of snoops made to the snoop filter.
-system.cpu.toL2Bus.snoop_filter.hit_single_snoops 1268 # Number of snoops hitting in the snoop filter with a single holder of the requested data.
+system.cpu.toL2Bus.snoop_filter.tot_snoops 1272 # Total number of snoops made to the snoop filter.
+system.cpu.toL2Bus.snoop_filter.hit_single_snoops 1272 # Number of snoops hitting in the snoop filter with a single holder of the requested data.
system.cpu.toL2Bus.snoop_filter.hit_multi_snoops 0 # Number of snoops hitting in the snoop filter with multiple (>1) holders of the requested data.
-system.cpu.toL2Bus.pwrStateResidencyTicks::UNDEFINED 1208777694500 # Cumulative time (in ticks) in various power states
-system.cpu.toL2Bus.trans_dist::ReadResp 7239688 # Transaction distribution
-system.cpu.toL2Bus.trans_dist::WritebackDirty 4708742 # Transaction distribution
+system.cpu.toL2Bus.pwrStateResidencyTicks::UNDEFINED 1219570622500 # Cumulative time (in ticks) in various power states
+system.cpu.toL2Bus.trans_dist::ReadResp 7239696 # Transaction distribution
+system.cpu.toL2Bus.trans_dist::WritebackDirty 4708806 # Transaction distribution
system.cpu.toL2Bus.trans_dist::WritebackClean 3 # Transaction distribution
-system.cpu.toL2Bus.trans_dist::CleanEvict 6334123 # Transaction distribution
-system.cpu.toL2Bus.trans_dist::ReadExReq 1887340 # Transaction distribution
-system.cpu.toL2Bus.trans_dist::ReadExResp 1887340 # Transaction distribution
-system.cpu.toL2Bus.trans_dist::ReadCleanReq 958 # Transaction distribution
-system.cpu.toL2Bus.trans_dist::ReadSharedReq 7238730 # Transaction distribution
-system.cpu.toL2Bus.pkt_count_system.cpu.icache.mem_side::system.cpu.l2cache.cpu_side 1919 # Packet count per connected master and slave (bytes)
-system.cpu.toL2Bus.pkt_count_system.cpu.dcache.mem_side::system.cpu.l2cache.cpu_side 27374114 # Packet count per connected master and slave (bytes)
-system.cpu.toL2Bus.pkt_count::total 27376033 # Packet count per connected master and slave (bytes)
-system.cpu.toL2Bus.pkt_size_system.cpu.icache.mem_side::system.cpu.l2cache.cpu_side 61504 # Cumulative packet size per connected master and slave (bytes)
-system.cpu.toL2Bus.pkt_size_system.cpu.dcache.mem_side::system.cpu.l2cache.cpu_side 820011072 # Cumulative packet size per connected master and slave (bytes)
-system.cpu.toL2Bus.pkt_size::total 820072576 # Cumulative packet size per connected master and slave (bytes)
-system.cpu.toL2Bus.snoops 1920891 # Total snoops (count)
-system.cpu.toL2Bus.snoop_fanout::samples 11047919 # Request fanout histogram
+system.cpu.toL2Bus.trans_dist::CleanEvict 6334072 # Transaction distribution
+system.cpu.toL2Bus.trans_dist::ReadExReq 1887339 # Transaction distribution
+system.cpu.toL2Bus.trans_dist::ReadExResp 1887339 # Transaction distribution
+system.cpu.toL2Bus.trans_dist::ReadCleanReq 963 # Transaction distribution
+system.cpu.toL2Bus.trans_dist::ReadSharedReq 7238733 # Transaction distribution
+system.cpu.toL2Bus.pkt_count_system.cpu.icache.mem_side::system.cpu.l2cache.cpu_side 1929 # Packet count per connected master and slave (bytes)
+system.cpu.toL2Bus.pkt_count_system.cpu.dcache.mem_side::system.cpu.l2cache.cpu_side 27374120 # Packet count per connected master and slave (bytes)
+system.cpu.toL2Bus.pkt_count::total 27376049 # Packet count per connected master and slave (bytes)
+system.cpu.toL2Bus.pkt_size_system.cpu.icache.mem_side::system.cpu.l2cache.cpu_side 61824 # Cumulative packet size per connected master and slave (bytes)
+system.cpu.toL2Bus.pkt_size_system.cpu.dcache.mem_side::system.cpu.l2cache.cpu_side 820014912 # Cumulative packet size per connected master and slave (bytes)
+system.cpu.toL2Bus.pkt_size::total 820076736 # Cumulative packet size per connected master and slave (bytes)
+system.cpu.toL2Bus.snoops 1920902 # Total snoops (count)
+system.cpu.toL2Bus.snoopTraffic 65417280 # Total snoop traffic (bytes)
+system.cpu.toL2Bus.snoop_fanout::samples 11047937 # Request fanout histogram
system.cpu.toL2Bus.snoop_fanout::mean 0.000115 # Request fanout histogram
-system.cpu.toL2Bus.snoop_fanout::stdev 0.010713 # Request fanout histogram
+system.cpu.toL2Bus.snoop_fanout::stdev 0.010729 # Request fanout histogram
system.cpu.toL2Bus.snoop_fanout::underflows 0 0.00% 0.00% # Request fanout histogram
-system.cpu.toL2Bus.snoop_fanout::0 11046651 99.99% 99.99% # Request fanout histogram
-system.cpu.toL2Bus.snoop_fanout::1 1268 0.01% 100.00% # Request fanout histogram
+system.cpu.toL2Bus.snoop_fanout::0 11046665 99.99% 99.99% # Request fanout histogram
+system.cpu.toL2Bus.snoop_fanout::1 1272 0.01% 100.00% # Request fanout histogram
system.cpu.toL2Bus.snoop_fanout::2 0 0.00% 100.00% # Request fanout histogram
system.cpu.toL2Bus.snoop_fanout::overflows 0 0.00% 100.00% # Request fanout histogram
system.cpu.toL2Bus.snoop_fanout::min_value 0 # Request fanout histogram
system.cpu.toL2Bus.snoop_fanout::max_value 1 # Request fanout histogram
-system.cpu.toL2Bus.snoop_fanout::total 11047919 # Request fanout histogram
-system.cpu.toL2Bus.reqLayer0.occupancy 12811108500 # Layer occupancy (ticks)
+system.cpu.toL2Bus.snoop_fanout::total 11047937 # Request fanout histogram
+system.cpu.toL2Bus.reqLayer0.occupancy 12811171000 # Layer occupancy (ticks)
system.cpu.toL2Bus.reqLayer0.utilization 1.1 # Layer utilization (%)
-system.cpu.toL2Bus.respLayer0.occupancy 1437000 # Layer occupancy (ticks)
+system.cpu.toL2Bus.respLayer0.occupancy 1444500 # Layer occupancy (ticks)
system.cpu.toL2Bus.respLayer0.utilization 0.0 # Layer utilization (%)
-system.cpu.toL2Bus.respLayer1.occupancy 13689105000 # Layer occupancy (ticks)
+system.cpu.toL2Bus.respLayer1.occupancy 13689108000 # Layer occupancy (ticks)
system.cpu.toL2Bus.respLayer1.utilization 1.1 # Layer utilization (%)
-system.membus.pwrStateResidencyTicks::UNDEFINED 1208777694500 # Cumulative time (in ticks) in various power states
-system.membus.trans_dist::ReadResp 1173106 # Transaction distribution
-system.membus.trans_dist::WritebackDirty 1022139 # Transaction distribution
-system.membus.trans_dist::CleanEvict 897726 # Transaction distribution
-system.membus.trans_dist::ReadExReq 780510 # Transaction distribution
-system.membus.trans_dist::ReadExResp 780510 # Transaction distribution
-system.membus.trans_dist::ReadSharedReq 1173106 # Transaction distribution
-system.membus.pkt_count_system.cpu.l2cache.mem_side::system.physmem.port 5827097 # Packet count per connected master and slave (bytes)
-system.membus.pkt_count::total 5827097 # Packet count per connected master and slave (bytes)
-system.membus.pkt_size_system.cpu.l2cache.mem_side::system.physmem.port 190448320 # Cumulative packet size per connected master and slave (bytes)
-system.membus.pkt_size::total 190448320 # Cumulative packet size per connected master and slave (bytes)
+system.membus.pwrStateResidencyTicks::UNDEFINED 1219570622500 # Cumulative time (in ticks) in various power states
+system.membus.trans_dist::ReadResp 1173115 # Transaction distribution
+system.membus.trans_dist::WritebackDirty 1022145 # Transaction distribution
+system.membus.trans_dist::CleanEvict 897727 # Transaction distribution
+system.membus.trans_dist::ReadExReq 780512 # Transaction distribution
+system.membus.trans_dist::ReadExResp 780512 # Transaction distribution
+system.membus.trans_dist::ReadSharedReq 1173115 # Transaction distribution
+system.membus.pkt_count_system.cpu.l2cache.mem_side::system.physmem.port 5827126 # Packet count per connected master and slave (bytes)
+system.membus.pkt_count::total 5827126 # Packet count per connected master and slave (bytes)
+system.membus.pkt_size_system.cpu.l2cache.mem_side::system.physmem.port 190449408 # Cumulative packet
size per connected master and slave (bytes) +system.membus.pkt_size::total 190449408 # Cumulative packet size per connected master and slave (bytes) system.membus.snoops 0 # Total snoops (count) -system.membus.snoop_fanout::samples 3873481 # Request fanout histogram +system.membus.snoopTraffic 0 # Total snoop traffic (bytes) +system.membus.snoop_fanout::samples 3873499 # Request fanout histogram system.membus.snoop_fanout::mean 0 # Request fanout histogram system.membus.snoop_fanout::stdev 0 # Request fanout histogram system.membus.snoop_fanout::underflows 0 0.00% 0.00% # Request fanout histogram -system.membus.snoop_fanout::0 3873481 100.00% 100.00% # Request fanout histogram +system.membus.snoop_fanout::0 3873499 100.00% 100.00% # Request fanout histogram system.membus.snoop_fanout::1 0 0.00% 100.00% # Request fanout histogram system.membus.snoop_fanout::overflows 0 0.00% 100.00% # Request fanout histogram system.membus.snoop_fanout::min_value 0 # Request fanout histogram system.membus.snoop_fanout::max_value 0 # Request fanout histogram -system.membus.snoop_fanout::total 3873481 # Request fanout histogram -system.membus.reqLayer0.occupancy 8428417500 # Layer occupancy (ticks) +system.membus.snoop_fanout::total 3873499 # Request fanout histogram +system.membus.reqLayer0.occupancy 8456520500 # Layer occupancy (ticks) system.membus.reqLayer0.utilization 0.7 # Layer utilization (%) -system.membus.respLayer1.occupancy 10685410500 # Layer occupancy (ticks) +system.membus.respLayer1.occupancy 10686565250 # Layer occupancy (ticks) system.membus.respLayer1.utilization 0.9 # Layer utilization (%) ---------- End Simulation Statistics ---------- diff --git a/tests/long/se/60.bzip2/ref/alpha/tru64/o3-timing/config.ini b/tests/long/se/60.bzip2/ref/alpha/tru64/o3-timing/config.ini index 88e337781..b191243cb 100644 --- a/tests/long/se/60.bzip2/ref/alpha/tru64/o3-timing/config.ini +++ b/tests/long/se/60.bzip2/ref/alpha/tru64/o3-timing/config.ini @@ -14,7 +14,9 @@ children=clk_domain cpu cpu_clk_domain dvfs_handler membus physmem voltage_domai boot_osflags=a cache_line_size=64 clk_domain=system.clk_domain +default_p_state=UNDEFINED eventq_index=0 +exit_on_work_items=false init_param=0 kernel= kernel_addr_check=true @@ -24,9 +26,16 @@ mem_mode=timing mem_ranges= memories=system.physmem mmap_using_noreserve=false +multi_thread=false num_work_ids=16 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null readfile= symbolfile= +thermal_components= +thermal_model=Null work_begin_ckpt_count=0 work_begin_cpu_id_exit=-1 work_begin_exit_count=0 @@ -68,6 +77,7 @@ cpu_id=0 decodeToFetchDelay=1 decodeToRenameDelay=1 decodeWidth=8 +default_p_state=UNDEFINED dispatchWidth=8 do_checkpoint_insts=true do_quiesce=true @@ -104,6 +114,10 @@ numPhysIntRegs=256 numROBEntries=192 numRobs=1 numThreads=1 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null profile=0 progress_interval=0 renameToDecodeDelay=1 @@ -143,11 +157,18 @@ choicePredictorSize=8192 eventq_index=0 globalCtrBits=2 globalPredictorSize=8192 +indirectHashGHR=true +indirectHashTargets=true +indirectPathLength=3 +indirectSets=256 +indirectTagSize=16 +indirectWays=2 instShiftAmt=2 localCtrBits=2 localHistoryTableSize=2048 localPredictorSize=2048 numThreads=1 +useIndirect=true [system.cpu.dcache] type=Cache @@ -155,13 +176,18 @@ children=tags addr_ranges=0:18446744073709551615 assoc=2 clk_domain=system.cpu_clk_domain +clusivity=mostly_incl 
+default_p_state=UNDEFINED demand_mshr_reserve=1 eventq_index=0 -forward_snoops=true hit_latency=2 is_read_only=false max_miss_count=0 mshrs=4 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null prefetch_on_access=false prefetcher=Null response_latency=2 @@ -171,6 +197,7 @@ system=system tags=system.cpu.dcache.tags tgts_per_mshr=20 write_buffers=8 +writeback_clean=false cpu_side=system.cpu.dcache_port mem_side=system.cpu.toL2Bus.slave[1] @@ -179,8 +206,13 @@ type=LRU assoc=2 block_size=64 clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 hit_latency=2 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sequential_access=false size=262144 @@ -502,13 +534,18 @@ children=tags addr_ranges=0:18446744073709551615 assoc=2 clk_domain=system.cpu_clk_domain +clusivity=mostly_incl +default_p_state=UNDEFINED demand_mshr_reserve=1 eventq_index=0 -forward_snoops=true hit_latency=2 is_read_only=true max_miss_count=0 mshrs=4 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null prefetch_on_access=false prefetcher=Null response_latency=2 @@ -518,6 +555,7 @@ system=system tags=system.cpu.icache.tags tgts_per_mshr=20 write_buffers=8 +writeback_clean=true cpu_side=system.cpu.icache_port mem_side=system.cpu.toL2Bus.slave[0] @@ -526,8 +564,13 @@ type=LRU assoc=2 block_size=64 clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 hit_latency=2 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sequential_access=false size=131072 @@ -551,13 +594,18 @@ children=tags addr_ranges=0:18446744073709551615 assoc=8 clk_domain=system.cpu_clk_domain +clusivity=mostly_incl +default_p_state=UNDEFINED demand_mshr_reserve=1 eventq_index=0 -forward_snoops=true hit_latency=20 is_read_only=false max_miss_count=0 mshrs=20 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null prefetch_on_access=false prefetcher=Null response_latency=20 @@ -567,6 +615,7 @@ system=system tags=system.cpu.l2cache.tags tgts_per_mshr=12 write_buffers=8 +writeback_clean=false cpu_side=system.cpu.toL2Bus.master[0] mem_side=system.membus.slave[1] @@ -575,19 +624,31 @@ type=LRU assoc=8 block_size=64 clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 hit_latency=20 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sequential_access=false size=2097152 [system.cpu.toL2Bus] type=CoherentXBar +children=snoop_filter clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 forward_latency=0 frontend_latency=1 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +point_of_coherency=false +power_model=Null response_latency=1 -snoop_filter=Null +snoop_filter=system.cpu.toL2Bus.snoop_filter snoop_response_latency=1 system=system use_default_range=false @@ -595,6 +656,13 @@ width=32 master=system.cpu.l2cache.cpu_side slave=system.cpu.icache.mem_side system.cpu.dcache.mem_side +[system.cpu.toL2Bus.snoop_filter] +type=SnoopFilter +eventq_index=0 +lookup_latency=0 +max_capacity=8388608 +system=system + [system.cpu.tracer] type=ExeTracer eventq_index=0 @@ -609,7 +677,7 @@ env= errout=cerr euid=100 eventq_index=0 -executable=/scratch/nilay/GEM5/dist/m5/cpu2000/binaries/alpha/tru64/bzip2 
+executable=/arm/projectscratch/randd/systems/dist/cpu2000/binaries/alpha/tru64/bzip2 gid=100 input=cin kvmInSE=false @@ -641,9 +709,15 @@ transition_latency=100000000 [system.membus] type=CoherentXBar clk_domain=system.clk_domain +default_p_state=UNDEFINED eventq_index=0 forward_latency=4 frontend_latency=3 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +point_of_coherency=true +power_model=Null response_latency=2 snoop_filter=Null snoop_response_latency=4 @@ -687,6 +761,7 @@ burst_length=8 channels=1 clk_domain=system.clk_domain conf_table_reported=true +default_p_state=UNDEFINED device_bus_width=8 device_rowbuffer_size=1024 device_size=536870912 @@ -698,7 +773,11 @@ max_accesses_per_row=16 mem_sched_policy=frfcfs min_writes_per_switch=16 null=false +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 page_policy=open_adaptive +power_model=Null range=0:134217727 ranks_per_channel=2 read_buffer_size=32 diff --git a/tests/long/se/60.bzip2/ref/alpha/tru64/o3-timing/simerr b/tests/long/se/60.bzip2/ref/alpha/tru64/o3-timing/simerr index f0a9a7c93..e0bca4e4e 100755 --- a/tests/long/se/60.bzip2/ref/alpha/tru64/o3-timing/simerr +++ b/tests/long/se/60.bzip2/ref/alpha/tru64/o3-timing/simerr @@ -1,5 +1,6 @@ warn: DRAM device capacity (8192 Mbytes) does not match the address range assigned (128 Mbytes) warn: Sockets disabled, not accepting gdb connections +warn: ClockedObject: More than one power state change request encountered within the same simulation tick warn: Prefetch instructions in Alpha do not do anything warn: Prefetch instructions in Alpha do not do anything warn: Prefetch instructions in Alpha do not do anything diff --git a/tests/long/se/60.bzip2/ref/alpha/tru64/o3-timing/simout b/tests/long/se/60.bzip2/ref/alpha/tru64/o3-timing/simout index abe06b1e2..e33a21652 100755 --- a/tests/long/se/60.bzip2/ref/alpha/tru64/o3-timing/simout +++ b/tests/long/se/60.bzip2/ref/alpha/tru64/o3-timing/simout @@ -3,10 +3,10 @@ Redirecting stderr to build/ALPHA/tests/opt/long/se/60.bzip2/alpha/tru64/o3-timi gem5 Simulator System. http://gem5.org gem5 is copyrighted software; use the --copyright option for details. -gem5 compiled Sep 14 2015 20:54:01 -gem5 started Sep 14 2015 21:26:54 -gem5 executing on ribera.cs.wisc.edu -command line: build/ALPHA/gem5.opt -d build/ALPHA/tests/opt/long/se/60.bzip2/alpha/tru64/o3-timing -re /scratch/nilay/GEM5/gem5/tests/run.py build/ALPHA/tests/opt/long/se/60.bzip2/alpha/tru64/o3-timing +gem5 compiled Jul 19 2016 12:23:51 +gem5 started Jul 21 2016 14:09:29 +gem5 executing on e108600-lin, pid 4309 +command line: /work/curdun01/gem5-external.hg/build/ALPHA/gem5.opt -d build/ALPHA/tests/opt/long/se/60.bzip2/alpha/tru64/o3-timing -re /work/curdun01/gem5-external.hg/tests/testing/../run.py long/se/60.bzip2/alpha/tru64/o3-timing Global frequency set at 1000000000000 ticks per second info: Entering event queue @ 0. Starting simulation... @@ -26,4 +26,4 @@ Uncompressing Data Uncompressed data 1048576 bytes in length Uncompressed data compared correctly Tested 1MB buffer: OK! 
-Exiting @ tick 669556582000 because target called exit() +Exiting @ tick 669587683000 because target called exit() diff --git a/tests/long/se/60.bzip2/ref/alpha/tru64/o3-timing/stats.txt b/tests/long/se/60.bzip2/ref/alpha/tru64/o3-timing/stats.txt index 6c06e7b34..cd08b0f17 100644 --- a/tests/long/se/60.bzip2/ref/alpha/tru64/o3-timing/stats.txt +++ b/tests/long/se/60.bzip2/ref/alpha/tru64/o3-timing/stats.txt @@ -4,11 +4,11 @@ sim_seconds 0.669588 # Nu sim_ticks 669587683000 # Number of ticks simulated final_tick 669587683000 # Number of ticks from beginning of simulation (restored from checkpoints and never reset) sim_freq 1000000000000 # Frequency of simulated ticks -host_inst_rate 268815 # Simulator instruction rate (inst/s) -host_op_rate 268815 # Simulator op (including micro ops) rate (op/s) -host_tick_rate 103681118 # Simulator tick rate (ticks/s) -host_mem_usage 297332 # Number of bytes of host memory used -host_seconds 6458.15 # Real time elapsed on the host +host_inst_rate 209688 # Simulator instruction rate (inst/s) +host_op_rate 209688 # Simulator op (including micro ops) rate (op/s) +host_tick_rate 80876198 # Simulator tick rate (ticks/s) +host_mem_usage 251300 # Number of bytes of host memory used +host_seconds 8279.17 # Real time elapsed on the host sim_insts 1736043781 # Number of instructions simulated sim_ops 1736043781 # Number of ops (including micro ops) simulated system.voltage_domain.voltage 1 # Voltage in Volts @@ -1059,6 +1059,7 @@ system.cpu.toL2Bus.pkt_size_system.cpu.icache.mem_side::system.cpu.l2cache.cpu_s system.cpu.toL2Bus.pkt_size_system.cpu.dcache.mem_side::system.cpu.l2cache.cpu_side 828099072 # Cumulative packet size per connected master and slave (bytes) system.cpu.toL2Bus.pkt_size::total 828159872 # Cumulative packet size per connected master and slave (bytes) system.cpu.toL2Bus.snoops 1929018 # Total snoops (count) +system.cpu.toL2Bus.snoopTraffic 65555456 # Total snoop traffic (bytes) system.cpu.toL2Bus.snoop_fanout::samples 11141265 # Request fanout histogram system.cpu.toL2Bus.snoop_fanout::mean 0.000114 # Request fanout histogram system.cpu.toL2Bus.snoop_fanout::stdev 0.010697 # Request fanout histogram @@ -1088,6 +1089,7 @@ system.membus.pkt_count::total 5851429 # Pa system.membus.pkt_size_system.cpu.l2cache.mem_side::system.physmem.port 191105728 # Cumulative packet size per connected master and slave (bytes) system.membus.pkt_size::total 191105728 # Cumulative packet size per connected master and slave (bytes) system.membus.snoops 0 # Total snoops (count) +system.membus.snoopTraffic 0 # Total snoop traffic (bytes) system.membus.snoop_fanout::samples 3889706 # Request fanout histogram system.membus.snoop_fanout::mean 0 # Request fanout histogram system.membus.snoop_fanout::stdev 0 # Request fanout histogram diff --git a/tests/long/se/60.bzip2/ref/alpha/tru64/simple-atomic/config.ini b/tests/long/se/60.bzip2/ref/alpha/tru64/simple-atomic/config.ini index d3c80bc18..346da75e3 100644 --- a/tests/long/se/60.bzip2/ref/alpha/tru64/simple-atomic/config.ini +++ b/tests/long/se/60.bzip2/ref/alpha/tru64/simple-atomic/config.ini @@ -14,7 +14,9 @@ children=clk_domain cpu cpu_clk_domain dvfs_handler membus physmem voltage_domai boot_osflags=a cache_line_size=64 clk_domain=system.clk_domain +default_p_state=UNDEFINED eventq_index=0 +exit_on_work_items=false init_param=0 kernel= kernel_addr_check=true @@ -24,9 +26,16 @@ mem_mode=atomic mem_ranges= memories=system.physmem mmap_using_noreserve=false +multi_thread=false num_work_ids=16 +p_state_clk_gate_bins=20 
+p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null readfile= symbolfile= +thermal_components= +thermal_model=Null work_begin_ckpt_count=0 work_begin_cpu_id_exit=-1 work_begin_exit_count=0 @@ -51,6 +60,7 @@ branchPred=Null checker=Null clk_domain=system.cpu_clk_domain cpu_id=0 +default_p_state=UNDEFINED do_checkpoint_insts=true do_quiesce=true do_statistics_insts=true @@ -67,6 +77,10 @@ max_insts_any_thread=0 max_loads_all_threads=0 max_loads_any_thread=0 numThreads=1 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null profile=0 progress_interval=0 simpoint_start_insts= @@ -114,7 +128,7 @@ env= errout=cerr euid=100 eventq_index=0 -executable=/scratch/nilay/GEM5/dist/m5/cpu2000/binaries/alpha/tru64/bzip2 +executable=/arm/projectscratch/randd/systems/dist/cpu2000/binaries/alpha/tru64/bzip2 gid=100 input=cin kvmInSE=false @@ -146,9 +160,15 @@ transition_latency=100000000 [system.membus] type=CoherentXBar clk_domain=system.clk_domain +default_p_state=UNDEFINED eventq_index=0 forward_latency=4 frontend_latency=3 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +point_of_coherency=true +power_model=Null response_latency=2 snoop_filter=Null snoop_response_latency=4 @@ -163,11 +183,16 @@ type=SimpleMemory bandwidth=73.000000 clk_domain=system.clk_domain conf_table_reported=true +default_p_state=UNDEFINED eventq_index=0 in_addr_map=true latency=30000 latency_var=0 null=false +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null range=0:134217727 port=system.membus.master[0] diff --git a/tests/long/se/60.bzip2/ref/alpha/tru64/simple-atomic/simerr b/tests/long/se/60.bzip2/ref/alpha/tru64/simple-atomic/simerr index de77515a1..96524c915 100755 --- a/tests/long/se/60.bzip2/ref/alpha/tru64/simple-atomic/simerr +++ b/tests/long/se/60.bzip2/ref/alpha/tru64/simple-atomic/simerr @@ -1,4 +1,5 @@ warn: Sockets disabled, not accepting gdb connections +warn: ClockedObject: More than one power state change request encountered within the same simulation tick warn: Prefetch instructions in Alpha do not do anything warn: Prefetch instructions in Alpha do not do anything warn: Prefetch instructions in Alpha do not do anything diff --git a/tests/long/se/60.bzip2/ref/alpha/tru64/simple-atomic/simout b/tests/long/se/60.bzip2/ref/alpha/tru64/simple-atomic/simout index 1dfd46cbe..d6f6a9638 100755 --- a/tests/long/se/60.bzip2/ref/alpha/tru64/simple-atomic/simout +++ b/tests/long/se/60.bzip2/ref/alpha/tru64/simple-atomic/simout @@ -1,10 +1,13 @@ +Redirecting stdout to build/ALPHA/tests/opt/long/se/60.bzip2/alpha/tru64/simple-atomic/simout +Redirecting stderr to build/ALPHA/tests/opt/long/se/60.bzip2/alpha/tru64/simple-atomic/simerr gem5 Simulator System. http://gem5.org gem5 is copyrighted software; use the --copyright option for details. 
-gem5 compiled Jan 22 2014 16:27:55 -gem5 started Jan 22 2014 18:36:30 -gem5 executing on u200540-lin -command line: build/ALPHA/gem5.opt -d build/ALPHA/tests/opt/long/se/60.bzip2/alpha/tru64/simple-atomic -re tests/run.py build/ALPHA/tests/opt/long/se/60.bzip2/alpha/tru64/simple-atomic +gem5 compiled Jul 19 2016 12:23:51 +gem5 started Jul 21 2016 14:09:29 +gem5 executing on e108600-lin, pid 4310 +command line: /work/curdun01/gem5-external.hg/build/ALPHA/gem5.opt -d build/ALPHA/tests/opt/long/se/60.bzip2/alpha/tru64/simple-atomic -re /work/curdun01/gem5-external.hg/tests/testing/../run.py long/se/60.bzip2/alpha/tru64/simple-atomic + Global frequency set at 1000000000000 ticks per second info: Entering event queue @ 0. Starting simulation... info: Increasing stack size by one page. diff --git a/tests/long/se/60.bzip2/ref/alpha/tru64/simple-atomic/stats.txt b/tests/long/se/60.bzip2/ref/alpha/tru64/simple-atomic/stats.txt index 1bae4420d..9e88e1d85 100644 --- a/tests/long/se/60.bzip2/ref/alpha/tru64/simple-atomic/stats.txt +++ b/tests/long/se/60.bzip2/ref/alpha/tru64/simple-atomic/stats.txt @@ -4,11 +4,11 @@ sim_seconds 0.913189 # Nu sim_ticks 913189263000 # Number of ticks simulated final_tick 913189263000 # Number of ticks from beginning of simulation (restored from checkpoints and never reset) sim_freq 1000000000000 # Frequency of simulated ticks -host_inst_rate 3169811 # Simulator instruction rate (inst/s) -host_op_rate 3169811 # Simulator op (including micro ops) rate (op/s) -host_tick_rate 1590652371 # Simulator tick rate (ticks/s) -host_mem_usage 285272 # Number of bytes of host memory used -host_seconds 574.10 # Real time elapsed on the host +host_inst_rate 2010513 # Simulator instruction rate (inst/s) +host_op_rate 2010513 # Simulator op (including micro ops) rate (op/s) +host_tick_rate 1008901575 # Simulator tick rate (ticks/s) +host_mem_usage 239516 # Number of bytes of host memory used +host_seconds 905.13 # Real time elapsed on the host sim_insts 1819780127 # Number of instructions simulated sim_ops 1819780127 # Number of ops (including micro ops) simulated system.voltage_domain.voltage 1 # Voltage in Volts @@ -142,6 +142,7 @@ system.membus.pkt_size_system.cpu.icache_port::system.physmem.port 7305514036 system.membus.pkt_size_system.cpu.dcache_port::system.physmem.port 2802573242 # Cumulative packet size per connected master and slave (bytes) system.membus.pkt_size::total 10108087278 # Cumulative packet size per connected master and slave (bytes) system.membus.snoops 0 # Total snoops (count) +system.membus.snoopTraffic 0 # Total snoop traffic (bytes) system.membus.snoop_fanout::samples 2431702674 # Request fanout histogram system.membus.snoop_fanout::mean 0.751070 # Request fanout histogram system.membus.snoop_fanout::stdev 0.432393 # Request fanout histogram diff --git a/tests/long/se/60.bzip2/ref/alpha/tru64/simple-timing/config.ini b/tests/long/se/60.bzip2/ref/alpha/tru64/simple-timing/config.ini index 97b7b2c5a..b0859ad68 100644 --- a/tests/long/se/60.bzip2/ref/alpha/tru64/simple-timing/config.ini +++ b/tests/long/se/60.bzip2/ref/alpha/tru64/simple-timing/config.ini @@ -14,7 +14,9 @@ children=clk_domain cpu cpu_clk_domain dvfs_handler membus physmem voltage_domai boot_osflags=a cache_line_size=64 clk_domain=system.clk_domain +default_p_state=UNDEFINED eventq_index=0 +exit_on_work_items=false init_param=0 kernel= kernel_addr_check=true @@ -24,9 +26,16 @@ mem_mode=timing mem_ranges= memories=system.physmem mmap_using_noreserve=false +multi_thread=false num_work_ids=16 
+p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null readfile= symbolfile= +thermal_components= +thermal_model=Null work_begin_ckpt_count=0 work_begin_cpu_id_exit=-1 work_begin_exit_count=0 @@ -51,6 +60,7 @@ branchPred=Null checker=Null clk_domain=system.cpu_clk_domain cpu_id=0 +default_p_state=UNDEFINED do_checkpoint_insts=true do_quiesce=true do_statistics_insts=true @@ -66,6 +76,10 @@ max_insts_any_thread=0 max_loads_all_threads=0 max_loads_any_thread=0 numThreads=1 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null profile=0 progress_interval=0 simpoint_start_insts= @@ -83,13 +97,18 @@ children=tags addr_ranges=0:18446744073709551615 assoc=2 clk_domain=system.cpu_clk_domain +clusivity=mostly_incl +default_p_state=UNDEFINED demand_mshr_reserve=1 eventq_index=0 -forward_snoops=true hit_latency=2 is_read_only=false max_miss_count=0 mshrs=4 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null prefetch_on_access=false prefetcher=Null response_latency=2 @@ -99,6 +118,7 @@ system=system tags=system.cpu.dcache.tags tgts_per_mshr=20 write_buffers=8 +writeback_clean=false cpu_side=system.cpu.dcache_port mem_side=system.cpu.toL2Bus.slave[1] @@ -107,8 +127,13 @@ type=LRU assoc=2 block_size=64 clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 hit_latency=2 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sequential_access=false size=262144 @@ -123,13 +148,18 @@ children=tags addr_ranges=0:18446744073709551615 assoc=2 clk_domain=system.cpu_clk_domain +clusivity=mostly_incl +default_p_state=UNDEFINED demand_mshr_reserve=1 eventq_index=0 -forward_snoops=true hit_latency=2 is_read_only=true max_miss_count=0 mshrs=4 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null prefetch_on_access=false prefetcher=Null response_latency=2 @@ -139,6 +169,7 @@ system=system tags=system.cpu.icache.tags tgts_per_mshr=20 write_buffers=8 +writeback_clean=true cpu_side=system.cpu.icache_port mem_side=system.cpu.toL2Bus.slave[0] @@ -147,8 +178,13 @@ type=LRU assoc=2 block_size=64 clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 hit_latency=2 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sequential_access=false size=131072 @@ -172,13 +208,18 @@ children=tags addr_ranges=0:18446744073709551615 assoc=8 clk_domain=system.cpu_clk_domain +clusivity=mostly_incl +default_p_state=UNDEFINED demand_mshr_reserve=1 eventq_index=0 -forward_snoops=true hit_latency=20 is_read_only=false max_miss_count=0 mshrs=20 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null prefetch_on_access=false prefetcher=Null response_latency=20 @@ -188,6 +229,7 @@ system=system tags=system.cpu.l2cache.tags tgts_per_mshr=12 write_buffers=8 +writeback_clean=false cpu_side=system.cpu.toL2Bus.master[0] mem_side=system.membus.slave[1] @@ -196,19 +238,31 @@ type=LRU assoc=8 block_size=64 clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 hit_latency=20 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sequential_access=false size=2097152 [system.cpu.toL2Bus] type=CoherentXBar +children=snoop_filter clk_domain=system.cpu_clk_domain 
+default_p_state=UNDEFINED eventq_index=0 forward_latency=0 frontend_latency=1 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +point_of_coherency=false +power_model=Null response_latency=1 -snoop_filter=Null +snoop_filter=system.cpu.toL2Bus.snoop_filter snoop_response_latency=1 system=system use_default_range=false @@ -216,6 +270,13 @@ width=32 master=system.cpu.l2cache.cpu_side slave=system.cpu.icache.mem_side system.cpu.dcache.mem_side +[system.cpu.toL2Bus.snoop_filter] +type=SnoopFilter +eventq_index=0 +lookup_latency=0 +max_capacity=8388608 +system=system + [system.cpu.tracer] type=ExeTracer eventq_index=0 @@ -230,7 +291,7 @@ env= errout=cerr euid=100 eventq_index=0 -executable=/scratch/nilay/GEM5/dist/m5/cpu2000/binaries/alpha/tru64/bzip2 +executable=/arm/projectscratch/randd/systems/dist/cpu2000/binaries/alpha/tru64/bzip2 gid=100 input=cin kvmInSE=false @@ -262,9 +323,15 @@ transition_latency=100000000 [system.membus] type=CoherentXBar clk_domain=system.clk_domain +default_p_state=UNDEFINED eventq_index=0 forward_latency=4 frontend_latency=3 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +point_of_coherency=true +power_model=Null response_latency=2 snoop_filter=Null snoop_response_latency=4 @@ -279,11 +346,16 @@ type=SimpleMemory bandwidth=73.000000 clk_domain=system.clk_domain conf_table_reported=true +default_p_state=UNDEFINED eventq_index=0 in_addr_map=true latency=30000 latency_var=0 null=false +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null range=0:134217727 port=system.membus.master[0] diff --git a/tests/long/se/60.bzip2/ref/alpha/tru64/simple-timing/simerr b/tests/long/se/60.bzip2/ref/alpha/tru64/simple-timing/simerr index de77515a1..96524c915 100755 --- a/tests/long/se/60.bzip2/ref/alpha/tru64/simple-timing/simerr +++ b/tests/long/se/60.bzip2/ref/alpha/tru64/simple-timing/simerr @@ -1,4 +1,5 @@ warn: Sockets disabled, not accepting gdb connections +warn: ClockedObject: More than one power state change request encountered within the same simulation tick warn: Prefetch instructions in Alpha do not do anything warn: Prefetch instructions in Alpha do not do anything warn: Prefetch instructions in Alpha do not do anything diff --git a/tests/long/se/60.bzip2/ref/alpha/tru64/simple-timing/simout b/tests/long/se/60.bzip2/ref/alpha/tru64/simple-timing/simout index 43eef16e9..eae87e351 100755 --- a/tests/long/se/60.bzip2/ref/alpha/tru64/simple-timing/simout +++ b/tests/long/se/60.bzip2/ref/alpha/tru64/simple-timing/simout @@ -1,10 +1,13 @@ +Redirecting stdout to build/ALPHA/tests/opt/long/se/60.bzip2/alpha/tru64/simple-timing/simout +Redirecting stderr to build/ALPHA/tests/opt/long/se/60.bzip2/alpha/tru64/simple-timing/simerr gem5 Simulator System. http://gem5.org gem5 is copyrighted software; use the --copyright option for details. 
-gem5 compiled Jan 22 2014 16:27:55 -gem5 started Jan 22 2014 18:44:35 -gem5 executing on u200540-lin -command line: build/ALPHA/gem5.opt -d build/ALPHA/tests/opt/long/se/60.bzip2/alpha/tru64/simple-timing -re tests/run.py build/ALPHA/tests/opt/long/se/60.bzip2/alpha/tru64/simple-timing +gem5 compiled Jul 19 2016 12:23:51 +gem5 started Jul 21 2016 14:09:29 +gem5 executing on e108600-lin, pid 4312 +command line: /work/curdun01/gem5-external.hg/build/ALPHA/gem5.opt -d build/ALPHA/tests/opt/long/se/60.bzip2/alpha/tru64/simple-timing -re /work/curdun01/gem5-external.hg/tests/testing/../run.py long/se/60.bzip2/alpha/tru64/simple-timing + Global frequency set at 1000000000000 ticks per second info: Entering event queue @ 0. Starting simulation... info: Increasing stack size by one page. @@ -23,4 +26,4 @@ Uncompressing Data Uncompressed data 1048576 bytes in length Uncompressed data compared correctly Tested 1MB buffer: OK! -Exiting @ tick 2623386226000 because target called exit() +Exiting @ tick 2636719559500 because target called exit() diff --git a/tests/long/se/60.bzip2/ref/alpha/tru64/simple-timing/stats.txt b/tests/long/se/60.bzip2/ref/alpha/tru64/simple-timing/stats.txt index d98d61e9c..6bd6eda32 100644 --- a/tests/long/se/60.bzip2/ref/alpha/tru64/simple-timing/stats.txt +++ b/tests/long/se/60.bzip2/ref/alpha/tru64/simple-timing/stats.txt @@ -4,11 +4,11 @@ sim_seconds 2.636720 # Nu sim_ticks 2636719559500 # Number of ticks simulated final_tick 2636719559500 # Number of ticks from beginning of simulation (restored from checkpoints and never reset) sim_freq 1000000000000 # Frequency of simulated ticks -host_inst_rate 1821657 # Simulator instruction rate (inst/s) -host_op_rate 1821657 # Simulator op (including micro ops) rate (op/s) -host_tick_rate 2639438336 # Simulator tick rate (ticks/s) -host_mem_usage 295280 # Number of bytes of host memory used -host_seconds 998.97 # Real time elapsed on the host +host_inst_rate 1223384 # Simulator instruction rate (inst/s) +host_op_rate 1223384 # Simulator op (including micro ops) rate (op/s) +host_tick_rate 1772587765 # Simulator tick rate (ticks/s) +host_mem_usage 249508 # Number of bytes of host memory used +host_seconds 1487.50 # Real time elapsed on the host sim_insts 1819780127 # Number of instructions simulated sim_ops 1819780127 # Number of ops (including micro ops) simulated system.voltage_domain.voltage 1 # Voltage in Volts @@ -507,6 +507,7 @@ system.cpu.toL2Bus.pkt_size_system.cpu.icache.mem_side::system.cpu.l2cache.cpu_s system.cpu.toL2Bus.pkt_size_system.cpu.dcache.mem_side::system.cpu.l2cache.cpu_side 818634240 # Cumulative packet size per connected master and slave (bytes) system.cpu.toL2Bus.pkt_size::total 818685632 # Cumulative packet size per connected master and slave (bytes) system.cpu.toL2Bus.snoops 1919525 # Total snoops (count) +system.cpu.toL2Bus.snoopTraffic 65405568 # Total snoop traffic (bytes) system.cpu.toL2Bus.snoop_fanout::samples 11032061 # Request fanout histogram system.cpu.toL2Bus.snoop_fanout::mean 0.000102 # Request fanout histogram system.cpu.toL2Bus.snoop_fanout::stdev 0.010084 # Request fanout histogram @@ -536,6 +537,7 @@ system.membus.pkt_count::total 5823129 # Pa system.membus.pkt_size_system.cpu.l2cache.mem_side::system.physmem.port 190349056 # Cumulative packet size per connected master and slave (bytes) system.membus.pkt_size::total 190349056 # Cumulative packet size per connected master and slave (bytes) system.membus.snoops 0 # Total snoops (count) +system.membus.snoopTraffic 0 # Total snoop traffic 
(bytes) system.membus.snoop_fanout::samples 3870887 # Request fanout histogram system.membus.snoop_fanout::mean 0 # Request fanout histogram system.membus.snoop_fanout::stdev 0 # Request fanout histogram diff --git a/tests/long/se/60.bzip2/ref/arm/linux/minor-timing/config.ini b/tests/long/se/60.bzip2/ref/arm/linux/minor-timing/config.ini index cb09befab..9ef5c346e 100644 --- a/tests/long/se/60.bzip2/ref/arm/linux/minor-timing/config.ini +++ b/tests/long/se/60.bzip2/ref/arm/linux/minor-timing/config.ini @@ -14,7 +14,9 @@ children=clk_domain cpu cpu_clk_domain dvfs_handler membus physmem voltage_domai boot_osflags=a cache_line_size=64 clk_domain=system.clk_domain +default_p_state=UNDEFINED eventq_index=0 +exit_on_work_items=false init_param=0 kernel= kernel_addr_check=true @@ -24,9 +26,16 @@ mem_mode=timing mem_ranges= memories=system.physmem mmap_using_noreserve=false +multi_thread=false num_work_ids=16 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null readfile= symbolfile= +thermal_components= +thermal_model=Null work_begin_ckpt_count=0 work_begin_cpu_id_exit=-1 work_begin_exit_count=0 @@ -55,6 +64,7 @@ decodeCycleInput=true decodeInputBufferSize=3 decodeInputWidth=2 decodeToExecuteForwardDelay=1 +default_p_state=UNDEFINED do_checkpoint_insts=true do_quiesce=true do_statistics_insts=true @@ -99,12 +109,17 @@ max_insts_any_thread=0 max_loads_all_threads=0 max_loads_any_thread=0 numThreads=1 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null profile=0 progress_interval=0 simpoint_start_insts= socket_id=0 switched_out=false system=system +threadPolicy=RoundRobin tracer=system.cpu.tracer workload=system.cpu.workload dcache_port=system.cpu.dcache.cpu_side @@ -120,11 +135,18 @@ choicePredictorSize=8192 eventq_index=0 globalCtrBits=2 globalPredictorSize=8192 +indirectHashGHR=true +indirectHashTargets=true +indirectPathLength=3 +indirectSets=256 +indirectTagSize=16 +indirectWays=2 instShiftAmt=2 localCtrBits=2 localHistoryTableSize=2048 localPredictorSize=2048 numThreads=1 +useIndirect=true [system.cpu.dcache] type=Cache @@ -132,13 +154,18 @@ children=tags addr_ranges=0:18446744073709551615 assoc=2 clk_domain=system.cpu_clk_domain +clusivity=mostly_incl +default_p_state=UNDEFINED demand_mshr_reserve=1 eventq_index=0 -forward_snoops=true hit_latency=2 is_read_only=false max_miss_count=0 mshrs=4 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null prefetch_on_access=false prefetcher=Null response_latency=2 @@ -148,6 +175,7 @@ system=system tags=system.cpu.dcache.tags tgts_per_mshr=20 write_buffers=8 +writeback_clean=false cpu_side=system.cpu.dcache_port mem_side=system.cpu.toL2Bus.slave[1] @@ -156,8 +184,13 @@ type=LRU assoc=2 block_size=64 clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 hit_latency=2 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sequential_access=false size=262144 @@ -180,9 +213,14 @@ walker=system.cpu.dstage2_mmu.stage2_tlb.walker [system.cpu.dstage2_mmu.stage2_tlb.walker] type=ArmTableWalker clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 is_stage2=true num_squash_per_cycle=2 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sys=system [system.cpu.dtb] @@ -196,9 +234,14 @@ walker=system.cpu.dtb.walker [system.cpu.dtb.walker] type=ArmTableWalker 
clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 is_stage2=false num_squash_per_cycle=2 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sys=system port=system.cpu.toL2Bus.slave[3] @@ -591,13 +634,18 @@ children=tags addr_ranges=0:18446744073709551615 assoc=2 clk_domain=system.cpu_clk_domain +clusivity=mostly_incl +default_p_state=UNDEFINED demand_mshr_reserve=1 eventq_index=0 -forward_snoops=true hit_latency=2 is_read_only=true max_miss_count=0 mshrs=4 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null prefetch_on_access=false prefetcher=Null response_latency=2 @@ -607,6 +655,7 @@ system=system tags=system.cpu.icache.tags tgts_per_mshr=20 write_buffers=8 +writeback_clean=true cpu_side=system.cpu.icache_port mem_side=system.cpu.toL2Bus.slave[0] @@ -615,8 +664,13 @@ type=LRU assoc=2 block_size=64 clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 hit_latency=2 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sequential_access=false size=131072 @@ -626,6 +680,7 @@ eventq_index=0 [system.cpu.isa] type=ArmISA +decoderFlavour=Generic eventq_index=0 fpsid=1090793632 id_aa64afr0_el1=0 @@ -673,9 +728,14 @@ walker=system.cpu.istage2_mmu.stage2_tlb.walker [system.cpu.istage2_mmu.stage2_tlb.walker] type=ArmTableWalker clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 is_stage2=true num_squash_per_cycle=2 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sys=system [system.cpu.itb] @@ -689,9 +749,14 @@ walker=system.cpu.itb.walker [system.cpu.itb.walker] type=ArmTableWalker clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 is_stage2=false num_squash_per_cycle=2 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sys=system port=system.cpu.toL2Bus.slave[2] @@ -701,13 +766,18 @@ children=tags addr_ranges=0:18446744073709551615 assoc=8 clk_domain=system.cpu_clk_domain +clusivity=mostly_incl +default_p_state=UNDEFINED demand_mshr_reserve=1 eventq_index=0 -forward_snoops=true hit_latency=20 is_read_only=false max_miss_count=0 mshrs=20 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null prefetch_on_access=false prefetcher=Null response_latency=20 @@ -717,6 +787,7 @@ system=system tags=system.cpu.l2cache.tags tgts_per_mshr=12 write_buffers=8 +writeback_clean=false cpu_side=system.cpu.toL2Bus.master[0] mem_side=system.membus.slave[1] @@ -725,19 +796,31 @@ type=LRU assoc=8 block_size=64 clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 hit_latency=20 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sequential_access=false size=2097152 [system.cpu.toL2Bus] type=CoherentXBar +children=snoop_filter clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 forward_latency=0 frontend_latency=1 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +point_of_coherency=false +power_model=Null response_latency=1 -snoop_filter=Null +snoop_filter=system.cpu.toL2Bus.snoop_filter snoop_response_latency=1 system=system use_default_range=false @@ -745,6 +828,13 @@ width=32 master=system.cpu.l2cache.cpu_side slave=system.cpu.icache.mem_side 
system.cpu.dcache.mem_side system.cpu.itb.walker.port system.cpu.dtb.walker.port +[system.cpu.toL2Bus.snoop_filter] +type=SnoopFilter +eventq_index=0 +lookup_latency=0 +max_capacity=8388608 +system=system + [system.cpu.tracer] type=ExeTracer eventq_index=0 @@ -759,7 +849,7 @@ env= errout=cerr euid=100 eventq_index=0 -executable=/scratch/nilay/GEM5/dist/m5/cpu2000/binaries/arm/linux/bzip2 +executable=/arm/projectscratch/randd/systems/dist/cpu2000/binaries/arm/linux/bzip2 gid=100 input=cin kvmInSE=false @@ -791,9 +881,15 @@ transition_latency=100000000 [system.membus] type=CoherentXBar clk_domain=system.clk_domain +default_p_state=UNDEFINED eventq_index=0 forward_latency=4 frontend_latency=3 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +point_of_coherency=true +power_model=Null response_latency=2 snoop_filter=Null snoop_response_latency=4 @@ -837,6 +933,7 @@ burst_length=8 channels=1 clk_domain=system.clk_domain conf_table_reported=true +default_p_state=UNDEFINED device_bus_width=8 device_rowbuffer_size=1024 device_size=536870912 @@ -848,7 +945,11 @@ max_accesses_per_row=16 mem_sched_policy=frfcfs min_writes_per_switch=16 null=false +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 page_policy=open_adaptive +power_model=Null range=0:134217727 ranks_per_channel=2 read_buffer_size=32 diff --git a/tests/long/se/60.bzip2/ref/arm/linux/minor-timing/simerr b/tests/long/se/60.bzip2/ref/arm/linux/minor-timing/simerr index be90b0340..caeab8324 100755 --- a/tests/long/se/60.bzip2/ref/arm/linux/minor-timing/simerr +++ b/tests/long/se/60.bzip2/ref/arm/linux/minor-timing/simerr @@ -1,3 +1,4 @@ warn: DRAM device capacity (8192 Mbytes) does not match the address range assigned (128 Mbytes) warn: Sockets disabled, not accepting gdb connections +warn: ClockedObject: More than one power state change request encountered within the same simulation tick warn: CP14 unimplemented crn[8], opc1[2], crm[9], opc2[4] diff --git a/tests/long/se/60.bzip2/ref/arm/linux/minor-timing/simout b/tests/long/se/60.bzip2/ref/arm/linux/minor-timing/simout index 1664fb28c..b6bf1e68a 100755 --- a/tests/long/se/60.bzip2/ref/arm/linux/minor-timing/simout +++ b/tests/long/se/60.bzip2/ref/arm/linux/minor-timing/simout @@ -3,10 +3,10 @@ Redirecting stderr to build/ARM/tests/opt/long/se/60.bzip2/arm/linux/minor-timin gem5 Simulator System. http://gem5.org gem5 is copyrighted software; use the --copyright option for details. -gem5 compiled Sep 14 2015 23:29:19 -gem5 started Sep 15 2015 02:59:16 -gem5 executing on ribera.cs.wisc.edu -command line: build/ARM/gem5.opt -d build/ARM/tests/opt/long/se/60.bzip2/arm/linux/minor-timing -re /scratch/nilay/GEM5/gem5/tests/run.py build/ARM/tests/opt/long/se/60.bzip2/arm/linux/minor-timing +gem5 compiled Jul 21 2016 14:37:41 +gem5 started Jul 21 2016 14:58:37 +gem5 executing on e108600-lin, pid 24092 +command line: /work/curdun01/gem5-external.hg/build/ARM/gem5.opt -d build/ARM/tests/opt/long/se/60.bzip2/arm/linux/minor-timing -re /work/curdun01/gem5-external.hg/tests/testing/../run.py long/se/60.bzip2/arm/linux/minor-timing Global frequency set at 1000000000000 ticks per second info: Entering event queue @ 0. Starting simulation... @@ -27,4 +27,4 @@ Uncompressing Data Uncompressed data 1048576 bytes in length Uncompressed data compared correctly Tested 1MB buffer: OK! 
-Exiting @ tick 1116876142500 because target called exit() +Exiting @ tick 1128033563500 because target called exit() diff --git a/tests/long/se/60.bzip2/ref/arm/linux/minor-timing/stats.txt b/tests/long/se/60.bzip2/ref/arm/linux/minor-timing/stats.txt index d91451297..a63511156 100644 --- a/tests/long/se/60.bzip2/ref/arm/linux/minor-timing/stats.txt +++ b/tests/long/se/60.bzip2/ref/arm/linux/minor-timing/stats.txt @@ -1,106 +1,106 @@ ---------- Begin Simulation Statistics ---------- -sim_seconds 1.116866 # Number of seconds simulated -sim_ticks 1116865668500 # Number of ticks simulated -final_tick 1116865668500 # Number of ticks from beginning of simulation (restored from checkpoints and never reset) +sim_seconds 1.128034 # Number of seconds simulated +sim_ticks 1128033563500 # Number of ticks simulated +final_tick 1128033563500 # Number of ticks from beginning of simulation (restored from checkpoints and never reset) sim_freq 1000000000000 # Frequency of simulated ticks -host_inst_rate 380135 # Simulator instruction rate (inst/s) -host_op_rate 409538 # Simulator op (including micro ops) rate (op/s) -host_tick_rate 274873670 # Simulator tick rate (ticks/s) -host_mem_usage 314372 # Number of bytes of host memory used -host_seconds 4063.20 # Real time elapsed on the host +host_inst_rate 296898 # Simulator instruction rate (inst/s) +host_op_rate 319862 # Simulator op (including micro ops) rate (op/s) +host_tick_rate 216832014 # Simulator tick rate (ticks/s) +host_mem_usage 266856 # Number of bytes of host memory used +host_seconds 5202.34 # Real time elapsed on the host sim_insts 1544563088 # Number of instructions simulated sim_ops 1664032481 # Number of ops (including micro ops) simulated system.voltage_domain.voltage 1 # Voltage in Volts system.clk_domain.clock 1000 # Clock period in ticks -system.physmem.pwrStateResidencyTicks::UNDEFINED 1116865668500 # Cumulative time (in ticks) in various power states +system.physmem.pwrStateResidencyTicks::UNDEFINED 1128033563500 # Cumulative time (in ticks) in various power states system.physmem.bytes_read::cpu.inst 50112 # Number of bytes read from this memory -system.physmem.bytes_read::cpu.data 130931712 # Number of bytes read from this memory -system.physmem.bytes_read::total 130981824 # Number of bytes read from this memory +system.physmem.bytes_read::cpu.data 130888128 # Number of bytes read from this memory +system.physmem.bytes_read::total 130938240 # Number of bytes read from this memory system.physmem.bytes_inst_read::cpu.inst 50112 # Number of instructions bytes read from this memory system.physmem.bytes_inst_read::total 50112 # Number of instructions bytes read from this memory -system.physmem.bytes_written::writebacks 67207872 # Number of bytes written to this memory -system.physmem.bytes_written::total 67207872 # Number of bytes written to this memory +system.physmem.bytes_written::writebacks 67194432 # Number of bytes written to this memory +system.physmem.bytes_written::total 67194432 # Number of bytes written to this memory system.physmem.num_reads::cpu.inst 783 # Number of read requests responded to by this memory -system.physmem.num_reads::cpu.data 2045808 # Number of read requests responded to by this memory -system.physmem.num_reads::total 2046591 # Number of read requests responded to by this memory -system.physmem.num_writes::writebacks 1050123 # Number of write requests responded to by this memory -system.physmem.num_writes::total 1050123 # Number of write requests responded to by this memory -system.physmem.bw_read::cpu.inst 
44868 # Total read bandwidth from this memory (bytes/s) -system.physmem.bw_read::cpu.data 117231388 # Total read bandwidth from this memory (bytes/s) -system.physmem.bw_read::total 117276256 # Total read bandwidth from this memory (bytes/s) -system.physmem.bw_inst_read::cpu.inst 44868 # Instruction read bandwidth from this memory (bytes/s) -system.physmem.bw_inst_read::total 44868 # Instruction read bandwidth from this memory (bytes/s) -system.physmem.bw_write::writebacks 60175430 # Write bandwidth from this memory (bytes/s) -system.physmem.bw_write::total 60175430 # Write bandwidth from this memory (bytes/s) -system.physmem.bw_total::writebacks 60175430 # Total bandwidth to/from this memory (bytes/s) -system.physmem.bw_total::cpu.inst 44868 # Total bandwidth to/from this memory (bytes/s) -system.physmem.bw_total::cpu.data 117231388 # Total bandwidth to/from this memory (bytes/s) -system.physmem.bw_total::total 177451686 # Total bandwidth to/from this memory (bytes/s) -system.physmem.readReqs 2046591 # Number of read requests accepted -system.physmem.writeReqs 1050123 # Number of write requests accepted -system.physmem.readBursts 2046591 # Number of DRAM read bursts, including those serviced by the write queue -system.physmem.writeBursts 1050123 # Number of DRAM write bursts, including those merged in the write queue -system.physmem.bytesReadDRAM 130898176 # Total number of bytes read from DRAM -system.physmem.bytesReadWrQ 83648 # Total number of bytes read from write queue -system.physmem.bytesWritten 67206400 # Total number of bytes written to DRAM -system.physmem.bytesReadSys 130981824 # Total read bytes from the system interface side -system.physmem.bytesWrittenSys 67207872 # Total written bytes from the system interface side -system.physmem.servicedByWrQ 1307 # Number of DRAM read bursts serviced by the write queue +system.physmem.num_reads::cpu.data 2045127 # Number of read requests responded to by this memory +system.physmem.num_reads::total 2045910 # Number of read requests responded to by this memory +system.physmem.num_writes::writebacks 1049913 # Number of write requests responded to by this memory +system.physmem.num_writes::total 1049913 # Number of write requests responded to by this memory +system.physmem.bw_read::cpu.inst 44424 # Total read bandwidth from this memory (bytes/s) +system.physmem.bw_read::cpu.data 116032122 # Total read bandwidth from this memory (bytes/s) +system.physmem.bw_read::total 116076546 # Total read bandwidth from this memory (bytes/s) +system.physmem.bw_inst_read::cpu.inst 44424 # Instruction read bandwidth from this memory (bytes/s) +system.physmem.bw_inst_read::total 44424 # Instruction read bandwidth from this memory (bytes/s) +system.physmem.bw_write::writebacks 59567759 # Write bandwidth from this memory (bytes/s) +system.physmem.bw_write::total 59567759 # Write bandwidth from this memory (bytes/s) +system.physmem.bw_total::writebacks 59567759 # Total bandwidth to/from this memory (bytes/s) +system.physmem.bw_total::cpu.inst 44424 # Total bandwidth to/from this memory (bytes/s) +system.physmem.bw_total::cpu.data 116032122 # Total bandwidth to/from this memory (bytes/s) +system.physmem.bw_total::total 175644306 # Total bandwidth to/from this memory (bytes/s) +system.physmem.readReqs 2045910 # Number of read requests accepted +system.physmem.writeReqs 1049913 # Number of write requests accepted +system.physmem.readBursts 2045910 # Number of DRAM read bursts, including those serviced by the write queue +system.physmem.writeBursts 1049913 # Number 
of DRAM write bursts, including those merged in the write queue +system.physmem.bytesReadDRAM 130851840 # Total number of bytes read from DRAM +system.physmem.bytesReadWrQ 86400 # Total number of bytes read from write queue +system.physmem.bytesWritten 67192960 # Total number of bytes written to DRAM +system.physmem.bytesReadSys 130938240 # Total read bytes from the system interface side +system.physmem.bytesWrittenSys 67194432 # Total written bytes from the system interface side +system.physmem.servicedByWrQ 1350 # Number of DRAM read bursts serviced by the write queue system.physmem.mergedWrBursts 0 # Number of DRAM write bursts merged with an existing one system.physmem.neitherReadNorWriteReqs 0 # Number of requests that are neither read nor write -system.physmem.perBankRdBursts::0 127279 # Per bank write bursts -system.physmem.perBankRdBursts::1 124661 # Per bank write bursts -system.physmem.perBankRdBursts::2 121601 # Per bank write bursts -system.physmem.perBankRdBursts::3 123656 # Per bank write bursts -system.physmem.perBankRdBursts::4 122620 # Per bank write bursts -system.physmem.perBankRdBursts::5 122679 # Per bank write bursts -system.physmem.perBankRdBursts::6 123247 # Per bank write bursts -system.physmem.perBankRdBursts::7 123770 # Per bank write bursts -system.physmem.perBankRdBursts::8 131396 # Per bank write bursts -system.physmem.perBankRdBursts::9 133511 # Per bank write bursts -system.physmem.perBankRdBursts::10 132081 # Per bank write bursts -system.physmem.perBankRdBursts::11 133308 # Per bank write bursts -system.physmem.perBankRdBursts::12 133249 # Per bank write bursts -system.physmem.perBankRdBursts::13 133362 # Per bank write bursts -system.physmem.perBankRdBursts::14 129309 # Per bank write bursts -system.physmem.perBankRdBursts::15 129555 # Per bank write bursts -system.physmem.perBankWrBursts::0 66136 # Per bank write bursts -system.physmem.perBankWrBursts::1 64410 # Per bank write bursts -system.physmem.perBankWrBursts::2 62576 # Per bank write bursts -system.physmem.perBankWrBursts::3 63006 # Per bank write bursts -system.physmem.perBankWrBursts::4 63000 # Per bank write bursts -system.physmem.perBankWrBursts::5 63100 # Per bank write bursts -system.physmem.perBankWrBursts::6 64443 # Per bank write bursts -system.physmem.perBankWrBursts::7 65436 # Per bank write bursts -system.physmem.perBankWrBursts::8 67310 # Per bank write bursts -system.physmem.perBankWrBursts::9 67797 # Per bank write bursts -system.physmem.perBankWrBursts::10 67549 # Per bank write bursts -system.physmem.perBankWrBursts::11 67882 # Per bank write bursts -system.physmem.perBankWrBursts::12 67326 # Per bank write bursts -system.physmem.perBankWrBursts::13 67793 # Per bank write bursts -system.physmem.perBankWrBursts::14 66482 # Per bank write bursts -system.physmem.perBankWrBursts::15 65854 # Per bank write bursts +system.physmem.perBankRdBursts::0 127234 # Per bank write bursts +system.physmem.perBankRdBursts::1 124635 # Per bank write bursts +system.physmem.perBankRdBursts::2 121565 # Per bank write bursts +system.physmem.perBankRdBursts::3 123578 # Per bank write bursts +system.physmem.perBankRdBursts::4 122544 # Per bank write bursts +system.physmem.perBankRdBursts::5 122632 # Per bank write bursts +system.physmem.perBankRdBursts::6 123221 # Per bank write bursts +system.physmem.perBankRdBursts::7 123735 # Per bank write bursts +system.physmem.perBankRdBursts::8 131340 # Per bank write bursts +system.physmem.perBankRdBursts::9 133478 # Per bank write bursts 
+system.physmem.perBankRdBursts::10 132036 # Per bank write bursts +system.physmem.perBankRdBursts::11 133242 # Per bank write bursts +system.physmem.perBankRdBursts::12 133211 # Per bank write bursts +system.physmem.perBankRdBursts::13 133326 # Per bank write bursts +system.physmem.perBankRdBursts::14 129274 # Per bank write bursts +system.physmem.perBankRdBursts::15 129509 # Per bank write bursts +system.physmem.perBankWrBursts::0 66120 # Per bank write bursts +system.physmem.perBankWrBursts::1 64398 # Per bank write bursts +system.physmem.perBankWrBursts::2 62563 # Per bank write bursts +system.physmem.perBankWrBursts::3 62980 # Per bank write bursts +system.physmem.perBankWrBursts::4 62981 # Per bank write bursts +system.physmem.perBankWrBursts::5 63086 # Per bank write bursts +system.physmem.perBankWrBursts::6 64437 # Per bank write bursts +system.physmem.perBankWrBursts::7 65431 # Per bank write bursts +system.physmem.perBankWrBursts::8 67296 # Per bank write bursts +system.physmem.perBankWrBursts::9 67792 # Per bank write bursts +system.physmem.perBankWrBursts::10 67535 # Per bank write bursts +system.physmem.perBankWrBursts::11 67858 # Per bank write bursts +system.physmem.perBankWrBursts::12 67312 # Per bank write bursts +system.physmem.perBankWrBursts::13 67784 # Per bank write bursts +system.physmem.perBankWrBursts::14 66474 # Per bank write bursts +system.physmem.perBankWrBursts::15 65843 # Per bank write bursts system.physmem.numRdRetry 0 # Number of times read queue was full causing retry system.physmem.numWrRetry 0 # Number of times write queue was full causing retry -system.physmem.totGap 1116865574000 # Total gap between requests +system.physmem.totGap 1128033469500 # Total gap between requests system.physmem.readPktSize::0 0 # Read request sizes (log2) system.physmem.readPktSize::1 0 # Read request sizes (log2) system.physmem.readPktSize::2 0 # Read request sizes (log2) system.physmem.readPktSize::3 0 # Read request sizes (log2) system.physmem.readPktSize::4 0 # Read request sizes (log2) system.physmem.readPktSize::5 0 # Read request sizes (log2) -system.physmem.readPktSize::6 2046591 # Read request sizes (log2) +system.physmem.readPktSize::6 2045910 # Read request sizes (log2) system.physmem.writePktSize::0 0 # Write request sizes (log2) system.physmem.writePktSize::1 0 # Write request sizes (log2) system.physmem.writePktSize::2 0 # Write request sizes (log2) system.physmem.writePktSize::3 0 # Write request sizes (log2) system.physmem.writePktSize::4 0 # Write request sizes (log2) system.physmem.writePktSize::5 0 # Write request sizes (log2) -system.physmem.writePktSize::6 1050123 # Write request sizes (log2) -system.physmem.rdQLenPdf::0 1916619 # What read queue length does an incoming req see -system.physmem.rdQLenPdf::1 128648 # What read queue length does an incoming req see -system.physmem.rdQLenPdf::2 17 # What read queue length does an incoming req see +system.physmem.writePktSize::6 1049913 # Write request sizes (log2) +system.physmem.rdQLenPdf::0 1917702 # What read queue length does an incoming req see +system.physmem.rdQLenPdf::1 126844 # What read queue length does an incoming req see +system.physmem.rdQLenPdf::2 14 # What read queue length does an incoming req see system.physmem.rdQLenPdf::3 0 # What read queue length does an incoming req see system.physmem.rdQLenPdf::4 0 # What read queue length does an incoming req see system.physmem.rdQLenPdf::5 0 # What read queue length does an incoming req see @@ -145,30 +145,30 @@ system.physmem.wrQLenPdf::11 1 # Wh 
system.physmem.wrQLenPdf::12 1 # What write queue length does an incoming req see system.physmem.wrQLenPdf::13 1 # What write queue length does an incoming req see system.physmem.wrQLenPdf::14 1 # What write queue length does an incoming req see -system.physmem.wrQLenPdf::15 32746 # What write queue length does an incoming req see -system.physmem.wrQLenPdf::16 33984 # What write queue length does an incoming req see -system.physmem.wrQLenPdf::17 56911 # What write queue length does an incoming req see -system.physmem.wrQLenPdf::18 61204 # What write queue length does an incoming req see -system.physmem.wrQLenPdf::19 61629 # What write queue length does an incoming req see -system.physmem.wrQLenPdf::20 61690 # What write queue length does an incoming req see -system.physmem.wrQLenPdf::21 61591 # What write queue length does an incoming req see -system.physmem.wrQLenPdf::22 61663 # What write queue length does an incoming req see -system.physmem.wrQLenPdf::23 61651 # What write queue length does an incoming req see -system.physmem.wrQLenPdf::24 61697 # What write queue length does an incoming req see -system.physmem.wrQLenPdf::25 61747 # What write queue length does an incoming req see -system.physmem.wrQLenPdf::26 61696 # What write queue length does an incoming req see -system.physmem.wrQLenPdf::27 62170 # What write queue length does an incoming req see -system.physmem.wrQLenPdf::28 62557 # What write queue length does an incoming req see -system.physmem.wrQLenPdf::29 62067 # What write queue length does an incoming req see -system.physmem.wrQLenPdf::30 62573 # What write queue length does an incoming req see -system.physmem.wrQLenPdf::31 61301 # What write queue length does an incoming req see -system.physmem.wrQLenPdf::32 61138 # What write queue length does an incoming req see -system.physmem.wrQLenPdf::33 84 # What write queue length does an incoming req see -system.physmem.wrQLenPdf::34 5 # What write queue length does an incoming req see -system.physmem.wrQLenPdf::35 1 # What write queue length does an incoming req see -system.physmem.wrQLenPdf::36 1 # What write queue length does an incoming req see -system.physmem.wrQLenPdf::37 1 # What write queue length does an incoming req see -system.physmem.wrQLenPdf::38 1 # What write queue length does an incoming req see +system.physmem.wrQLenPdf::15 32849 # What write queue length does an incoming req see +system.physmem.wrQLenPdf::16 34013 # What write queue length does an incoming req see +system.physmem.wrQLenPdf::17 57015 # What write queue length does an incoming req see +system.physmem.wrQLenPdf::18 61217 # What write queue length does an incoming req see +system.physmem.wrQLenPdf::19 61623 # What write queue length does an incoming req see +system.physmem.wrQLenPdf::20 61654 # What write queue length does an incoming req see +system.physmem.wrQLenPdf::21 61600 # What write queue length does an incoming req see +system.physmem.wrQLenPdf::22 61647 # What write queue length does an incoming req see +system.physmem.wrQLenPdf::23 61568 # What write queue length does an incoming req see +system.physmem.wrQLenPdf::24 61682 # What write queue length does an incoming req see +system.physmem.wrQLenPdf::25 61684 # What write queue length does an incoming req see +system.physmem.wrQLenPdf::26 61622 # What write queue length does an incoming req see +system.physmem.wrQLenPdf::27 62149 # What write queue length does an incoming req see +system.physmem.wrQLenPdf::28 62542 # What write queue length does an incoming req see 
+system.physmem.wrQLenPdf::29 61998 # What write queue length does an incoming req see +system.physmem.wrQLenPdf::30 62533 # What write queue length does an incoming req see +system.physmem.wrQLenPdf::31 61281 # What write queue length does an incoming req see +system.physmem.wrQLenPdf::32 61114 # What write queue length does an incoming req see +system.physmem.wrQLenPdf::33 97 # What write queue length does an incoming req see +system.physmem.wrQLenPdf::34 8 # What write queue length does an incoming req see +system.physmem.wrQLenPdf::35 2 # What write queue length does an incoming req see +system.physmem.wrQLenPdf::36 0 # What write queue length does an incoming req see +system.physmem.wrQLenPdf::37 0 # What write queue length does an incoming req see +system.physmem.wrQLenPdf::38 0 # What write queue length does an incoming req see system.physmem.wrQLenPdf::39 0 # What write queue length does an incoming req see system.physmem.wrQLenPdf::40 0 # What write queue length does an incoming req see system.physmem.wrQLenPdf::41 0 # What write queue length does an incoming req see @@ -194,113 +194,113 @@ system.physmem.wrQLenPdf::60 0 # Wh system.physmem.wrQLenPdf::61 0 # What write queue length does an incoming req see system.physmem.wrQLenPdf::62 0 # What write queue length does an incoming req see system.physmem.wrQLenPdf::63 0 # What write queue length does an incoming req see -system.physmem.bytesPerActivate::samples 1910138 # Bytes accessed per row activation -system.physmem.bytesPerActivate::mean 103.711175 # Bytes accessed per row activation -system.physmem.bytesPerActivate::gmean 81.836423 # Bytes accessed per row activation -system.physmem.bytesPerActivate::stdev 125.540224 # Bytes accessed per row activation -system.physmem.bytesPerActivate::0-127 1485349 77.76% 77.76% # Bytes accessed per row activation -system.physmem.bytesPerActivate::128-255 305158 15.98% 93.74% # Bytes accessed per row activation -system.physmem.bytesPerActivate::256-383 52532 2.75% 96.49% # Bytes accessed per row activation -system.physmem.bytesPerActivate::384-511 21047 1.10% 97.59% # Bytes accessed per row activation -system.physmem.bytesPerActivate::512-639 13374 0.70% 98.29% # Bytes accessed per row activation -system.physmem.bytesPerActivate::640-767 7565 0.40% 98.69% # Bytes accessed per row activation -system.physmem.bytesPerActivate::768-895 5491 0.29% 98.97% # Bytes accessed per row activation -system.physmem.bytesPerActivate::896-1023 5162 0.27% 99.24% # Bytes accessed per row activation -system.physmem.bytesPerActivate::1024-1151 14460 0.76% 100.00% # Bytes accessed per row activation -system.physmem.bytesPerActivate::total 1910138 # Bytes accessed per row activation -system.physmem.rdPerTurnAround::samples 61136 # Reads before turning the bus around for writes -system.physmem.rdPerTurnAround::mean 33.411672 # Reads before turning the bus around for writes -system.physmem.rdPerTurnAround::stdev 159.590236 # Reads before turning the bus around for writes -system.physmem.rdPerTurnAround::0-1023 61090 99.92% 99.92% # Reads before turning the bus around for writes -system.physmem.rdPerTurnAround::1024-2047 21 0.03% 99.96% # Reads before turning the bus around for writes +system.physmem.bytesPerActivate::samples 1910047 # Bytes accessed per row activation +system.physmem.bytesPerActivate::mean 103.685692 # Bytes accessed per row activation +system.physmem.bytesPerActivate::gmean 81.827100 # Bytes accessed per row activation +system.physmem.bytesPerActivate::stdev 125.490486 # Bytes accessed per row 
activation +system.physmem.bytesPerActivate::0-127 1485463 77.77% 77.77% # Bytes accessed per row activation +system.physmem.bytesPerActivate::128-255 305174 15.98% 93.75% # Bytes accessed per row activation +system.physmem.bytesPerActivate::256-383 52509 2.75% 96.50% # Bytes accessed per row activation +system.physmem.bytesPerActivate::384-511 20929 1.10% 97.59% # Bytes accessed per row activation +system.physmem.bytesPerActivate::512-639 13256 0.69% 98.29% # Bytes accessed per row activation +system.physmem.bytesPerActivate::640-767 7619 0.40% 98.69% # Bytes accessed per row activation +system.physmem.bytesPerActivate::768-895 5519 0.29% 98.97% # Bytes accessed per row activation +system.physmem.bytesPerActivate::896-1023 5102 0.27% 99.24% # Bytes accessed per row activation +system.physmem.bytesPerActivate::1024-1151 14476 0.76% 100.00% # Bytes accessed per row activation +system.physmem.bytesPerActivate::total 1910047 # Bytes accessed per row activation +system.physmem.rdPerTurnAround::samples 61113 # Reads before turning the bus around for writes +system.physmem.rdPerTurnAround::mean 33.412400 # Reads before turning the bus around for writes +system.physmem.rdPerTurnAround::stdev 159.518866 # Reads before turning the bus around for writes +system.physmem.rdPerTurnAround::0-1023 61065 99.92% 99.92% # Reads before turning the bus around for writes +system.physmem.rdPerTurnAround::1024-2047 24 0.04% 99.96% # Reads before turning the bus around for writes system.physmem.rdPerTurnAround::2048-3071 10 0.02% 99.98% # Reads before turning the bus around for writes -system.physmem.rdPerTurnAround::3072-4095 7 0.01% 99.99% # Reads before turning the bus around for writes +system.physmem.rdPerTurnAround::3072-4095 6 0.01% 99.99% # Reads before turning the bus around for writes system.physmem.rdPerTurnAround::4096-5119 3 0.00% 99.99% # Reads before turning the bus around for writes system.physmem.rdPerTurnAround::9216-10239 2 0.00% 100.00% # Reads before turning the bus around for writes system.physmem.rdPerTurnAround::13312-14335 1 0.00% 100.00% # Reads before turning the bus around for writes system.physmem.rdPerTurnAround::15360-16383 1 0.00% 100.00% # Reads before turning the bus around for writes system.physmem.rdPerTurnAround::22528-23551 1 0.00% 100.00% # Reads before turning the bus around for writes -system.physmem.rdPerTurnAround::total 61136 # Reads before turning the bus around for writes -system.physmem.wrPerTurnAround::samples 61136 # Writes before turning the bus around for reads -system.physmem.wrPerTurnAround::mean 17.176459 # Writes before turning the bus around for reads -system.physmem.wrPerTurnAround::gmean 17.141461 # Writes before turning the bus around for reads -system.physmem.wrPerTurnAround::stdev 1.097536 # Writes before turning the bus around for reads -system.physmem.wrPerTurnAround::16 27008 44.18% 44.18% # Writes before turning the bus around for reads -system.physmem.wrPerTurnAround::17 1128 1.85% 46.02% # Writes before turning the bus around for reads -system.physmem.wrPerTurnAround::18 28688 46.92% 92.95% # Writes before turning the bus around for reads -system.physmem.wrPerTurnAround::19 3895 6.37% 99.32% # Writes before turning the bus around for reads -system.physmem.wrPerTurnAround::20 363 0.59% 99.91% # Writes before turning the bus around for reads -system.physmem.wrPerTurnAround::21 46 0.08% 99.99% # Writes before turning the bus around for reads -system.physmem.wrPerTurnAround::22 6 0.01% 100.00% # Writes before turning the bus around for reads 
-system.physmem.wrPerTurnAround::23 1 0.00% 100.00% # Writes before turning the bus around for reads -system.physmem.wrPerTurnAround::26 1 0.00% 100.00% # Writes before turning the bus around for reads -system.physmem.wrPerTurnAround::total 61136 # Writes before turning the bus around for reads -system.physmem.totQLat 38124700750 # Total ticks spent queuing -system.physmem.totMemAccLat 76473775750 # Total ticks spent from burst creation until serviced by the DRAM -system.physmem.totBusLat 10226420000 # Total ticks spent in databus transfers -system.physmem.avgQLat 18640.30 # Average queueing delay per DRAM burst +system.physmem.rdPerTurnAround::total 61113 # Reads before turning the bus around for writes +system.physmem.wrPerTurnAround::samples 61113 # Writes before turning the bus around for reads +system.physmem.wrPerTurnAround::mean 17.179487 # Writes before turning the bus around for reads +system.physmem.wrPerTurnAround::gmean 17.144319 # Writes before turning the bus around for reads +system.physmem.wrPerTurnAround::stdev 1.100540 # Writes before turning the bus around for reads +system.physmem.wrPerTurnAround::16 26981 44.15% 44.15% # Writes before turning the bus around for reads +system.physmem.wrPerTurnAround::17 1028 1.68% 45.83% # Writes before turning the bus around for reads +system.physmem.wrPerTurnAround::18 28814 47.15% 92.98% # Writes before turning the bus around for reads +system.physmem.wrPerTurnAround::19 3825 6.26% 99.24% # Writes before turning the bus around for reads +system.physmem.wrPerTurnAround::20 400 0.65% 99.89% # Writes before turning the bus around for reads +system.physmem.wrPerTurnAround::21 47 0.08% 99.97% # Writes before turning the bus around for reads +system.physmem.wrPerTurnAround::22 11 0.02% 99.99% # Writes before turning the bus around for reads +system.physmem.wrPerTurnAround::23 6 0.01% 100.00% # Writes before turning the bus around for reads +system.physmem.wrPerTurnAround::24 1 0.00% 100.00% # Writes before turning the bus around for reads +system.physmem.wrPerTurnAround::total 61113 # Writes before turning the bus around for reads +system.physmem.totQLat 38097515250 # Total ticks spent queuing +system.physmem.totMemAccLat 76433015250 # Total ticks spent from burst creation until serviced by the DRAM +system.physmem.totBusLat 10222800000 # Total ticks spent in databus transfers +system.physmem.avgQLat 18633.60 # Average queueing delay per DRAM burst system.physmem.avgBusLat 5000.00 # Average bus latency per DRAM burst -system.physmem.avgMemAccLat 37390.30 # Average memory access latency per DRAM burst -system.physmem.avgRdBW 117.20 # Average DRAM read bandwidth in MiByte/s -system.physmem.avgWrBW 60.17 # Average achieved write bandwidth in MiByte/s -system.physmem.avgRdBWSys 117.28 # Average system read bandwidth in MiByte/s -system.physmem.avgWrBWSys 60.18 # Average system write bandwidth in MiByte/s +system.physmem.avgMemAccLat 37383.60 # Average memory access latency per DRAM burst +system.physmem.avgRdBW 116.00 # Average DRAM read bandwidth in MiByte/s +system.physmem.avgWrBW 59.57 # Average achieved write bandwidth in MiByte/s +system.physmem.avgRdBWSys 116.08 # Average system read bandwidth in MiByte/s +system.physmem.avgWrBWSys 59.57 # Average system write bandwidth in MiByte/s system.physmem.peakBW 12800.00 # Theoretical peak bandwidth in MiByte/s -system.physmem.busUtil 1.39 # Data bus utilization in percentage -system.physmem.busUtilRead 0.92 # Data bus utilization in percentage for reads +system.physmem.busUtil 1.37 # Data bus 
utilization in percentage +system.physmem.busUtilRead 0.91 # Data bus utilization in percentage for reads system.physmem.busUtilWrite 0.47 # Data bus utilization in percentage for writes system.physmem.avgRdQLen 1.02 # Average read queue length when enqueuing -system.physmem.avgWrQLen 24.32 # Average write queue length when enqueuing -system.physmem.readRowHits 773341 # Number of row buffer hits during reads -system.physmem.writeRowHits 411895 # Number of row buffer hits during writes -system.physmem.readRowHitRate 37.81 # Row buffer hit rate for reads -system.physmem.writeRowHitRate 39.22 # Row buffer hit rate for writes -system.physmem.avgGap 360661.52 # Average gap between requests -system.physmem.pageHitRate 38.29 # Row buffer hit rate, read and write combined -system.physmem_0.actEnergy 7039078200 # Energy for activate commands per rank (pJ) -system.physmem_0.preEnergy 3840766875 # Energy for precharge commands per rank (pJ) -system.physmem_0.readEnergy 7717881600 # Energy for read commands per rank (pJ) -system.physmem_0.writeEnergy 3318453360 # Energy for write commands per rank (pJ) -system.physmem_0.refreshEnergy 72947846400 # Energy for refresh commands per rank (pJ) -system.physmem_0.actBackEnergy 420697412235 # Energy for active background per rank (pJ) -system.physmem_0.preBackEnergy 301083150000 # Energy for precharge background per rank (pJ) -system.physmem_0.totalEnergy 816644588670 # Total energy per rank (pJ) -system.physmem_0.averagePower 731.196952 # Core power per rank (mW) -system.physmem_0.memoryStateTime::IDLE 498171344000 # Time in different power states -system.physmem_0.memoryStateTime::REF 37294400000 # Time in different power states +system.physmem.avgWrQLen 24.54 # Average write queue length when enqueuing +system.physmem.readRowHits 772369 # Number of row buffer hits during reads +system.physmem.writeRowHits 412032 # Number of row buffer hits during writes +system.physmem.readRowHitRate 37.78 # Row buffer hit rate for reads +system.physmem.writeRowHitRate 39.24 # Row buffer hit rate for writes +system.physmem.avgGap 364372.73 # Average gap between requests +system.physmem.pageHitRate 38.27 # Row buffer hit rate, read and write combined +system.physmem_0.actEnergy 7040703600 # Energy for activate commands per rank (pJ) +system.physmem_0.preEnergy 3841653750 # Energy for precharge commands per rank (pJ) +system.physmem_0.readEnergy 7715315400 # Energy for read commands per rank (pJ) +system.physmem_0.writeEnergy 3317734080 # Energy for write commands per rank (pJ) +system.physmem_0.refreshEnergy 73677630000 # Energy for refresh commands per rank (pJ) +system.physmem_0.actBackEnergy 423036881190 # Energy for active background per rank (pJ) +system.physmem_0.preBackEnergy 305734953750 # Energy for precharge background per rank (pJ) +system.physmem_0.totalEnergy 824364871770 # Total energy per rank (pJ) +system.physmem_0.averagePower 730.798394 # Core power per rank (mW) +system.physmem_0.memoryStateTime::IDLE 505893058250 # Time in different power states +system.physmem_0.memoryStateTime::REF 37667500000 # Time in different power states system.physmem_0.memoryStateTime::PRE_PDN 0 # Time in different power states -system.physmem_0.memoryStateTime::ACT 581396539000 # Time in different power states +system.physmem_0.memoryStateTime::ACT 584472684250 # Time in different power states system.physmem_0.memoryStateTime::ACT_PDN 0 # Time in different power states -system.physmem_1.actEnergy 7401549960 # Energy for activate commands per rank (pJ) 
-system.physmem_1.preEnergy 4038544125 # Energy for precharge commands per rank (pJ) -system.physmem_1.readEnergy 8234959200 # Energy for read commands per rank (pJ) -system.physmem_1.writeEnergy 3486194640 # Energy for write commands per rank (pJ) -system.physmem_1.refreshEnergy 72947846400 # Energy for refresh commands per rank (pJ) -system.physmem_1.actBackEnergy 429293377035 # Energy for active background per rank (pJ) -system.physmem_1.preBackEnergy 293542830000 # Energy for precharge background per rank (pJ) -system.physmem_1.totalEnergy 818945301360 # Total energy per rank (pJ) -system.physmem_1.averagePower 733.256935 # Core power per rank (mW) -system.physmem_1.memoryStateTime::IDLE 485580062750 # Time in different power states -system.physmem_1.memoryStateTime::REF 37294400000 # Time in different power states +system.physmem_1.actEnergy 7399251720 # Energy for activate commands per rank (pJ) +system.physmem_1.preEnergy 4037290125 # Energy for precharge commands per rank (pJ) +system.physmem_1.readEnergy 8232221400 # Energy for read commands per rank (pJ) +system.physmem_1.writeEnergy 3485553120 # Energy for write commands per rank (pJ) +system.physmem_1.refreshEnergy 73677630000 # Energy for refresh commands per rank (pJ) +system.physmem_1.actBackEnergy 432494110575 # Energy for active background per rank (pJ) +system.physmem_1.preBackEnergy 297439138500 # Energy for precharge background per rank (pJ) +system.physmem_1.totalEnergy 826765195440 # Total energy per rank (pJ) +system.physmem_1.averagePower 732.926278 # Core power per rank (mW) +system.physmem_1.memoryStateTime::IDLE 492041493250 # Time in different power states +system.physmem_1.memoryStateTime::REF 37667500000 # Time in different power states system.physmem_1.memoryStateTime::PRE_PDN 0 # Time in different power states -system.physmem_1.memoryStateTime::ACT 593987729250 # Time in different power states +system.physmem_1.memoryStateTime::ACT 598324400250 # Time in different power states system.physmem_1.memoryStateTime::ACT_PDN 0 # Time in different power states -system.pwrStateResidencyTicks::UNDEFINED 1116865668500 # Cumulative time (in ticks) in various power states -system.cpu.branchPred.lookups 239639355 # Number of BP lookups -system.cpu.branchPred.condPredicted 186342486 # Number of conditional branches predicted -system.cpu.branchPred.condIncorrect 14526193 # Number of conditional branches incorrect -system.cpu.branchPred.BTBLookups 130646338 # Number of BTB lookups -system.cpu.branchPred.BTBHits 122079091 # Number of BTB hits +system.pwrStateResidencyTicks::UNDEFINED 1128033563500 # Cumulative time (in ticks) in various power states +system.cpu.branchPred.lookups 240019627 # Number of BP lookups +system.cpu.branchPred.condPredicted 186610234 # Number of conditional branches predicted +system.cpu.branchPred.condIncorrect 14528957 # Number of conditional branches incorrect +system.cpu.branchPred.BTBLookups 131647639 # Number of BTB lookups +system.cpu.branchPred.BTBHits 122324320 # Number of BTB hits system.cpu.branchPred.BTBCorrect 0 # Number of correct BTB predictions (this stat may not work properly. -system.cpu.branchPred.BTBHitPct 93.442413 # BTB Hit Percentage -system.cpu.branchPred.usedRAS 15657057 # Number of times the RAS was used to get a target. +system.cpu.branchPred.BTBHitPct 92.917975 # BTB Hit Percentage +system.cpu.branchPred.usedRAS 15657430 # Number of times the RAS was used to get a target. system.cpu.branchPred.RASInCorrect 15 # Number of incorrect RAS predictions. 
-system.cpu.branchPred.indirectLookups 537 # Number of indirect predictor lookups. -system.cpu.branchPred.indirectHits 230 # Number of indirect target hits. -system.cpu.branchPred.indirectMisses 307 # Number of indirect misses. -system.cpu.branchPredindirectMispredicted 164 # Number of mispredicted indirect branches. +system.cpu.branchPred.indirectLookups 534 # Number of indirect predictor lookups. +system.cpu.branchPred.indirectHits 232 # Number of indirect target hits. +system.cpu.branchPred.indirectMisses 302 # Number of indirect misses. +system.cpu.branchPredindirectMispredicted 162 # Number of mispredicted indirect branches. system.cpu_clk_domain.clock 500 # Clock period in ticks -system.cpu.dstage2_mmu.stage2_tlb.walker.pwrStateResidencyTicks::UNDEFINED 1116865668500 # Cumulative time (in ticks) in various power states +system.cpu.dstage2_mmu.stage2_tlb.walker.pwrStateResidencyTicks::UNDEFINED 1128033563500 # Cumulative time (in ticks) in various power states system.cpu.dstage2_mmu.stage2_tlb.walker.walks 0 # Table walker walks requested system.cpu.dstage2_mmu.stage2_tlb.walker.walkRequestOrigin_Requested::Data 0 # Table walker requests started/completed, data/inst system.cpu.dstage2_mmu.stage2_tlb.walker.walkRequestOrigin_Requested::Inst 0 # Table walker requests started/completed, data/inst @@ -330,7 +330,7 @@ system.cpu.dstage2_mmu.stage2_tlb.inst_accesses 0 system.cpu.dstage2_mmu.stage2_tlb.hits 0 # DTB hits system.cpu.dstage2_mmu.stage2_tlb.misses 0 # DTB misses system.cpu.dstage2_mmu.stage2_tlb.accesses 0 # DTB accesses -system.cpu.dtb.walker.pwrStateResidencyTicks::UNDEFINED 1116865668500 # Cumulative time (in ticks) in various power states +system.cpu.dtb.walker.pwrStateResidencyTicks::UNDEFINED 1128033563500 # Cumulative time (in ticks) in various power states system.cpu.dtb.walker.walks 0 # Table walker walks requested system.cpu.dtb.walker.walkRequestOrigin_Requested::Data 0 # Table walker requests started/completed, data/inst system.cpu.dtb.walker.walkRequestOrigin_Requested::Inst 0 # Table walker requests started/completed, data/inst @@ -360,7 +360,7 @@ system.cpu.dtb.inst_accesses 0 # IT system.cpu.dtb.hits 0 # DTB hits system.cpu.dtb.misses 0 # DTB misses system.cpu.dtb.accesses 0 # DTB accesses -system.cpu.istage2_mmu.stage2_tlb.walker.pwrStateResidencyTicks::UNDEFINED 1116865668500 # Cumulative time (in ticks) in various power states +system.cpu.istage2_mmu.stage2_tlb.walker.pwrStateResidencyTicks::UNDEFINED 1128033563500 # Cumulative time (in ticks) in various power states system.cpu.istage2_mmu.stage2_tlb.walker.walks 0 # Table walker walks requested system.cpu.istage2_mmu.stage2_tlb.walker.walkRequestOrigin_Requested::Data 0 # Table walker requests started/completed, data/inst system.cpu.istage2_mmu.stage2_tlb.walker.walkRequestOrigin_Requested::Inst 0 # Table walker requests started/completed, data/inst @@ -390,7 +390,7 @@ system.cpu.istage2_mmu.stage2_tlb.inst_accesses 0 system.cpu.istage2_mmu.stage2_tlb.hits 0 # DTB hits system.cpu.istage2_mmu.stage2_tlb.misses 0 # DTB misses system.cpu.istage2_mmu.stage2_tlb.accesses 0 # DTB accesses -system.cpu.itb.walker.pwrStateResidencyTicks::UNDEFINED 1116865668500 # Cumulative time (in ticks) in various power states +system.cpu.itb.walker.pwrStateResidencyTicks::UNDEFINED 1128033563500 # Cumulative time (in ticks) in various power states system.cpu.itb.walker.walks 0 # Table walker walks requested system.cpu.itb.walker.walkRequestOrigin_Requested::Data 0 # Table walker requests started/completed, data/inst 
system.cpu.itb.walker.walkRequestOrigin_Requested::Inst 0 # Table walker requests started/completed, data/inst @@ -421,16 +421,16 @@ system.cpu.itb.hits 0 # DT system.cpu.itb.misses 0 # DTB misses system.cpu.itb.accesses 0 # DTB accesses system.cpu.workload.num_syscalls 46 # Number of system calls -system.cpu.pwrStateResidencyTicks::ON 1116865668500 # Cumulative time (in ticks) in various power states -system.cpu.numCycles 2233731337 # number of cpu cycles simulated +system.cpu.pwrStateResidencyTicks::ON 1128033563500 # Cumulative time (in ticks) in various power states +system.cpu.numCycles 2256067127 # number of cpu cycles simulated system.cpu.numWorkItemsStarted 0 # number of work items this cpu started system.cpu.numWorkItemsCompleted 0 # number of work items this cpu completed system.cpu.committedInsts 1544563088 # Number of instructions committed system.cpu.committedOps 1664032481 # Number of ops (including micro ops) committed -system.cpu.discardedOps 41470388 # Number of ops (including micro ops) which were discarded before commit +system.cpu.discardedOps 41363716 # Number of ops (including micro ops) which were discarded before commit system.cpu.numFetchSuspends 0 # Number of times Execute suspended instruction fetching -system.cpu.cpi 1.446190 # CPI: cycles per instruction -system.cpu.ipc 0.691472 # IPC: instructions per cycle +system.cpu.cpi 1.460651 # CPI: cycles per instruction +system.cpu.ipc 0.684626 # IPC: instructions per cycle system.cpu.op_class_0::No_OpClass 0 0.00% 0.00% # Class of committed instruction system.cpu.op_class_0::IntAlu 1030178776 61.91% 61.91% # Class of committed instruction system.cpu.op_class_0::IntMult 700322 0.04% 61.95% # Class of committed instruction @@ -466,61 +466,61 @@ system.cpu.op_class_0::MemWrite 174847046 10.51% 100.00% # Cl system.cpu.op_class_0::IprAccess 0 0.00% 100.00% # Class of committed instruction system.cpu.op_class_0::InstPrefetch 0 0.00% 100.00% # Class of committed instruction system.cpu.op_class_0::total 1664032481 # Class of committed instruction -system.cpu.tickCycles 1834123667 # Number of cycles that the object actually ticked -system.cpu.idleCycles 399607670 # Total number of cycles that the object has spent stopped -system.cpu.dcache.tags.pwrStateResidencyTicks::UNDEFINED 1116865668500 # Cumulative time (in ticks) in various power states -system.cpu.dcache.tags.replacements 9221041 # number of replacements -system.cpu.dcache.tags.tagsinuse 4085.616095 # Cycle average of tags in use -system.cpu.dcache.tags.total_refs 624218928 # Total number of references to valid blocks. -system.cpu.dcache.tags.sampled_refs 9225137 # Sample count of references to valid blocks. -system.cpu.dcache.tags.avg_refs 67.665004 # Average number of references to valid blocks. -system.cpu.dcache.tags.warmup_cycle 9804990500 # Cycle when the warmup percentage was hit. 
-system.cpu.dcache.tags.occ_blocks::cpu.data 4085.616095 # Average occupied blocks per requestor -system.cpu.dcache.tags.occ_percent::cpu.data 0.997465 # Average percentage of cache occupancy -system.cpu.dcache.tags.occ_percent::total 0.997465 # Average percentage of cache occupancy +system.cpu.tickCycles 1844612574 # Number of cycles that the object actually ticked +system.cpu.idleCycles 411454553 # Total number of cycles that the object has spent stopped +system.cpu.dcache.tags.pwrStateResidencyTicks::UNDEFINED 1128033563500 # Cumulative time (in ticks) in various power states +system.cpu.dcache.tags.replacements 9220101 # number of replacements +system.cpu.dcache.tags.tagsinuse 4085.702912 # Cycle average of tags in use +system.cpu.dcache.tags.total_refs 624495427 # Total number of references to valid blocks. +system.cpu.dcache.tags.sampled_refs 9224197 # Sample count of references to valid blocks. +system.cpu.dcache.tags.avg_refs 67.701874 # Average number of references to valid blocks. +system.cpu.dcache.tags.warmup_cycle 9818932500 # Cycle when the warmup percentage was hit. +system.cpu.dcache.tags.occ_blocks::cpu.data 4085.702912 # Average occupied blocks per requestor +system.cpu.dcache.tags.occ_percent::cpu.data 0.997486 # Average percentage of cache occupancy +system.cpu.dcache.tags.occ_percent::total 0.997486 # Average percentage of cache occupancy system.cpu.dcache.tags.occ_task_id_blocks::1024 4096 # Occupied blocks per task id -system.cpu.dcache.tags.age_task_id_blocks_1024::0 251 # Occupied blocks per task id -system.cpu.dcache.tags.age_task_id_blocks_1024::1 1231 # Occupied blocks per task id +system.cpu.dcache.tags.age_task_id_blocks_1024::0 241 # Occupied blocks per task id +system.cpu.dcache.tags.age_task_id_blocks_1024::1 1240 # Occupied blocks per task id system.cpu.dcache.tags.age_task_id_blocks_1024::2 2553 # Occupied blocks per task id -system.cpu.dcache.tags.age_task_id_blocks_1024::3 61 # Occupied blocks per task id +system.cpu.dcache.tags.age_task_id_blocks_1024::3 62 # Occupied blocks per task id system.cpu.dcache.tags.occ_task_id_percent::1024 1 # Percentage of cache occupancy per task id -system.cpu.dcache.tags.tag_accesses 1276841941 # Number of tag accesses -system.cpu.dcache.tags.data_accesses 1276841941 # Number of data accesses -system.cpu.dcache.pwrStateResidencyTicks::UNDEFINED 1116865668500 # Cumulative time (in ticks) in various power states -system.cpu.dcache.ReadReq_hits::cpu.data 453887732 # number of ReadReq hits -system.cpu.dcache.ReadReq_hits::total 453887732 # number of ReadReq hits -system.cpu.dcache.WriteReq_hits::cpu.data 170331073 # number of WriteReq hits -system.cpu.dcache.WriteReq_hits::total 170331073 # number of WriteReq hits +system.cpu.dcache.tags.tag_accesses 1277391791 # Number of tag accesses +system.cpu.dcache.tags.data_accesses 1277391791 # Number of data accesses +system.cpu.dcache.pwrStateResidencyTicks::UNDEFINED 1128033563500 # Cumulative time (in ticks) in various power states +system.cpu.dcache.ReadReq_hits::cpu.data 454164210 # number of ReadReq hits +system.cpu.dcache.ReadReq_hits::total 454164210 # number of ReadReq hits +system.cpu.dcache.WriteReq_hits::cpu.data 170331094 # number of WriteReq hits +system.cpu.dcache.WriteReq_hits::total 170331094 # number of WriteReq hits system.cpu.dcache.SoftPFReq_hits::cpu.data 1 # number of SoftPFReq hits system.cpu.dcache.SoftPFReq_hits::total 1 # number of SoftPFReq hits system.cpu.dcache.LoadLockedReq_hits::cpu.data 61 # number of LoadLockedReq hits 
system.cpu.dcache.LoadLockedReq_hits::total 61 # number of LoadLockedReq hits system.cpu.dcache.StoreCondReq_hits::cpu.data 61 # number of StoreCondReq hits system.cpu.dcache.StoreCondReq_hits::total 61 # number of StoreCondReq hits -system.cpu.dcache.demand_hits::cpu.data 624218805 # number of demand (read+write) hits -system.cpu.dcache.demand_hits::total 624218805 # number of demand (read+write) hits -system.cpu.dcache.overall_hits::cpu.data 624218806 # number of overall hits -system.cpu.dcache.overall_hits::total 624218806 # number of overall hits -system.cpu.dcache.ReadReq_misses::cpu.data 7334498 # number of ReadReq misses -system.cpu.dcache.ReadReq_misses::total 7334498 # number of ReadReq misses -system.cpu.dcache.WriteReq_misses::cpu.data 2254974 # number of WriteReq misses -system.cpu.dcache.WriteReq_misses::total 2254974 # number of WriteReq misses +system.cpu.dcache.demand_hits::cpu.data 624495304 # number of demand (read+write) hits +system.cpu.dcache.demand_hits::total 624495304 # number of demand (read+write) hits +system.cpu.dcache.overall_hits::cpu.data 624495305 # number of overall hits +system.cpu.dcache.overall_hits::total 624495305 # number of overall hits +system.cpu.dcache.ReadReq_misses::cpu.data 7333415 # number of ReadReq misses +system.cpu.dcache.ReadReq_misses::total 7333415 # number of ReadReq misses +system.cpu.dcache.WriteReq_misses::cpu.data 2254953 # number of WriteReq misses +system.cpu.dcache.WriteReq_misses::total 2254953 # number of WriteReq misses system.cpu.dcache.SoftPFReq_misses::cpu.data 2 # number of SoftPFReq misses system.cpu.dcache.SoftPFReq_misses::total 2 # number of SoftPFReq misses -system.cpu.dcache.demand_misses::cpu.data 9589472 # number of demand (read+write) misses -system.cpu.dcache.demand_misses::total 9589472 # number of demand (read+write) misses -system.cpu.dcache.overall_misses::cpu.data 9589474 # number of overall misses -system.cpu.dcache.overall_misses::total 9589474 # number of overall misses -system.cpu.dcache.ReadReq_miss_latency::cpu.data 190926660000 # number of ReadReq miss cycles -system.cpu.dcache.ReadReq_miss_latency::total 190926660000 # number of ReadReq miss cycles -system.cpu.dcache.WriteReq_miss_latency::cpu.data 109083916000 # number of WriteReq miss cycles -system.cpu.dcache.WriteReq_miss_latency::total 109083916000 # number of WriteReq miss cycles -system.cpu.dcache.demand_miss_latency::cpu.data 300010576000 # number of demand (read+write) miss cycles -system.cpu.dcache.demand_miss_latency::total 300010576000 # number of demand (read+write) miss cycles -system.cpu.dcache.overall_miss_latency::cpu.data 300010576000 # number of overall miss cycles -system.cpu.dcache.overall_miss_latency::total 300010576000 # number of overall miss cycles -system.cpu.dcache.ReadReq_accesses::cpu.data 461222230 # number of ReadReq accesses(hits+misses) -system.cpu.dcache.ReadReq_accesses::total 461222230 # number of ReadReq accesses(hits+misses) +system.cpu.dcache.demand_misses::cpu.data 9588368 # number of demand (read+write) misses +system.cpu.dcache.demand_misses::total 9588368 # number of demand (read+write) misses +system.cpu.dcache.overall_misses::cpu.data 9588370 # number of overall misses +system.cpu.dcache.overall_misses::total 9588370 # number of overall misses +system.cpu.dcache.ReadReq_miss_latency::cpu.data 190988166000 # number of ReadReq miss cycles +system.cpu.dcache.ReadReq_miss_latency::total 190988166000 # number of ReadReq miss cycles +system.cpu.dcache.WriteReq_miss_latency::cpu.data 108977258000 # number of 
WriteReq miss cycles +system.cpu.dcache.WriteReq_miss_latency::total 108977258000 # number of WriteReq miss cycles +system.cpu.dcache.demand_miss_latency::cpu.data 299965424000 # number of demand (read+write) miss cycles +system.cpu.dcache.demand_miss_latency::total 299965424000 # number of demand (read+write) miss cycles +system.cpu.dcache.overall_miss_latency::cpu.data 299965424000 # number of overall miss cycles +system.cpu.dcache.overall_miss_latency::total 299965424000 # number of overall miss cycles +system.cpu.dcache.ReadReq_accesses::cpu.data 461497625 # number of ReadReq accesses(hits+misses) +system.cpu.dcache.ReadReq_accesses::total 461497625 # number of ReadReq accesses(hits+misses) system.cpu.dcache.WriteReq_accesses::cpu.data 172586047 # number of WriteReq accesses(hits+misses) system.cpu.dcache.WriteReq_accesses::total 172586047 # number of WriteReq accesses(hits+misses) system.cpu.dcache.SoftPFReq_accesses::cpu.data 3 # number of SoftPFReq accesses(hits+misses) @@ -529,404 +529,406 @@ system.cpu.dcache.LoadLockedReq_accesses::cpu.data 61 system.cpu.dcache.LoadLockedReq_accesses::total 61 # number of LoadLockedReq accesses(hits+misses) system.cpu.dcache.StoreCondReq_accesses::cpu.data 61 # number of StoreCondReq accesses(hits+misses) system.cpu.dcache.StoreCondReq_accesses::total 61 # number of StoreCondReq accesses(hits+misses) -system.cpu.dcache.demand_accesses::cpu.data 633808277 # number of demand (read+write) accesses -system.cpu.dcache.demand_accesses::total 633808277 # number of demand (read+write) accesses -system.cpu.dcache.overall_accesses::cpu.data 633808280 # number of overall (read+write) accesses -system.cpu.dcache.overall_accesses::total 633808280 # number of overall (read+write) accesses -system.cpu.dcache.ReadReq_miss_rate::cpu.data 0.015902 # miss rate for ReadReq accesses -system.cpu.dcache.ReadReq_miss_rate::total 0.015902 # miss rate for ReadReq accesses +system.cpu.dcache.demand_accesses::cpu.data 634083672 # number of demand (read+write) accesses +system.cpu.dcache.demand_accesses::total 634083672 # number of demand (read+write) accesses +system.cpu.dcache.overall_accesses::cpu.data 634083675 # number of overall (read+write) accesses +system.cpu.dcache.overall_accesses::total 634083675 # number of overall (read+write) accesses +system.cpu.dcache.ReadReq_miss_rate::cpu.data 0.015890 # miss rate for ReadReq accesses +system.cpu.dcache.ReadReq_miss_rate::total 0.015890 # miss rate for ReadReq accesses system.cpu.dcache.WriteReq_miss_rate::cpu.data 0.013066 # miss rate for WriteReq accesses system.cpu.dcache.WriteReq_miss_rate::total 0.013066 # miss rate for WriteReq accesses system.cpu.dcache.SoftPFReq_miss_rate::cpu.data 0.666667 # miss rate for SoftPFReq accesses system.cpu.dcache.SoftPFReq_miss_rate::total 0.666667 # miss rate for SoftPFReq accesses -system.cpu.dcache.demand_miss_rate::cpu.data 0.015130 # miss rate for demand accesses -system.cpu.dcache.demand_miss_rate::total 0.015130 # miss rate for demand accesses -system.cpu.dcache.overall_miss_rate::cpu.data 0.015130 # miss rate for overall accesses -system.cpu.dcache.overall_miss_rate::total 0.015130 # miss rate for overall accesses -system.cpu.dcache.ReadReq_avg_miss_latency::cpu.data 26031.319390 # average ReadReq miss latency -system.cpu.dcache.ReadReq_avg_miss_latency::total 26031.319390 # average ReadReq miss latency -system.cpu.dcache.WriteReq_avg_miss_latency::cpu.data 48374.799887 # average WriteReq miss latency -system.cpu.dcache.WriteReq_avg_miss_latency::total 48374.799887 # average 
WriteReq miss latency -system.cpu.dcache.demand_avg_miss_latency::cpu.data 31285.411334 # average overall miss latency -system.cpu.dcache.demand_avg_miss_latency::total 31285.411334 # average overall miss latency -system.cpu.dcache.overall_avg_miss_latency::cpu.data 31285.404809 # average overall miss latency -system.cpu.dcache.overall_avg_miss_latency::total 31285.404809 # average overall miss latency +system.cpu.dcache.demand_miss_rate::cpu.data 0.015122 # miss rate for demand accesses +system.cpu.dcache.demand_miss_rate::total 0.015122 # miss rate for demand accesses +system.cpu.dcache.overall_miss_rate::cpu.data 0.015122 # miss rate for overall accesses +system.cpu.dcache.overall_miss_rate::total 0.015122 # miss rate for overall accesses +system.cpu.dcache.ReadReq_avg_miss_latency::cpu.data 26043.550788 # average ReadReq miss latency +system.cpu.dcache.ReadReq_avg_miss_latency::total 26043.550788 # average ReadReq miss latency +system.cpu.dcache.WriteReq_avg_miss_latency::cpu.data 48327.950960 # average WriteReq miss latency +system.cpu.dcache.WriteReq_avg_miss_latency::total 48327.950960 # average WriteReq miss latency +system.cpu.dcache.demand_avg_miss_latency::cpu.data 31284.304482 # average overall miss latency +system.cpu.dcache.demand_avg_miss_latency::total 31284.304482 # average overall miss latency +system.cpu.dcache.overall_avg_miss_latency::cpu.data 31284.297957 # average overall miss latency +system.cpu.dcache.overall_avg_miss_latency::total 31284.297957 # average overall miss latency system.cpu.dcache.blocked_cycles::no_mshrs 0 # number of cycles access was blocked system.cpu.dcache.blocked_cycles::no_targets 0 # number of cycles access was blocked system.cpu.dcache.blocked::no_mshrs 0 # number of cycles access was blocked system.cpu.dcache.blocked::no_targets 0 # number of cycles access was blocked system.cpu.dcache.avg_blocked_cycles::no_mshrs nan # average number of cycles each access was blocked system.cpu.dcache.avg_blocked_cycles::no_targets nan # average number of cycles each access was blocked -system.cpu.dcache.writebacks::writebacks 3684567 # number of writebacks -system.cpu.dcache.writebacks::total 3684567 # number of writebacks -system.cpu.dcache.ReadReq_mshr_hits::cpu.data 215 # number of ReadReq MSHR hits -system.cpu.dcache.ReadReq_mshr_hits::total 215 # number of ReadReq MSHR hits -system.cpu.dcache.WriteReq_mshr_hits::cpu.data 364121 # number of WriteReq MSHR hits -system.cpu.dcache.WriteReq_mshr_hits::total 364121 # number of WriteReq MSHR hits -system.cpu.dcache.demand_mshr_hits::cpu.data 364336 # number of demand (read+write) MSHR hits -system.cpu.dcache.demand_mshr_hits::total 364336 # number of demand (read+write) MSHR hits -system.cpu.dcache.overall_mshr_hits::cpu.data 364336 # number of overall MSHR hits -system.cpu.dcache.overall_mshr_hits::total 364336 # number of overall MSHR hits -system.cpu.dcache.ReadReq_mshr_misses::cpu.data 7334283 # number of ReadReq MSHR misses -system.cpu.dcache.ReadReq_mshr_misses::total 7334283 # number of ReadReq MSHR misses -system.cpu.dcache.WriteReq_mshr_misses::cpu.data 1890853 # number of WriteReq MSHR misses -system.cpu.dcache.WriteReq_mshr_misses::total 1890853 # number of WriteReq MSHR misses +system.cpu.dcache.writebacks::writebacks 3684499 # number of writebacks +system.cpu.dcache.writebacks::total 3684499 # number of writebacks +system.cpu.dcache.ReadReq_mshr_hits::cpu.data 49 # number of ReadReq MSHR hits +system.cpu.dcache.ReadReq_mshr_hits::total 49 # number of ReadReq MSHR hits 
+system.cpu.dcache.WriteReq_mshr_hits::cpu.data 364123 # number of WriteReq MSHR hits +system.cpu.dcache.WriteReq_mshr_hits::total 364123 # number of WriteReq MSHR hits +system.cpu.dcache.demand_mshr_hits::cpu.data 364172 # number of demand (read+write) MSHR hits +system.cpu.dcache.demand_mshr_hits::total 364172 # number of demand (read+write) MSHR hits +system.cpu.dcache.overall_mshr_hits::cpu.data 364172 # number of overall MSHR hits +system.cpu.dcache.overall_mshr_hits::total 364172 # number of overall MSHR hits +system.cpu.dcache.ReadReq_mshr_misses::cpu.data 7333366 # number of ReadReq MSHR misses +system.cpu.dcache.ReadReq_mshr_misses::total 7333366 # number of ReadReq MSHR misses +system.cpu.dcache.WriteReq_mshr_misses::cpu.data 1890830 # number of WriteReq MSHR misses +system.cpu.dcache.WriteReq_mshr_misses::total 1890830 # number of WriteReq MSHR misses system.cpu.dcache.SoftPFReq_mshr_misses::cpu.data 1 # number of SoftPFReq MSHR misses system.cpu.dcache.SoftPFReq_mshr_misses::total 1 # number of SoftPFReq MSHR misses -system.cpu.dcache.demand_mshr_misses::cpu.data 9225136 # number of demand (read+write) MSHR misses -system.cpu.dcache.demand_mshr_misses::total 9225136 # number of demand (read+write) MSHR misses -system.cpu.dcache.overall_mshr_misses::cpu.data 9225137 # number of overall MSHR misses -system.cpu.dcache.overall_mshr_misses::total 9225137 # number of overall MSHR misses -system.cpu.dcache.ReadReq_mshr_miss_latency::cpu.data 183586477500 # number of ReadReq MSHR miss cycles -system.cpu.dcache.ReadReq_mshr_miss_latency::total 183586477500 # number of ReadReq MSHR miss cycles -system.cpu.dcache.WriteReq_mshr_miss_latency::cpu.data 84779361000 # number of WriteReq MSHR miss cycles -system.cpu.dcache.WriteReq_mshr_miss_latency::total 84779361000 # number of WriteReq MSHR miss cycles +system.cpu.dcache.demand_mshr_misses::cpu.data 9224196 # number of demand (read+write) MSHR misses +system.cpu.dcache.demand_mshr_misses::total 9224196 # number of demand (read+write) MSHR misses +system.cpu.dcache.overall_mshr_misses::cpu.data 9224197 # number of overall MSHR misses +system.cpu.dcache.overall_mshr_misses::total 9224197 # number of overall MSHR misses +system.cpu.dcache.ReadReq_mshr_miss_latency::cpu.data 183652478000 # number of ReadReq MSHR miss cycles +system.cpu.dcache.ReadReq_mshr_miss_latency::total 183652478000 # number of ReadReq MSHR miss cycles +system.cpu.dcache.WriteReq_mshr_miss_latency::cpu.data 84692070000 # number of WriteReq MSHR miss cycles +system.cpu.dcache.WriteReq_mshr_miss_latency::total 84692070000 # number of WriteReq MSHR miss cycles system.cpu.dcache.SoftPFReq_mshr_miss_latency::cpu.data 74000 # number of SoftPFReq MSHR miss cycles system.cpu.dcache.SoftPFReq_mshr_miss_latency::total 74000 # number of SoftPFReq MSHR miss cycles -system.cpu.dcache.demand_mshr_miss_latency::cpu.data 268365838500 # number of demand (read+write) MSHR miss cycles -system.cpu.dcache.demand_mshr_miss_latency::total 268365838500 # number of demand (read+write) MSHR miss cycles -system.cpu.dcache.overall_mshr_miss_latency::cpu.data 268365912500 # number of overall MSHR miss cycles -system.cpu.dcache.overall_mshr_miss_latency::total 268365912500 # number of overall MSHR miss cycles -system.cpu.dcache.ReadReq_mshr_miss_rate::cpu.data 0.015902 # mshr miss rate for ReadReq accesses -system.cpu.dcache.ReadReq_mshr_miss_rate::total 0.015902 # mshr miss rate for ReadReq accesses +system.cpu.dcache.demand_mshr_miss_latency::cpu.data 268344548000 # number of demand (read+write) MSHR 
miss cycles +system.cpu.dcache.demand_mshr_miss_latency::total 268344548000 # number of demand (read+write) MSHR miss cycles +system.cpu.dcache.overall_mshr_miss_latency::cpu.data 268344622000 # number of overall MSHR miss cycles +system.cpu.dcache.overall_mshr_miss_latency::total 268344622000 # number of overall MSHR miss cycles +system.cpu.dcache.ReadReq_mshr_miss_rate::cpu.data 0.015890 # mshr miss rate for ReadReq accesses +system.cpu.dcache.ReadReq_mshr_miss_rate::total 0.015890 # mshr miss rate for ReadReq accesses system.cpu.dcache.WriteReq_mshr_miss_rate::cpu.data 0.010956 # mshr miss rate for WriteReq accesses system.cpu.dcache.WriteReq_mshr_miss_rate::total 0.010956 # mshr miss rate for WriteReq accesses system.cpu.dcache.SoftPFReq_mshr_miss_rate::cpu.data 0.333333 # mshr miss rate for SoftPFReq accesses system.cpu.dcache.SoftPFReq_mshr_miss_rate::total 0.333333 # mshr miss rate for SoftPFReq accesses -system.cpu.dcache.demand_mshr_miss_rate::cpu.data 0.014555 # mshr miss rate for demand accesses -system.cpu.dcache.demand_mshr_miss_rate::total 0.014555 # mshr miss rate for demand accesses -system.cpu.dcache.overall_mshr_miss_rate::cpu.data 0.014555 # mshr miss rate for overall accesses -system.cpu.dcache.overall_mshr_miss_rate::total 0.014555 # mshr miss rate for overall accesses -system.cpu.dcache.ReadReq_avg_mshr_miss_latency::cpu.data 25031.278109 # average ReadReq mshr miss latency -system.cpu.dcache.ReadReq_avg_mshr_miss_latency::total 25031.278109 # average ReadReq mshr miss latency -system.cpu.dcache.WriteReq_avg_mshr_miss_latency::cpu.data 44836.568998 # average WriteReq mshr miss latency -system.cpu.dcache.WriteReq_avg_mshr_miss_latency::total 44836.568998 # average WriteReq mshr miss latency +system.cpu.dcache.demand_mshr_miss_rate::cpu.data 0.014547 # mshr miss rate for demand accesses +system.cpu.dcache.demand_mshr_miss_rate::total 0.014547 # mshr miss rate for demand accesses +system.cpu.dcache.overall_mshr_miss_rate::cpu.data 0.014547 # mshr miss rate for overall accesses +system.cpu.dcache.overall_mshr_miss_rate::total 0.014547 # mshr miss rate for overall accesses +system.cpu.dcache.ReadReq_avg_mshr_miss_latency::cpu.data 25043.408170 # average ReadReq mshr miss latency +system.cpu.dcache.ReadReq_avg_mshr_miss_latency::total 25043.408170 # average ReadReq mshr miss latency +system.cpu.dcache.WriteReq_avg_mshr_miss_latency::cpu.data 44790.948948 # average WriteReq mshr miss latency +system.cpu.dcache.WriteReq_avg_mshr_miss_latency::total 44790.948948 # average WriteReq mshr miss latency system.cpu.dcache.SoftPFReq_avg_mshr_miss_latency::cpu.data 74000 # average SoftPFReq mshr miss latency system.cpu.dcache.SoftPFReq_avg_mshr_miss_latency::total 74000 # average SoftPFReq mshr miss latency -system.cpu.dcache.demand_avg_mshr_miss_latency::cpu.data 29090.718934 # average overall mshr miss latency -system.cpu.dcache.demand_avg_mshr_miss_latency::total 29090.718934 # average overall mshr miss latency -system.cpu.dcache.overall_avg_mshr_miss_latency::cpu.data 29090.723802 # average overall mshr miss latency -system.cpu.dcache.overall_avg_mshr_miss_latency::total 29090.723802 # average overall mshr miss latency -system.cpu.icache.tags.pwrStateResidencyTicks::UNDEFINED 1116865668500 # Cumulative time (in ticks) in various power states -system.cpu.icache.tags.replacements 29 # number of replacements -system.cpu.icache.tags.tagsinuse 660.385482 # Cycle average of tags in use -system.cpu.icache.tags.total_refs 465281510 # Total number of references to valid blocks. 
+system.cpu.dcache.demand_avg_mshr_miss_latency::cpu.data 29091.375335 # average overall mshr miss latency +system.cpu.dcache.demand_avg_mshr_miss_latency::total 29091.375335 # average overall mshr miss latency +system.cpu.dcache.overall_avg_mshr_miss_latency::cpu.data 29091.380204 # average overall mshr miss latency +system.cpu.dcache.overall_avg_mshr_miss_latency::total 29091.380204 # average overall mshr miss latency +system.cpu.icache.tags.pwrStateResidencyTicks::UNDEFINED 1128033563500 # Cumulative time (in ticks) in various power states +system.cpu.icache.tags.replacements 30 # number of replacements +system.cpu.icache.tags.tagsinuse 660.287317 # Cycle average of tags in use +system.cpu.icache.tags.total_refs 466254411 # Total number of references to valid blocks. system.cpu.icache.tags.sampled_refs 819 # Sample count of references to valid blocks. -system.cpu.icache.tags.avg_refs 568109.291819 # Average number of references to valid blocks. +system.cpu.icache.tags.avg_refs 569297.205128 # Average number of references to valid blocks. system.cpu.icache.tags.warmup_cycle 0 # Cycle when the warmup percentage was hit. -system.cpu.icache.tags.occ_blocks::cpu.inst 660.385482 # Average occupied blocks per requestor -system.cpu.icache.tags.occ_percent::cpu.inst 0.322454 # Average percentage of cache occupancy -system.cpu.icache.tags.occ_percent::total 0.322454 # Average percentage of cache occupancy -system.cpu.icache.tags.occ_task_id_blocks::1024 790 # Occupied blocks per task id +system.cpu.icache.tags.occ_blocks::cpu.inst 660.287317 # Average occupied blocks per requestor +system.cpu.icache.tags.occ_percent::cpu.inst 0.322406 # Average percentage of cache occupancy +system.cpu.icache.tags.occ_percent::total 0.322406 # Average percentage of cache occupancy +system.cpu.icache.tags.occ_task_id_blocks::1024 789 # Occupied blocks per task id system.cpu.icache.tags.age_task_id_blocks_1024::0 32 # Occupied blocks per task id system.cpu.icache.tags.age_task_id_blocks_1024::2 5 # Occupied blocks per task id -system.cpu.icache.tags.age_task_id_blocks_1024::4 753 # Occupied blocks per task id -system.cpu.icache.tags.occ_task_id_percent::1024 0.385742 # Percentage of cache occupancy per task id -system.cpu.icache.tags.tag_accesses 930565477 # Number of tag accesses -system.cpu.icache.tags.data_accesses 930565477 # Number of data accesses -system.cpu.icache.pwrStateResidencyTicks::UNDEFINED 1116865668500 # Cumulative time (in ticks) in various power states -system.cpu.icache.ReadReq_hits::cpu.inst 465281510 # number of ReadReq hits -system.cpu.icache.ReadReq_hits::total 465281510 # number of ReadReq hits -system.cpu.icache.demand_hits::cpu.inst 465281510 # number of demand (read+write) hits -system.cpu.icache.demand_hits::total 465281510 # number of demand (read+write) hits -system.cpu.icache.overall_hits::cpu.inst 465281510 # number of overall hits -system.cpu.icache.overall_hits::total 465281510 # number of overall hits +system.cpu.icache.tags.age_task_id_blocks_1024::4 752 # Occupied blocks per task id +system.cpu.icache.tags.occ_task_id_percent::1024 0.385254 # Percentage of cache occupancy per task id +system.cpu.icache.tags.tag_accesses 932511279 # Number of tag accesses +system.cpu.icache.tags.data_accesses 932511279 # Number of data accesses +system.cpu.icache.pwrStateResidencyTicks::UNDEFINED 1128033563500 # Cumulative time (in ticks) in various power states +system.cpu.icache.ReadReq_hits::cpu.inst 466254411 # number of ReadReq hits +system.cpu.icache.ReadReq_hits::total 466254411 # number 
of ReadReq hits +system.cpu.icache.demand_hits::cpu.inst 466254411 # number of demand (read+write) hits +system.cpu.icache.demand_hits::total 466254411 # number of demand (read+write) hits +system.cpu.icache.overall_hits::cpu.inst 466254411 # number of overall hits +system.cpu.icache.overall_hits::total 466254411 # number of overall hits system.cpu.icache.ReadReq_misses::cpu.inst 819 # number of ReadReq misses system.cpu.icache.ReadReq_misses::total 819 # number of ReadReq misses system.cpu.icache.demand_misses::cpu.inst 819 # number of demand (read+write) misses system.cpu.icache.demand_misses::total 819 # number of demand (read+write) misses system.cpu.icache.overall_misses::cpu.inst 819 # number of overall misses system.cpu.icache.overall_misses::total 819 # number of overall misses -system.cpu.icache.ReadReq_miss_latency::cpu.inst 62402500 # number of ReadReq miss cycles -system.cpu.icache.ReadReq_miss_latency::total 62402500 # number of ReadReq miss cycles -system.cpu.icache.demand_miss_latency::cpu.inst 62402500 # number of demand (read+write) miss cycles -system.cpu.icache.demand_miss_latency::total 62402500 # number of demand (read+write) miss cycles -system.cpu.icache.overall_miss_latency::cpu.inst 62402500 # number of overall miss cycles -system.cpu.icache.overall_miss_latency::total 62402500 # number of overall miss cycles -system.cpu.icache.ReadReq_accesses::cpu.inst 465282329 # number of ReadReq accesses(hits+misses) -system.cpu.icache.ReadReq_accesses::total 465282329 # number of ReadReq accesses(hits+misses) -system.cpu.icache.demand_accesses::cpu.inst 465282329 # number of demand (read+write) accesses -system.cpu.icache.demand_accesses::total 465282329 # number of demand (read+write) accesses -system.cpu.icache.overall_accesses::cpu.inst 465282329 # number of overall (read+write) accesses -system.cpu.icache.overall_accesses::total 465282329 # number of overall (read+write) accesses +system.cpu.icache.ReadReq_miss_latency::cpu.inst 61690000 # number of ReadReq miss cycles +system.cpu.icache.ReadReq_miss_latency::total 61690000 # number of ReadReq miss cycles +system.cpu.icache.demand_miss_latency::cpu.inst 61690000 # number of demand (read+write) miss cycles +system.cpu.icache.demand_miss_latency::total 61690000 # number of demand (read+write) miss cycles +system.cpu.icache.overall_miss_latency::cpu.inst 61690000 # number of overall miss cycles +system.cpu.icache.overall_miss_latency::total 61690000 # number of overall miss cycles +system.cpu.icache.ReadReq_accesses::cpu.inst 466255230 # number of ReadReq accesses(hits+misses) +system.cpu.icache.ReadReq_accesses::total 466255230 # number of ReadReq accesses(hits+misses) +system.cpu.icache.demand_accesses::cpu.inst 466255230 # number of demand (read+write) accesses +system.cpu.icache.demand_accesses::total 466255230 # number of demand (read+write) accesses +system.cpu.icache.overall_accesses::cpu.inst 466255230 # number of overall (read+write) accesses +system.cpu.icache.overall_accesses::total 466255230 # number of overall (read+write) accesses system.cpu.icache.ReadReq_miss_rate::cpu.inst 0.000002 # miss rate for ReadReq accesses system.cpu.icache.ReadReq_miss_rate::total 0.000002 # miss rate for ReadReq accesses system.cpu.icache.demand_miss_rate::cpu.inst 0.000002 # miss rate for demand accesses system.cpu.icache.demand_miss_rate::total 0.000002 # miss rate for demand accesses system.cpu.icache.overall_miss_rate::cpu.inst 0.000002 # miss rate for overall accesses system.cpu.icache.overall_miss_rate::total 0.000002 # miss 
rate for overall accesses -system.cpu.icache.ReadReq_avg_miss_latency::cpu.inst 76193.528694 # average ReadReq miss latency -system.cpu.icache.ReadReq_avg_miss_latency::total 76193.528694 # average ReadReq miss latency -system.cpu.icache.demand_avg_miss_latency::cpu.inst 76193.528694 # average overall miss latency -system.cpu.icache.demand_avg_miss_latency::total 76193.528694 # average overall miss latency -system.cpu.icache.overall_avg_miss_latency::cpu.inst 76193.528694 # average overall miss latency -system.cpu.icache.overall_avg_miss_latency::total 76193.528694 # average overall miss latency +system.cpu.icache.ReadReq_avg_miss_latency::cpu.inst 75323.565324 # average ReadReq miss latency +system.cpu.icache.ReadReq_avg_miss_latency::total 75323.565324 # average ReadReq miss latency +system.cpu.icache.demand_avg_miss_latency::cpu.inst 75323.565324 # average overall miss latency +system.cpu.icache.demand_avg_miss_latency::total 75323.565324 # average overall miss latency +system.cpu.icache.overall_avg_miss_latency::cpu.inst 75323.565324 # average overall miss latency +system.cpu.icache.overall_avg_miss_latency::total 75323.565324 # average overall miss latency system.cpu.icache.blocked_cycles::no_mshrs 0 # number of cycles access was blocked system.cpu.icache.blocked_cycles::no_targets 0 # number of cycles access was blocked system.cpu.icache.blocked::no_mshrs 0 # number of cycles access was blocked system.cpu.icache.blocked::no_targets 0 # number of cycles access was blocked system.cpu.icache.avg_blocked_cycles::no_mshrs nan # average number of cycles each access was blocked system.cpu.icache.avg_blocked_cycles::no_targets nan # average number of cycles each access was blocked -system.cpu.icache.writebacks::writebacks 29 # number of writebacks -system.cpu.icache.writebacks::total 29 # number of writebacks +system.cpu.icache.writebacks::writebacks 30 # number of writebacks +system.cpu.icache.writebacks::total 30 # number of writebacks system.cpu.icache.ReadReq_mshr_misses::cpu.inst 819 # number of ReadReq MSHR misses system.cpu.icache.ReadReq_mshr_misses::total 819 # number of ReadReq MSHR misses system.cpu.icache.demand_mshr_misses::cpu.inst 819 # number of demand (read+write) MSHR misses system.cpu.icache.demand_mshr_misses::total 819 # number of demand (read+write) MSHR misses system.cpu.icache.overall_mshr_misses::cpu.inst 819 # number of overall MSHR misses system.cpu.icache.overall_mshr_misses::total 819 # number of overall MSHR misses -system.cpu.icache.ReadReq_mshr_miss_latency::cpu.inst 61583500 # number of ReadReq MSHR miss cycles -system.cpu.icache.ReadReq_mshr_miss_latency::total 61583500 # number of ReadReq MSHR miss cycles -system.cpu.icache.demand_mshr_miss_latency::cpu.inst 61583500 # number of demand (read+write) MSHR miss cycles -system.cpu.icache.demand_mshr_miss_latency::total 61583500 # number of demand (read+write) MSHR miss cycles -system.cpu.icache.overall_mshr_miss_latency::cpu.inst 61583500 # number of overall MSHR miss cycles -system.cpu.icache.overall_mshr_miss_latency::total 61583500 # number of overall MSHR miss cycles +system.cpu.icache.ReadReq_mshr_miss_latency::cpu.inst 60871000 # number of ReadReq MSHR miss cycles +system.cpu.icache.ReadReq_mshr_miss_latency::total 60871000 # number of ReadReq MSHR miss cycles +system.cpu.icache.demand_mshr_miss_latency::cpu.inst 60871000 # number of demand (read+write) MSHR miss cycles +system.cpu.icache.demand_mshr_miss_latency::total 60871000 # number of demand (read+write) MSHR miss cycles 
+system.cpu.icache.overall_mshr_miss_latency::cpu.inst 60871000 # number of overall MSHR miss cycles +system.cpu.icache.overall_mshr_miss_latency::total 60871000 # number of overall MSHR miss cycles system.cpu.icache.ReadReq_mshr_miss_rate::cpu.inst 0.000002 # mshr miss rate for ReadReq accesses system.cpu.icache.ReadReq_mshr_miss_rate::total 0.000002 # mshr miss rate for ReadReq accesses system.cpu.icache.demand_mshr_miss_rate::cpu.inst 0.000002 # mshr miss rate for demand accesses system.cpu.icache.demand_mshr_miss_rate::total 0.000002 # mshr miss rate for demand accesses system.cpu.icache.overall_mshr_miss_rate::cpu.inst 0.000002 # mshr miss rate for overall accesses system.cpu.icache.overall_mshr_miss_rate::total 0.000002 # mshr miss rate for overall accesses -system.cpu.icache.ReadReq_avg_mshr_miss_latency::cpu.inst 75193.528694 # average ReadReq mshr miss latency -system.cpu.icache.ReadReq_avg_mshr_miss_latency::total 75193.528694 # average ReadReq mshr miss latency -system.cpu.icache.demand_avg_mshr_miss_latency::cpu.inst 75193.528694 # average overall mshr miss latency -system.cpu.icache.demand_avg_mshr_miss_latency::total 75193.528694 # average overall mshr miss latency -system.cpu.icache.overall_avg_mshr_miss_latency::cpu.inst 75193.528694 # average overall mshr miss latency -system.cpu.icache.overall_avg_mshr_miss_latency::total 75193.528694 # average overall mshr miss latency -system.cpu.l2cache.tags.pwrStateResidencyTicks::UNDEFINED 1116865668500 # Cumulative time (in ticks) in various power states -system.cpu.l2cache.tags.replacements 2013919 # number of replacements -system.cpu.l2cache.tags.tagsinuse 31258.258362 # Cycle average of tags in use -system.cpu.l2cache.tags.total_refs 14509191 # Total number of references to valid blocks. -system.cpu.l2cache.tags.sampled_refs 2043695 # Sample count of references to valid blocks. -system.cpu.l2cache.tags.avg_refs 7.099489 # Average number of references to valid blocks. -system.cpu.l2cache.tags.warmup_cycle 59769702000 # Cycle when the warmup percentage was hit. 
-system.cpu.l2cache.tags.occ_blocks::writebacks 14832.909506 # Average occupied blocks per requestor -system.cpu.l2cache.tags.occ_blocks::cpu.inst 26.456768 # Average occupied blocks per requestor -system.cpu.l2cache.tags.occ_blocks::cpu.data 16398.892088 # Average occupied blocks per requestor -system.cpu.l2cache.tags.occ_percent::writebacks 0.452664 # Average percentage of cache occupancy -system.cpu.l2cache.tags.occ_percent::cpu.inst 0.000807 # Average percentage of cache occupancy -system.cpu.l2cache.tags.occ_percent::cpu.data 0.500454 # Average percentage of cache occupancy -system.cpu.l2cache.tags.occ_percent::total 0.953926 # Average percentage of cache occupancy +system.cpu.icache.ReadReq_avg_mshr_miss_latency::cpu.inst 74323.565324 # average ReadReq mshr miss latency +system.cpu.icache.ReadReq_avg_mshr_miss_latency::total 74323.565324 # average ReadReq mshr miss latency +system.cpu.icache.demand_avg_mshr_miss_latency::cpu.inst 74323.565324 # average overall mshr miss latency +system.cpu.icache.demand_avg_mshr_miss_latency::total 74323.565324 # average overall mshr miss latency +system.cpu.icache.overall_avg_mshr_miss_latency::cpu.inst 74323.565324 # average overall mshr miss latency +system.cpu.icache.overall_avg_mshr_miss_latency::total 74323.565324 # average overall mshr miss latency +system.cpu.l2cache.tags.pwrStateResidencyTicks::UNDEFINED 1128033563500 # Cumulative time (in ticks) in various power states +system.cpu.l2cache.tags.replacements 2013239 # number of replacements +system.cpu.l2cache.tags.tagsinuse 31266.385554 # Cycle average of tags in use +system.cpu.l2cache.tags.total_refs 14508014 # Total number of references to valid blocks. +system.cpu.l2cache.tags.sampled_refs 2043015 # Sample count of references to valid blocks. +system.cpu.l2cache.tags.avg_refs 7.101276 # Average number of references to valid blocks. +system.cpu.l2cache.tags.warmup_cycle 59831992000 # Cycle when the warmup percentage was hit. 
+system.cpu.l2cache.tags.occ_blocks::writebacks 14855.828649 # Average occupied blocks per requestor +system.cpu.l2cache.tags.occ_blocks::cpu.inst 26.313947 # Average occupied blocks per requestor +system.cpu.l2cache.tags.occ_blocks::cpu.data 16384.242958 # Average occupied blocks per requestor +system.cpu.l2cache.tags.occ_percent::writebacks 0.453364 # Average percentage of cache occupancy +system.cpu.l2cache.tags.occ_percent::cpu.inst 0.000803 # Average percentage of cache occupancy +system.cpu.l2cache.tags.occ_percent::cpu.data 0.500007 # Average percentage of cache occupancy +system.cpu.l2cache.tags.occ_percent::total 0.954174 # Average percentage of cache occupancy system.cpu.l2cache.tags.occ_task_id_blocks::1024 29776 # Occupied blocks per task id system.cpu.l2cache.tags.age_task_id_blocks_1024::0 93 # Occupied blocks per task id system.cpu.l2cache.tags.age_task_id_blocks_1024::1 31 # Occupied blocks per task id -system.cpu.l2cache.tags.age_task_id_blocks_1024::2 1250 # Occupied blocks per task id -system.cpu.l2cache.tags.age_task_id_blocks_1024::3 12849 # Occupied blocks per task id +system.cpu.l2cache.tags.age_task_id_blocks_1024::2 1246 # Occupied blocks per task id +system.cpu.l2cache.tags.age_task_id_blocks_1024::3 12853 # Occupied blocks per task id system.cpu.l2cache.tags.age_task_id_blocks_1024::4 15553 # Occupied blocks per task id system.cpu.l2cache.tags.occ_task_id_percent::1024 0.908691 # Percentage of cache occupancy per task id -system.cpu.l2cache.tags.tag_accesses 151498004 # Number of tag accesses -system.cpu.l2cache.tags.data_accesses 151498004 # Number of data accesses -system.cpu.l2cache.pwrStateResidencyTicks::UNDEFINED 1116865668500 # Cumulative time (in ticks) in various power states -system.cpu.l2cache.WritebackDirty_hits::writebacks 3684567 # number of WritebackDirty hits -system.cpu.l2cache.WritebackDirty_hits::total 3684567 # number of WritebackDirty hits -system.cpu.l2cache.WritebackClean_hits::writebacks 29 # number of WritebackClean hits -system.cpu.l2cache.WritebackClean_hits::total 29 # number of WritebackClean hits -system.cpu.l2cache.ReadExReq_hits::cpu.data 1089694 # number of ReadExReq hits -system.cpu.l2cache.ReadExReq_hits::total 1089694 # number of ReadExReq hits +system.cpu.l2cache.tags.tag_accesses 151482269 # Number of tag accesses +system.cpu.l2cache.tags.data_accesses 151482269 # Number of data accesses +system.cpu.l2cache.pwrStateResidencyTicks::UNDEFINED 1128033563500 # Cumulative time (in ticks) in various power states +system.cpu.l2cache.WritebackDirty_hits::writebacks 3684499 # number of WritebackDirty hits +system.cpu.l2cache.WritebackDirty_hits::total 3684499 # number of WritebackDirty hits +system.cpu.l2cache.WritebackClean_hits::writebacks 30 # number of WritebackClean hits +system.cpu.l2cache.WritebackClean_hits::total 30 # number of WritebackClean hits +system.cpu.l2cache.ReadExReq_hits::cpu.data 1089818 # number of ReadExReq hits +system.cpu.l2cache.ReadExReq_hits::total 1089818 # number of ReadExReq hits system.cpu.l2cache.ReadCleanReq_hits::cpu.inst 36 # number of ReadCleanReq hits system.cpu.l2cache.ReadCleanReq_hits::total 36 # number of ReadCleanReq hits -system.cpu.l2cache.ReadSharedReq_hits::cpu.data 6089630 # number of ReadSharedReq hits -system.cpu.l2cache.ReadSharedReq_hits::total 6089630 # number of ReadSharedReq hits +system.cpu.l2cache.ReadSharedReq_hits::cpu.data 6089246 # number of ReadSharedReq hits +system.cpu.l2cache.ReadSharedReq_hits::total 6089246 # number of ReadSharedReq hits 
system.cpu.l2cache.demand_hits::cpu.inst 36 # number of demand (read+write) hits -system.cpu.l2cache.demand_hits::cpu.data 7179324 # number of demand (read+write) hits -system.cpu.l2cache.demand_hits::total 7179360 # number of demand (read+write) hits +system.cpu.l2cache.demand_hits::cpu.data 7179064 # number of demand (read+write) hits +system.cpu.l2cache.demand_hits::total 7179100 # number of demand (read+write) hits system.cpu.l2cache.overall_hits::cpu.inst 36 # number of overall hits -system.cpu.l2cache.overall_hits::cpu.data 7179324 # number of overall hits -system.cpu.l2cache.overall_hits::total 7179360 # number of overall hits -system.cpu.l2cache.ReadExReq_misses::cpu.data 801159 # number of ReadExReq misses -system.cpu.l2cache.ReadExReq_misses::total 801159 # number of ReadExReq misses +system.cpu.l2cache.overall_hits::cpu.data 7179064 # number of overall hits +system.cpu.l2cache.overall_hits::total 7179100 # number of overall hits +system.cpu.l2cache.ReadExReq_misses::cpu.data 801012 # number of ReadExReq misses +system.cpu.l2cache.ReadExReq_misses::total 801012 # number of ReadExReq misses system.cpu.l2cache.ReadCleanReq_misses::cpu.inst 783 # number of ReadCleanReq misses system.cpu.l2cache.ReadCleanReq_misses::total 783 # number of ReadCleanReq misses -system.cpu.l2cache.ReadSharedReq_misses::cpu.data 1244654 # number of ReadSharedReq misses -system.cpu.l2cache.ReadSharedReq_misses::total 1244654 # number of ReadSharedReq misses +system.cpu.l2cache.ReadSharedReq_misses::cpu.data 1244121 # number of ReadSharedReq misses +system.cpu.l2cache.ReadSharedReq_misses::total 1244121 # number of ReadSharedReq misses system.cpu.l2cache.demand_misses::cpu.inst 783 # number of demand (read+write) misses -system.cpu.l2cache.demand_misses::cpu.data 2045813 # number of demand (read+write) misses -system.cpu.l2cache.demand_misses::total 2046596 # number of demand (read+write) misses +system.cpu.l2cache.demand_misses::cpu.data 2045133 # number of demand (read+write) misses +system.cpu.l2cache.demand_misses::total 2045916 # number of demand (read+write) misses system.cpu.l2cache.overall_misses::cpu.inst 783 # number of overall misses -system.cpu.l2cache.overall_misses::cpu.data 2045813 # number of overall misses -system.cpu.l2cache.overall_misses::total 2046596 # number of overall misses -system.cpu.l2cache.ReadExReq_miss_latency::cpu.data 70441435500 # number of ReadExReq miss cycles -system.cpu.l2cache.ReadExReq_miss_latency::total 70441435500 # number of ReadExReq miss cycles -system.cpu.l2cache.ReadCleanReq_miss_latency::cpu.inst 59945000 # number of ReadCleanReq miss cycles -system.cpu.l2cache.ReadCleanReq_miss_latency::total 59945000 # number of ReadCleanReq miss cycles -system.cpu.l2cache.ReadSharedReq_miss_latency::cpu.data 108637226500 # number of ReadSharedReq miss cycles -system.cpu.l2cache.ReadSharedReq_miss_latency::total 108637226500 # number of ReadSharedReq miss cycles -system.cpu.l2cache.demand_miss_latency::cpu.inst 59945000 # number of demand (read+write) miss cycles -system.cpu.l2cache.demand_miss_latency::cpu.data 179078662000 # number of demand (read+write) miss cycles -system.cpu.l2cache.demand_miss_latency::total 179138607000 # number of demand (read+write) miss cycles -system.cpu.l2cache.overall_miss_latency::cpu.inst 59945000 # number of overall miss cycles -system.cpu.l2cache.overall_miss_latency::cpu.data 179078662000 # number of overall miss cycles -system.cpu.l2cache.overall_miss_latency::total 179138607000 # number of overall miss cycles 
-system.cpu.l2cache.WritebackDirty_accesses::writebacks 3684567 # number of WritebackDirty accesses(hits+misses) -system.cpu.l2cache.WritebackDirty_accesses::total 3684567 # number of WritebackDirty accesses(hits+misses) -system.cpu.l2cache.WritebackClean_accesses::writebacks 29 # number of WritebackClean accesses(hits+misses) -system.cpu.l2cache.WritebackClean_accesses::total 29 # number of WritebackClean accesses(hits+misses) -system.cpu.l2cache.ReadExReq_accesses::cpu.data 1890853 # number of ReadExReq accesses(hits+misses) -system.cpu.l2cache.ReadExReq_accesses::total 1890853 # number of ReadExReq accesses(hits+misses) +system.cpu.l2cache.overall_misses::cpu.data 2045133 # number of overall misses +system.cpu.l2cache.overall_misses::total 2045916 # number of overall misses +system.cpu.l2cache.ReadExReq_miss_latency::cpu.data 70389294000 # number of ReadExReq miss cycles +system.cpu.l2cache.ReadExReq_miss_latency::total 70389294000 # number of ReadExReq miss cycles +system.cpu.l2cache.ReadCleanReq_miss_latency::cpu.inst 59232000 # number of ReadCleanReq miss cycles +system.cpu.l2cache.ReadCleanReq_miss_latency::total 59232000 # number of ReadCleanReq miss cycles +system.cpu.l2cache.ReadSharedReq_miss_latency::cpu.data 108712178500 # number of ReadSharedReq miss cycles +system.cpu.l2cache.ReadSharedReq_miss_latency::total 108712178500 # number of ReadSharedReq miss cycles +system.cpu.l2cache.demand_miss_latency::cpu.inst 59232000 # number of demand (read+write) miss cycles +system.cpu.l2cache.demand_miss_latency::cpu.data 179101472500 # number of demand (read+write) miss cycles +system.cpu.l2cache.demand_miss_latency::total 179160704500 # number of demand (read+write) miss cycles +system.cpu.l2cache.overall_miss_latency::cpu.inst 59232000 # number of overall miss cycles +system.cpu.l2cache.overall_miss_latency::cpu.data 179101472500 # number of overall miss cycles +system.cpu.l2cache.overall_miss_latency::total 179160704500 # number of overall miss cycles +system.cpu.l2cache.WritebackDirty_accesses::writebacks 3684499 # number of WritebackDirty accesses(hits+misses) +system.cpu.l2cache.WritebackDirty_accesses::total 3684499 # number of WritebackDirty accesses(hits+misses) +system.cpu.l2cache.WritebackClean_accesses::writebacks 30 # number of WritebackClean accesses(hits+misses) +system.cpu.l2cache.WritebackClean_accesses::total 30 # number of WritebackClean accesses(hits+misses) +system.cpu.l2cache.ReadExReq_accesses::cpu.data 1890830 # number of ReadExReq accesses(hits+misses) +system.cpu.l2cache.ReadExReq_accesses::total 1890830 # number of ReadExReq accesses(hits+misses) system.cpu.l2cache.ReadCleanReq_accesses::cpu.inst 819 # number of ReadCleanReq accesses(hits+misses) system.cpu.l2cache.ReadCleanReq_accesses::total 819 # number of ReadCleanReq accesses(hits+misses) -system.cpu.l2cache.ReadSharedReq_accesses::cpu.data 7334284 # number of ReadSharedReq accesses(hits+misses) -system.cpu.l2cache.ReadSharedReq_accesses::total 7334284 # number of ReadSharedReq accesses(hits+misses) +system.cpu.l2cache.ReadSharedReq_accesses::cpu.data 7333367 # number of ReadSharedReq accesses(hits+misses) +system.cpu.l2cache.ReadSharedReq_accesses::total 7333367 # number of ReadSharedReq accesses(hits+misses) system.cpu.l2cache.demand_accesses::cpu.inst 819 # number of demand (read+write) accesses -system.cpu.l2cache.demand_accesses::cpu.data 9225137 # number of demand (read+write) accesses -system.cpu.l2cache.demand_accesses::total 9225956 # number of demand (read+write) accesses 
+system.cpu.l2cache.demand_accesses::cpu.data 9224197 # number of demand (read+write) accesses +system.cpu.l2cache.demand_accesses::total 9225016 # number of demand (read+write) accesses system.cpu.l2cache.overall_accesses::cpu.inst 819 # number of overall (read+write) accesses -system.cpu.l2cache.overall_accesses::cpu.data 9225137 # number of overall (read+write) accesses -system.cpu.l2cache.overall_accesses::total 9225956 # number of overall (read+write) accesses -system.cpu.l2cache.ReadExReq_miss_rate::cpu.data 0.423702 # miss rate for ReadExReq accesses -system.cpu.l2cache.ReadExReq_miss_rate::total 0.423702 # miss rate for ReadExReq accesses +system.cpu.l2cache.overall_accesses::cpu.data 9224197 # number of overall (read+write) accesses +system.cpu.l2cache.overall_accesses::total 9225016 # number of overall (read+write) accesses +system.cpu.l2cache.ReadExReq_miss_rate::cpu.data 0.423630 # miss rate for ReadExReq accesses +system.cpu.l2cache.ReadExReq_miss_rate::total 0.423630 # miss rate for ReadExReq accesses system.cpu.l2cache.ReadCleanReq_miss_rate::cpu.inst 0.956044 # miss rate for ReadCleanReq accesses system.cpu.l2cache.ReadCleanReq_miss_rate::total 0.956044 # miss rate for ReadCleanReq accesses -system.cpu.l2cache.ReadSharedReq_miss_rate::cpu.data 0.169704 # miss rate for ReadSharedReq accesses -system.cpu.l2cache.ReadSharedReq_miss_rate::total 0.169704 # miss rate for ReadSharedReq accesses +system.cpu.l2cache.ReadSharedReq_miss_rate::cpu.data 0.169652 # miss rate for ReadSharedReq accesses +system.cpu.l2cache.ReadSharedReq_miss_rate::total 0.169652 # miss rate for ReadSharedReq accesses system.cpu.l2cache.demand_miss_rate::cpu.inst 0.956044 # miss rate for demand accesses -system.cpu.l2cache.demand_miss_rate::cpu.data 0.221765 # miss rate for demand accesses -system.cpu.l2cache.demand_miss_rate::total 0.221830 # miss rate for demand accesses +system.cpu.l2cache.demand_miss_rate::cpu.data 0.221714 # miss rate for demand accesses +system.cpu.l2cache.demand_miss_rate::total 0.221779 # miss rate for demand accesses system.cpu.l2cache.overall_miss_rate::cpu.inst 0.956044 # miss rate for overall accesses -system.cpu.l2cache.overall_miss_rate::cpu.data 0.221765 # miss rate for overall accesses -system.cpu.l2cache.overall_miss_rate::total 0.221830 # miss rate for overall accesses -system.cpu.l2cache.ReadExReq_avg_miss_latency::cpu.data 87924.413880 # average ReadExReq miss latency -system.cpu.l2cache.ReadExReq_avg_miss_latency::total 87924.413880 # average ReadExReq miss latency -system.cpu.l2cache.ReadCleanReq_avg_miss_latency::cpu.inst 76558.109834 # average ReadCleanReq miss latency -system.cpu.l2cache.ReadCleanReq_avg_miss_latency::total 76558.109834 # average ReadCleanReq miss latency -system.cpu.l2cache.ReadSharedReq_avg_miss_latency::cpu.data 87283.073449 # average ReadSharedReq miss latency -system.cpu.l2cache.ReadSharedReq_avg_miss_latency::total 87283.073449 # average ReadSharedReq miss latency -system.cpu.l2cache.demand_avg_miss_latency::cpu.inst 76558.109834 # average overall miss latency -system.cpu.l2cache.demand_avg_miss_latency::cpu.data 87534.228202 # average overall miss latency -system.cpu.l2cache.demand_avg_miss_latency::total 87530.028887 # average overall miss latency -system.cpu.l2cache.overall_avg_miss_latency::cpu.inst 76558.109834 # average overall miss latency -system.cpu.l2cache.overall_avg_miss_latency::cpu.data 87534.228202 # average overall miss latency -system.cpu.l2cache.overall_avg_miss_latency::total 87530.028887 # average overall miss latency 
+system.cpu.l2cache.overall_miss_rate::cpu.data 0.221714 # miss rate for overall accesses +system.cpu.l2cache.overall_miss_rate::total 0.221779 # miss rate for overall accesses +system.cpu.l2cache.ReadExReq_avg_miss_latency::cpu.data 87875.455049 # average ReadExReq miss latency +system.cpu.l2cache.ReadExReq_avg_miss_latency::total 87875.455049 # average ReadExReq miss latency +system.cpu.l2cache.ReadCleanReq_avg_miss_latency::cpu.inst 75647.509579 # average ReadCleanReq miss latency +system.cpu.l2cache.ReadCleanReq_avg_miss_latency::total 75647.509579 # average ReadCleanReq miss latency +system.cpu.l2cache.ReadSharedReq_avg_miss_latency::cpu.data 87380.711764 # average ReadSharedReq miss latency +system.cpu.l2cache.ReadSharedReq_avg_miss_latency::total 87380.711764 # average ReadSharedReq miss latency +system.cpu.l2cache.demand_avg_miss_latency::cpu.inst 75647.509579 # average overall miss latency +system.cpu.l2cache.demand_avg_miss_latency::cpu.data 87574.486598 # average overall miss latency +system.cpu.l2cache.demand_avg_miss_latency::total 87569.921981 # average overall miss latency +system.cpu.l2cache.overall_avg_miss_latency::cpu.inst 75647.509579 # average overall miss latency +system.cpu.l2cache.overall_avg_miss_latency::cpu.data 87574.486598 # average overall miss latency +system.cpu.l2cache.overall_avg_miss_latency::total 87569.921981 # average overall miss latency system.cpu.l2cache.blocked_cycles::no_mshrs 0 # number of cycles access was blocked system.cpu.l2cache.blocked_cycles::no_targets 0 # number of cycles access was blocked system.cpu.l2cache.blocked::no_mshrs 0 # number of cycles access was blocked system.cpu.l2cache.blocked::no_targets 0 # number of cycles access was blocked system.cpu.l2cache.avg_blocked_cycles::no_mshrs nan # average number of cycles each access was blocked system.cpu.l2cache.avg_blocked_cycles::no_targets nan # average number of cycles each access was blocked -system.cpu.l2cache.writebacks::writebacks 1050123 # number of writebacks -system.cpu.l2cache.writebacks::total 1050123 # number of writebacks -system.cpu.l2cache.ReadSharedReq_mshr_hits::cpu.data 5 # number of ReadSharedReq MSHR hits -system.cpu.l2cache.ReadSharedReq_mshr_hits::total 5 # number of ReadSharedReq MSHR hits -system.cpu.l2cache.demand_mshr_hits::cpu.data 5 # number of demand (read+write) MSHR hits -system.cpu.l2cache.demand_mshr_hits::total 5 # number of demand (read+write) MSHR hits -system.cpu.l2cache.overall_mshr_hits::cpu.data 5 # number of overall MSHR hits -system.cpu.l2cache.overall_mshr_hits::total 5 # number of overall MSHR hits +system.cpu.l2cache.writebacks::writebacks 1049913 # number of writebacks +system.cpu.l2cache.writebacks::total 1049913 # number of writebacks +system.cpu.l2cache.ReadSharedReq_mshr_hits::cpu.data 6 # number of ReadSharedReq MSHR hits +system.cpu.l2cache.ReadSharedReq_mshr_hits::total 6 # number of ReadSharedReq MSHR hits +system.cpu.l2cache.demand_mshr_hits::cpu.data 6 # number of demand (read+write) MSHR hits +system.cpu.l2cache.demand_mshr_hits::total 6 # number of demand (read+write) MSHR hits +system.cpu.l2cache.overall_mshr_hits::cpu.data 6 # number of overall MSHR hits +system.cpu.l2cache.overall_mshr_hits::total 6 # number of overall MSHR hits system.cpu.l2cache.CleanEvict_mshr_misses::writebacks 214 # number of CleanEvict MSHR misses system.cpu.l2cache.CleanEvict_mshr_misses::total 214 # number of CleanEvict MSHR misses -system.cpu.l2cache.ReadExReq_mshr_misses::cpu.data 801159 # number of ReadExReq MSHR misses 
-system.cpu.l2cache.ReadExReq_mshr_misses::total 801159 # number of ReadExReq MSHR misses +system.cpu.l2cache.ReadExReq_mshr_misses::cpu.data 801012 # number of ReadExReq MSHR misses +system.cpu.l2cache.ReadExReq_mshr_misses::total 801012 # number of ReadExReq MSHR misses system.cpu.l2cache.ReadCleanReq_mshr_misses::cpu.inst 783 # number of ReadCleanReq MSHR misses system.cpu.l2cache.ReadCleanReq_mshr_misses::total 783 # number of ReadCleanReq MSHR misses -system.cpu.l2cache.ReadSharedReq_mshr_misses::cpu.data 1244649 # number of ReadSharedReq MSHR misses -system.cpu.l2cache.ReadSharedReq_mshr_misses::total 1244649 # number of ReadSharedReq MSHR misses +system.cpu.l2cache.ReadSharedReq_mshr_misses::cpu.data 1244115 # number of ReadSharedReq MSHR misses +system.cpu.l2cache.ReadSharedReq_mshr_misses::total 1244115 # number of ReadSharedReq MSHR misses system.cpu.l2cache.demand_mshr_misses::cpu.inst 783 # number of demand (read+write) MSHR misses -system.cpu.l2cache.demand_mshr_misses::cpu.data 2045808 # number of demand (read+write) MSHR misses -system.cpu.l2cache.demand_mshr_misses::total 2046591 # number of demand (read+write) MSHR misses +system.cpu.l2cache.demand_mshr_misses::cpu.data 2045127 # number of demand (read+write) MSHR misses +system.cpu.l2cache.demand_mshr_misses::total 2045910 # number of demand (read+write) MSHR misses system.cpu.l2cache.overall_mshr_misses::cpu.inst 783 # number of overall MSHR misses -system.cpu.l2cache.overall_mshr_misses::cpu.data 2045808 # number of overall MSHR misses -system.cpu.l2cache.overall_mshr_misses::total 2046591 # number of overall MSHR misses -system.cpu.l2cache.ReadExReq_mshr_miss_latency::cpu.data 62429845500 # number of ReadExReq MSHR miss cycles -system.cpu.l2cache.ReadExReq_mshr_miss_latency::total 62429845500 # number of ReadExReq MSHR miss cycles -system.cpu.l2cache.ReadCleanReq_mshr_miss_latency::cpu.inst 52115000 # number of ReadCleanReq MSHR miss cycles -system.cpu.l2cache.ReadCleanReq_mshr_miss_latency::total 52115000 # number of ReadCleanReq MSHR miss cycles -system.cpu.l2cache.ReadSharedReq_mshr_miss_latency::cpu.data 96190393500 # number of ReadSharedReq MSHR miss cycles -system.cpu.l2cache.ReadSharedReq_mshr_miss_latency::total 96190393500 # number of ReadSharedReq MSHR miss cycles -system.cpu.l2cache.demand_mshr_miss_latency::cpu.inst 52115000 # number of demand (read+write) MSHR miss cycles -system.cpu.l2cache.demand_mshr_miss_latency::cpu.data 158620239000 # number of demand (read+write) MSHR miss cycles -system.cpu.l2cache.demand_mshr_miss_latency::total 158672354000 # number of demand (read+write) MSHR miss cycles -system.cpu.l2cache.overall_mshr_miss_latency::cpu.inst 52115000 # number of overall MSHR miss cycles -system.cpu.l2cache.overall_mshr_miss_latency::cpu.data 158620239000 # number of overall MSHR miss cycles -system.cpu.l2cache.overall_mshr_miss_latency::total 158672354000 # number of overall MSHR miss cycles +system.cpu.l2cache.overall_mshr_misses::cpu.data 2045127 # number of overall MSHR misses +system.cpu.l2cache.overall_mshr_misses::total 2045910 # number of overall MSHR misses +system.cpu.l2cache.ReadExReq_mshr_miss_latency::cpu.data 62379174000 # number of ReadExReq MSHR miss cycles +system.cpu.l2cache.ReadExReq_mshr_miss_latency::total 62379174000 # number of ReadExReq MSHR miss cycles +system.cpu.l2cache.ReadCleanReq_mshr_miss_latency::cpu.inst 51402000 # number of ReadCleanReq MSHR miss cycles +system.cpu.l2cache.ReadCleanReq_mshr_miss_latency::total 51402000 # number of ReadCleanReq MSHR miss cycles 
+system.cpu.l2cache.ReadSharedReq_mshr_miss_latency::cpu.data 96270618000 # number of ReadSharedReq MSHR miss cycles +system.cpu.l2cache.ReadSharedReq_mshr_miss_latency::total 96270618000 # number of ReadSharedReq MSHR miss cycles +system.cpu.l2cache.demand_mshr_miss_latency::cpu.inst 51402000 # number of demand (read+write) MSHR miss cycles +system.cpu.l2cache.demand_mshr_miss_latency::cpu.data 158649792000 # number of demand (read+write) MSHR miss cycles +system.cpu.l2cache.demand_mshr_miss_latency::total 158701194000 # number of demand (read+write) MSHR miss cycles +system.cpu.l2cache.overall_mshr_miss_latency::cpu.inst 51402000 # number of overall MSHR miss cycles +system.cpu.l2cache.overall_mshr_miss_latency::cpu.data 158649792000 # number of overall MSHR miss cycles +system.cpu.l2cache.overall_mshr_miss_latency::total 158701194000 # number of overall MSHR miss cycles system.cpu.l2cache.CleanEvict_mshr_miss_rate::writebacks inf # mshr miss rate for CleanEvict accesses system.cpu.l2cache.CleanEvict_mshr_miss_rate::total inf # mshr miss rate for CleanEvict accesses -system.cpu.l2cache.ReadExReq_mshr_miss_rate::cpu.data 0.423702 # mshr miss rate for ReadExReq accesses -system.cpu.l2cache.ReadExReq_mshr_miss_rate::total 0.423702 # mshr miss rate for ReadExReq accesses +system.cpu.l2cache.ReadExReq_mshr_miss_rate::cpu.data 0.423630 # mshr miss rate for ReadExReq accesses +system.cpu.l2cache.ReadExReq_mshr_miss_rate::total 0.423630 # mshr miss rate for ReadExReq accesses system.cpu.l2cache.ReadCleanReq_mshr_miss_rate::cpu.inst 0.956044 # mshr miss rate for ReadCleanReq accesses system.cpu.l2cache.ReadCleanReq_mshr_miss_rate::total 0.956044 # mshr miss rate for ReadCleanReq accesses -system.cpu.l2cache.ReadSharedReq_mshr_miss_rate::cpu.data 0.169703 # mshr miss rate for ReadSharedReq accesses -system.cpu.l2cache.ReadSharedReq_mshr_miss_rate::total 0.169703 # mshr miss rate for ReadSharedReq accesses +system.cpu.l2cache.ReadSharedReq_mshr_miss_rate::cpu.data 0.169651 # mshr miss rate for ReadSharedReq accesses +system.cpu.l2cache.ReadSharedReq_mshr_miss_rate::total 0.169651 # mshr miss rate for ReadSharedReq accesses system.cpu.l2cache.demand_mshr_miss_rate::cpu.inst 0.956044 # mshr miss rate for demand accesses -system.cpu.l2cache.demand_mshr_miss_rate::cpu.data 0.221765 # mshr miss rate for demand accesses -system.cpu.l2cache.demand_mshr_miss_rate::total 0.221830 # mshr miss rate for demand accesses +system.cpu.l2cache.demand_mshr_miss_rate::cpu.data 0.221713 # mshr miss rate for demand accesses +system.cpu.l2cache.demand_mshr_miss_rate::total 0.221778 # mshr miss rate for demand accesses system.cpu.l2cache.overall_mshr_miss_rate::cpu.inst 0.956044 # mshr miss rate for overall accesses -system.cpu.l2cache.overall_mshr_miss_rate::cpu.data 0.221765 # mshr miss rate for overall accesses -system.cpu.l2cache.overall_mshr_miss_rate::total 0.221830 # mshr miss rate for overall accesses -system.cpu.l2cache.ReadExReq_avg_mshr_miss_latency::cpu.data 77924.413880 # average ReadExReq mshr miss latency -system.cpu.l2cache.ReadExReq_avg_mshr_miss_latency::total 77924.413880 # average ReadExReq mshr miss latency -system.cpu.l2cache.ReadCleanReq_avg_mshr_miss_latency::cpu.inst 66558.109834 # average ReadCleanReq mshr miss latency -system.cpu.l2cache.ReadCleanReq_avg_mshr_miss_latency::total 66558.109834 # average ReadCleanReq mshr miss latency -system.cpu.l2cache.ReadSharedReq_avg_mshr_miss_latency::cpu.data 77283.148502 # average ReadSharedReq mshr miss latency 
-system.cpu.l2cache.ReadSharedReq_avg_mshr_miss_latency::total 77283.148502 # average ReadSharedReq mshr miss latency -system.cpu.l2cache.demand_avg_mshr_miss_latency::cpu.inst 66558.109834 # average overall mshr miss latency -system.cpu.l2cache.demand_avg_mshr_miss_latency::cpu.data 77534.274477 # average overall mshr miss latency -system.cpu.l2cache.demand_avg_mshr_miss_latency::total 77530.075135 # average overall mshr miss latency -system.cpu.l2cache.overall_avg_mshr_miss_latency::cpu.inst 66558.109834 # average overall mshr miss latency -system.cpu.l2cache.overall_avg_mshr_miss_latency::cpu.data 77534.274477 # average overall mshr miss latency -system.cpu.l2cache.overall_avg_mshr_miss_latency::total 77530.075135 # average overall mshr miss latency -system.cpu.toL2Bus.snoop_filter.tot_requests 18447026 # Total number of requests made to the snoop filter. -system.cpu.toL2Bus.snoop_filter.hit_single_requests 9221082 # Number of requests hitting in the snoop filter with a single holder of the requested data. +system.cpu.l2cache.overall_mshr_miss_rate::cpu.data 0.221713 # mshr miss rate for overall accesses +system.cpu.l2cache.overall_mshr_miss_rate::total 0.221778 # mshr miss rate for overall accesses +system.cpu.l2cache.ReadExReq_avg_mshr_miss_latency::cpu.data 77875.455049 # average ReadExReq mshr miss latency +system.cpu.l2cache.ReadExReq_avg_mshr_miss_latency::total 77875.455049 # average ReadExReq mshr miss latency +system.cpu.l2cache.ReadCleanReq_avg_mshr_miss_latency::cpu.inst 65647.509579 # average ReadCleanReq mshr miss latency +system.cpu.l2cache.ReadCleanReq_avg_mshr_miss_latency::total 65647.509579 # average ReadCleanReq mshr miss latency +system.cpu.l2cache.ReadSharedReq_avg_mshr_miss_latency::cpu.data 77380.803222 # average ReadSharedReq mshr miss latency +system.cpu.l2cache.ReadSharedReq_avg_mshr_miss_latency::total 77380.803222 # average ReadSharedReq mshr miss latency +system.cpu.l2cache.demand_avg_mshr_miss_latency::cpu.inst 65647.509579 # average overall mshr miss latency +system.cpu.l2cache.demand_avg_mshr_miss_latency::cpu.data 77574.542803 # average overall mshr miss latency +system.cpu.l2cache.demand_avg_mshr_miss_latency::total 77569.978152 # average overall mshr miss latency +system.cpu.l2cache.overall_avg_mshr_miss_latency::cpu.inst 65647.509579 # average overall mshr miss latency +system.cpu.l2cache.overall_avg_mshr_miss_latency::cpu.data 77574.542803 # average overall mshr miss latency +system.cpu.l2cache.overall_avg_mshr_miss_latency::total 77569.978152 # average overall mshr miss latency +system.cpu.toL2Bus.snoop_filter.tot_requests 18445147 # Total number of requests made to the snoop filter. +system.cpu.toL2Bus.snoop_filter.hit_single_requests 9220143 # Number of requests hitting in the snoop filter with a single holder of the requested data. system.cpu.toL2Bus.snoop_filter.hit_multi_requests 1594 # Number of requests hitting in the snoop filter with multiple (>1) holders of the requested data. -system.cpu.toL2Bus.snoop_filter.tot_snoops 1286 # Total number of snoops made to the snoop filter. -system.cpu.toL2Bus.snoop_filter.hit_single_snoops 1280 # Number of snoops hitting in the snoop filter with a single holder of the requested data. +system.cpu.toL2Bus.snoop_filter.tot_snoops 1285 # Total number of snoops made to the snoop filter. +system.cpu.toL2Bus.snoop_filter.hit_single_snoops 1279 # Number of snoops hitting in the snoop filter with a single holder of the requested data. 
system.cpu.toL2Bus.snoop_filter.hit_multi_snoops 6 # Number of snoops hitting in the snoop filter with multiple (>1) holders of the requested data. -system.cpu.toL2Bus.pwrStateResidencyTicks::UNDEFINED 1116865668500 # Cumulative time (in ticks) in various power states -system.cpu.toL2Bus.trans_dist::ReadResp 7335103 # Transaction distribution -system.cpu.toL2Bus.trans_dist::WritebackDirty 4734690 # Transaction distribution -system.cpu.toL2Bus.trans_dist::WritebackClean 29 # Transaction distribution -system.cpu.toL2Bus.trans_dist::CleanEvict 6500270 # Transaction distribution -system.cpu.toL2Bus.trans_dist::ReadExReq 1890853 # Transaction distribution -system.cpu.toL2Bus.trans_dist::ReadExResp 1890853 # Transaction distribution +system.cpu.toL2Bus.pwrStateResidencyTicks::UNDEFINED 1128033563500 # Cumulative time (in ticks) in various power states +system.cpu.toL2Bus.trans_dist::ReadResp 7334186 # Transaction distribution +system.cpu.toL2Bus.trans_dist::WritebackDirty 4734412 # Transaction distribution +system.cpu.toL2Bus.trans_dist::WritebackClean 30 # Transaction distribution +system.cpu.toL2Bus.trans_dist::CleanEvict 6498928 # Transaction distribution +system.cpu.toL2Bus.trans_dist::ReadExReq 1890830 # Transaction distribution +system.cpu.toL2Bus.trans_dist::ReadExResp 1890830 # Transaction distribution system.cpu.toL2Bus.trans_dist::ReadCleanReq 819 # Transaction distribution -system.cpu.toL2Bus.trans_dist::ReadSharedReq 7334284 # Transaction distribution -system.cpu.toL2Bus.pkt_count_system.cpu.icache.mem_side::system.cpu.l2cache.cpu_side 1667 # Packet count per connected master and slave (bytes) -system.cpu.toL2Bus.pkt_count_system.cpu.dcache.mem_side::system.cpu.l2cache.cpu_side 27671315 # Packet count per connected master and slave (bytes) -system.cpu.toL2Bus.pkt_count::total 27672982 # Packet count per connected master and slave (bytes) -system.cpu.toL2Bus.pkt_size_system.cpu.icache.mem_side::system.cpu.l2cache.cpu_side 54272 # Cumulative packet size per connected master and slave (bytes) -system.cpu.toL2Bus.pkt_size_system.cpu.dcache.mem_side::system.cpu.l2cache.cpu_side 826221056 # Cumulative packet size per connected master and slave (bytes) -system.cpu.toL2Bus.pkt_size::total 826275328 # Cumulative packet size per connected master and slave (bytes) -system.cpu.toL2Bus.snoops 2013919 # Total snoops (count) -system.cpu.toL2Bus.snoop_fanout::samples 11239875 # Request fanout histogram +system.cpu.toL2Bus.trans_dist::ReadSharedReq 7333367 # Transaction distribution +system.cpu.toL2Bus.pkt_count_system.cpu.icache.mem_side::system.cpu.l2cache.cpu_side 1668 # Packet count per connected master and slave (bytes) +system.cpu.toL2Bus.pkt_count_system.cpu.dcache.mem_side::system.cpu.l2cache.cpu_side 27668495 # Packet count per connected master and slave (bytes) +system.cpu.toL2Bus.pkt_count::total 27670163 # Packet count per connected master and slave (bytes) +system.cpu.toL2Bus.pkt_size_system.cpu.icache.mem_side::system.cpu.l2cache.cpu_side 54336 # Cumulative packet size per connected master and slave (bytes) +system.cpu.toL2Bus.pkt_size_system.cpu.dcache.mem_side::system.cpu.l2cache.cpu_side 826156544 # Cumulative packet size per connected master and slave (bytes) +system.cpu.toL2Bus.pkt_size::total 826210880 # Cumulative packet size per connected master and slave (bytes) +system.cpu.toL2Bus.snoops 2013239 # Total snoops (count) +system.cpu.toL2Bus.snoopTraffic 67194432 # Total snoop traffic (bytes) +system.cpu.toL2Bus.snoop_fanout::samples 11238255 # Request fanout histogram 
system.cpu.toL2Bus.snoop_fanout::mean 0.000258 # Request fanout histogram -system.cpu.toL2Bus.snoop_fanout::stdev 0.016088 # Request fanout histogram +system.cpu.toL2Bus.snoop_fanout::stdev 0.016087 # Request fanout histogram system.cpu.toL2Bus.snoop_fanout::underflows 0 0.00% 0.00% # Request fanout histogram -system.cpu.toL2Bus.snoop_fanout::0 11236983 99.97% 99.97% # Request fanout histogram -system.cpu.toL2Bus.snoop_fanout::1 2886 0.03% 100.00% # Request fanout histogram +system.cpu.toL2Bus.snoop_fanout::0 11235364 99.97% 99.97% # Request fanout histogram +system.cpu.toL2Bus.snoop_fanout::1 2885 0.03% 100.00% # Request fanout histogram system.cpu.toL2Bus.snoop_fanout::2 6 0.00% 100.00% # Request fanout histogram system.cpu.toL2Bus.snoop_fanout::overflows 0 0.00% 100.00% # Request fanout histogram system.cpu.toL2Bus.snoop_fanout::min_value 0 # Request fanout histogram system.cpu.toL2Bus.snoop_fanout::max_value 2 # Request fanout histogram -system.cpu.toL2Bus.snoop_fanout::total 11239875 # Request fanout histogram -system.cpu.toL2Bus.reqLayer0.occupancy 12908109000 # Layer occupancy (ticks) -system.cpu.toL2Bus.reqLayer0.utilization 1.2 # Layer utilization (%) +system.cpu.toL2Bus.snoop_fanout::total 11238255 # Request fanout histogram +system.cpu.toL2Bus.reqLayer0.occupancy 12907102500 # Layer occupancy (ticks) +system.cpu.toL2Bus.reqLayer0.utilization 1.1 # Layer utilization (%) system.cpu.toL2Bus.respLayer0.occupancy 1228500 # Layer occupancy (ticks) system.cpu.toL2Bus.respLayer0.utilization 0.0 # Layer utilization (%) -system.cpu.toL2Bus.respLayer1.occupancy 13837707995 # Layer occupancy (ticks) +system.cpu.toL2Bus.respLayer1.occupancy 13836298494 # Layer occupancy (ticks) system.cpu.toL2Bus.respLayer1.utilization 1.2 # Layer utilization (%) -system.membus.pwrStateResidencyTicks::UNDEFINED 1116865668500 # Cumulative time (in ticks) in various power states -system.membus.trans_dist::ReadResp 1245432 # Transaction distribution -system.membus.trans_dist::WritebackDirty 1050123 # Transaction distribution -system.membus.trans_dist::CleanEvict 962724 # Transaction distribution -system.membus.trans_dist::ReadExReq 801159 # Transaction distribution -system.membus.trans_dist::ReadExResp 801159 # Transaction distribution -system.membus.trans_dist::ReadSharedReq 1245432 # Transaction distribution -system.membus.pkt_count_system.cpu.l2cache.mem_side::system.physmem.port 6106029 # Packet count per connected master and slave (bytes) -system.membus.pkt_count::total 6106029 # Packet count per connected master and slave (bytes) -system.membus.pkt_size_system.cpu.l2cache.mem_side::system.physmem.port 198189696 # Cumulative packet size per connected master and slave (bytes) -system.membus.pkt_size::total 198189696 # Cumulative packet size per connected master and slave (bytes) +system.membus.pwrStateResidencyTicks::UNDEFINED 1128033563500 # Cumulative time (in ticks) in various power states +system.membus.trans_dist::ReadResp 1244898 # Transaction distribution +system.membus.trans_dist::WritebackDirty 1049913 # Transaction distribution +system.membus.trans_dist::CleanEvict 962255 # Transaction distribution +system.membus.trans_dist::ReadExReq 801012 # Transaction distribution +system.membus.trans_dist::ReadExResp 801012 # Transaction distribution +system.membus.trans_dist::ReadSharedReq 1244898 # Transaction distribution +system.membus.pkt_count_system.cpu.l2cache.mem_side::system.physmem.port 6103988 # Packet count per connected master and slave (bytes) +system.membus.pkt_count::total 6103988 # Packet 
count per connected master and slave (bytes) +system.membus.pkt_size_system.cpu.l2cache.mem_side::system.physmem.port 198132672 # Cumulative packet size per connected master and slave (bytes) +system.membus.pkt_size::total 198132672 # Cumulative packet size per connected master and slave (bytes) system.membus.snoops 0 # Total snoops (count) -system.membus.snoop_fanout::samples 4059438 # Request fanout histogram +system.membus.snoopTraffic 0 # Total snoop traffic (bytes) +system.membus.snoop_fanout::samples 4058078 # Request fanout histogram system.membus.snoop_fanout::mean 0 # Request fanout histogram system.membus.snoop_fanout::stdev 0 # Request fanout histogram system.membus.snoop_fanout::underflows 0 0.00% 0.00% # Request fanout histogram -system.membus.snoop_fanout::0 4059438 100.00% 100.00% # Request fanout histogram +system.membus.snoop_fanout::0 4058078 100.00% 100.00% # Request fanout histogram system.membus.snoop_fanout::1 0 0.00% 100.00% # Request fanout histogram system.membus.snoop_fanout::overflows 0 0.00% 100.00% # Request fanout histogram system.membus.snoop_fanout::min_value 0 # Request fanout histogram system.membus.snoop_fanout::max_value 0 # Request fanout histogram -system.membus.snoop_fanout::total 4059438 # Request fanout histogram -system.membus.reqLayer0.occupancy 8663216000 # Layer occupancy (ticks) +system.membus.snoop_fanout::total 4058078 # Request fanout histogram +system.membus.reqLayer0.occupancy 8755432500 # Layer occupancy (ticks) system.membus.reqLayer0.utilization 0.8 # Layer utilization (%) -system.membus.respLayer1.occupancy 11191487250 # Layer occupancy (ticks) +system.membus.respLayer1.occupancy 11187827500 # Layer occupancy (ticks) system.membus.respLayer1.utilization 1.0 # Layer utilization (%) ---------- End Simulation Statistics ---------- diff --git a/tests/long/se/60.bzip2/ref/arm/linux/o3-timing/config.ini b/tests/long/se/60.bzip2/ref/arm/linux/o3-timing/config.ini index 540dec5ab..48a3a5266 100644 --- a/tests/long/se/60.bzip2/ref/arm/linux/o3-timing/config.ini +++ b/tests/long/se/60.bzip2/ref/arm/linux/o3-timing/config.ini @@ -14,6 +14,7 @@ children=clk_domain cpu cpu_clk_domain dvfs_handler membus physmem voltage_domai boot_osflags=a cache_line_size=64 clk_domain=system.clk_domain +default_p_state=UNDEFINED eventq_index=0 exit_on_work_items=false init_param=0 @@ -27,6 +28,10 @@ memories=system.physmem mmap_using_noreserve=false multi_thread=false num_work_ids=16 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null readfile= symbolfile= thermal_components= @@ -72,6 +77,7 @@ cpu_id=0 decodeToFetchDelay=1 decodeToRenameDelay=2 decodeWidth=3 +default_p_state=UNDEFINED dispatchWidth=6 do_checkpoint_insts=true do_quiesce=true @@ -110,6 +116,10 @@ numPhysIntRegs=128 numROBEntries=40 numRobs=1 numThreads=1 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null profile=0 progress_interval=0 renameToDecodeDelay=1 @@ -166,12 +176,17 @@ addr_ranges=0:18446744073709551615 assoc=2 clk_domain=system.cpu_clk_domain clusivity=mostly_incl +default_p_state=UNDEFINED demand_mshr_reserve=1 eventq_index=0 hit_latency=2 is_read_only=false max_miss_count=0 mshrs=6 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null prefetch_on_access=false prefetcher=Null response_latency=2 @@ -190,8 +205,13 @@ type=LRU assoc=2 block_size=64 clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 
hit_latency=2 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sequential_access=false size=32768 @@ -214,9 +234,14 @@ walker=system.cpu.dstage2_mmu.stage2_tlb.walker [system.cpu.dstage2_mmu.stage2_tlb.walker] type=ArmTableWalker clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 is_stage2=true num_squash_per_cycle=2 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sys=system [system.cpu.dtb] @@ -230,9 +255,14 @@ walker=system.cpu.dtb.walker [system.cpu.dtb.walker] type=ArmTableWalker clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 is_stage2=false num_squash_per_cycle=2 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sys=system port=system.cpu.toL2Bus.slave[3] @@ -508,12 +538,17 @@ addr_ranges=0:18446744073709551615 assoc=2 clk_domain=system.cpu_clk_domain clusivity=mostly_incl +default_p_state=UNDEFINED demand_mshr_reserve=1 eventq_index=0 hit_latency=1 is_read_only=true max_miss_count=0 mshrs=2 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null prefetch_on_access=false prefetcher=Null response_latency=1 @@ -532,8 +567,13 @@ type=LRU assoc=2 block_size=64 clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 hit_latency=1 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sequential_access=false size=32768 @@ -591,9 +631,14 @@ walker=system.cpu.istage2_mmu.stage2_tlb.walker [system.cpu.istage2_mmu.stage2_tlb.walker] type=ArmTableWalker clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 is_stage2=true num_squash_per_cycle=2 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sys=system [system.cpu.itb] @@ -607,9 +652,14 @@ walker=system.cpu.itb.walker [system.cpu.itb.walker] type=ArmTableWalker clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 is_stage2=false num_squash_per_cycle=2 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sys=system port=system.cpu.toL2Bus.slave[2] @@ -620,12 +670,17 @@ addr_ranges=0:18446744073709551615 assoc=16 clk_domain=system.cpu_clk_domain clusivity=mostly_excl +default_p_state=UNDEFINED demand_mshr_reserve=1 eventq_index=0 hit_latency=12 is_read_only=false max_miss_count=0 mshrs=16 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null prefetch_on_access=true prefetcher=system.cpu.l2cache.prefetcher response_latency=12 @@ -643,6 +698,7 @@ mem_side=system.membus.slave[1] type=StridePrefetcher cache_snoop=false clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED degree=8 eventq_index=0 latency=1 @@ -653,6 +709,10 @@ on_inst=true on_miss=false on_read=true on_write=true +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null queue_filter=true queue_size=32 queue_squash=true @@ -669,8 +729,13 @@ type=RandomRepl assoc=16 block_size=64 clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 hit_latency=12 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sequential_access=false size=1048576 @@ -678,10 +743,15 @@ size=1048576 type=CoherentXBar children=snoop_filter 
clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 forward_latency=0 frontend_latency=1 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 point_of_coherency=false +power_model=Null response_latency=1 snoop_filter=system.cpu.toL2Bus.snoop_filter snoop_response_latency=1 @@ -712,7 +782,7 @@ env= errout=cerr euid=100 eventq_index=0 -executable=/dist/m5/cpu2000/binaries/arm/linux/bzip2 +executable=/arm/projectscratch/randd/systems/dist/cpu2000/binaries/arm/linux/bzip2 gid=100 input=cin kvmInSE=false @@ -744,10 +814,15 @@ transition_latency=100000000 [system.membus] type=CoherentXBar clk_domain=system.clk_domain +default_p_state=UNDEFINED eventq_index=0 forward_latency=4 frontend_latency=3 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 point_of_coherency=true +power_model=Null response_latency=2 snoop_filter=Null snoop_response_latency=4 @@ -791,6 +866,7 @@ burst_length=8 channels=1 clk_domain=system.clk_domain conf_table_reported=true +default_p_state=UNDEFINED device_bus_width=8 device_rowbuffer_size=1024 device_size=536870912 @@ -802,7 +878,11 @@ max_accesses_per_row=16 mem_sched_policy=frfcfs min_writes_per_switch=16 null=false +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 page_policy=open_adaptive +power_model=Null range=0:134217727 ranks_per_channel=2 read_buffer_size=32 diff --git a/tests/long/se/60.bzip2/ref/arm/linux/o3-timing/simerr b/tests/long/se/60.bzip2/ref/arm/linux/o3-timing/simerr index be90b0340..caeab8324 100755 --- a/tests/long/se/60.bzip2/ref/arm/linux/o3-timing/simerr +++ b/tests/long/se/60.bzip2/ref/arm/linux/o3-timing/simerr @@ -1,3 +1,4 @@ warn: DRAM device capacity (8192 Mbytes) does not match the address range assigned (128 Mbytes) warn: Sockets disabled, not accepting gdb connections +warn: ClockedObject: More than one power state change request encountered within the same simulation tick warn: CP14 unimplemented crn[8], opc1[2], crm[9], opc2[4] diff --git a/tests/long/se/60.bzip2/ref/arm/linux/o3-timing/simout b/tests/long/se/60.bzip2/ref/arm/linux/o3-timing/simout index 77417a942..3ee0ee7fa 100755 --- a/tests/long/se/60.bzip2/ref/arm/linux/o3-timing/simout +++ b/tests/long/se/60.bzip2/ref/arm/linux/o3-timing/simout @@ -3,10 +3,10 @@ Redirecting stderr to build/ARM/tests/opt/long/se/60.bzip2/arm/linux/o3-timing/s gem5 Simulator System. http://gem5.org gem5 is copyrighted software; use the --copyright option for details. -gem5 compiled Mar 16 2016 23:07:21 -gem5 started Mar 16 2016 23:41:21 -gem5 executing on dinar2c11, pid 25849 -command line: build/ARM/gem5.opt -d build/ARM/tests/opt/long/se/60.bzip2/arm/linux/o3-timing -re /home/stever/gem5-public/tests/run.py build/ARM/tests/opt/long/se/60.bzip2/arm/linux/o3-timing +gem5 compiled Jul 21 2016 14:37:41 +gem5 started Jul 21 2016 15:06:10 +gem5 executing on e108600-lin, pid 24215 +command line: /work/curdun01/gem5-external.hg/build/ARM/gem5.opt -d build/ARM/tests/opt/long/se/60.bzip2/arm/linux/o3-timing -re /work/curdun01/gem5-external.hg/tests/testing/../run.py long/se/60.bzip2/arm/linux/o3-timing Global frequency set at 1000000000000 ticks per second info: Entering event queue @ 0. Starting simulation... 
diff --git a/tests/long/se/60.bzip2/ref/arm/linux/o3-timing/stats.txt b/tests/long/se/60.bzip2/ref/arm/linux/o3-timing/stats.txt index bd5e79823..3ae5dc097 100644 --- a/tests/long/se/60.bzip2/ref/arm/linux/o3-timing/stats.txt +++ b/tests/long/se/60.bzip2/ref/arm/linux/o3-timing/stats.txt @@ -4,11 +4,11 @@ sim_seconds 0.767804 # Nu sim_ticks 767803843500 # Number of ticks simulated final_tick 767803843500 # Number of ticks from beginning of simulation (restored from checkpoints and never reset) sim_freq 1000000000000 # Frequency of simulated ticks -host_inst_rate 232978 # Simulator instruction rate (inst/s) -host_op_rate 250999 # Simulator op (including micro ops) rate (op/s) -host_tick_rate 115813638 # Simulator tick rate (ticks/s) -host_mem_usage 356264 # Number of bytes of host memory used -host_seconds 6629.65 # Real time elapsed on the host +host_inst_rate 219812 # Simulator instruction rate (inst/s) +host_op_rate 236814 # Simulator op (including micro ops) rate (op/s) +host_tick_rate 109268932 # Simulator tick rate (ticks/s) +host_mem_usage 308968 # Number of bytes of host memory used +host_seconds 7026.74 # Real time elapsed on the host sim_insts 1544563024 # Number of instructions simulated sim_ops 1664032416 # Number of ops (including micro ops) simulated system.voltage_domain.voltage 1 # Voltage in Volts @@ -1203,6 +1203,7 @@ system.cpu.toL2Bus.pkt_size_system.cpu.icache.mem_side::system.cpu.l2cache.cpu_s system.cpu.toL2Bus.pkt_size_system.cpu.dcache.mem_side::system.cpu.l2cache.cpu_side 2176508224 # Cumulative packet size per connected master and slave (bytes) system.cpu.toL2Bus.pkt_size::total 2176614720 # Cumulative packet size per connected master and slave (bytes) system.cpu.toL2Bus.snoops 8842499 # Total snoops (count) +system.cpu.toL2Bus.snoopTraffic 104697920 # Total snoop traffic (bytes) system.cpu.toL2Bus.snoop_fanout::samples 25847794 # Request fanout histogram system.cpu.toL2Bus.snoop_fanout::mean 0.114446 # Request fanout histogram system.cpu.toL2Bus.snoop_fanout::stdev 0.320627 # Request fanout histogram @@ -1235,6 +1236,7 @@ system.membus.pkt_count::total 13984484 # Pa system.membus.pkt_size_system.cpu.l2cache.mem_side::system.physmem.port 403793920 # Cumulative packet size per connected master and slave (bytes) system.membus.pkt_size::total 403793920 # Cumulative packet size per connected master and slave (bytes) system.membus.snoops 0 # Total snoops (count) +system.membus.snoopTraffic 0 # Total snoop traffic (bytes) system.membus.snoop_fanout::samples 9311100 # Request fanout histogram system.membus.snoop_fanout::mean 0 # Request fanout histogram system.membus.snoop_fanout::stdev 0 # Request fanout histogram diff --git a/tests/long/se/60.bzip2/ref/arm/linux/simple-atomic/config.ini b/tests/long/se/60.bzip2/ref/arm/linux/simple-atomic/config.ini index 1b535494d..0bd2c9396 100644 --- a/tests/long/se/60.bzip2/ref/arm/linux/simple-atomic/config.ini +++ b/tests/long/se/60.bzip2/ref/arm/linux/simple-atomic/config.ini @@ -14,6 +14,7 @@ children=clk_domain cpu cpu_clk_domain dvfs_handler membus physmem voltage_domai boot_osflags=a cache_line_size=64 clk_domain=system.clk_domain +default_p_state=UNDEFINED eventq_index=0 exit_on_work_items=false init_param=0 @@ -27,6 +28,10 @@ memories=system.physmem mmap_using_noreserve=false multi_thread=false num_work_ids=16 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null readfile= symbolfile= thermal_components= @@ -55,6 +60,7 @@ branchPred=Null checker=Null 
clk_domain=system.cpu_clk_domain cpu_id=0 +default_p_state=UNDEFINED do_checkpoint_insts=true do_quiesce=true do_statistics_insts=true @@ -73,6 +79,10 @@ max_insts_any_thread=0 max_loads_all_threads=0 max_loads_any_thread=0 numThreads=1 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null profile=0 progress_interval=0 simpoint_start_insts= @@ -106,9 +116,14 @@ walker=system.cpu.dstage2_mmu.stage2_tlb.walker [system.cpu.dstage2_mmu.stage2_tlb.walker] type=ArmTableWalker clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 is_stage2=true num_squash_per_cycle=2 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sys=system [system.cpu.dtb] @@ -122,9 +137,14 @@ walker=system.cpu.dtb.walker [system.cpu.dtb.walker] type=ArmTableWalker clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 is_stage2=false num_squash_per_cycle=2 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sys=system port=system.membus.slave[4] @@ -182,9 +202,14 @@ walker=system.cpu.istage2_mmu.stage2_tlb.walker [system.cpu.istage2_mmu.stage2_tlb.walker] type=ArmTableWalker clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 is_stage2=true num_squash_per_cycle=2 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sys=system [system.cpu.itb] @@ -198,9 +223,14 @@ walker=system.cpu.itb.walker [system.cpu.itb.walker] type=ArmTableWalker clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 is_stage2=false num_squash_per_cycle=2 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sys=system port=system.membus.slave[3] @@ -218,7 +248,7 @@ env= errout=cerr euid=100 eventq_index=0 -executable=/scratch/nilay/GEM5/dist/m5/cpu2000/binaries/arm/linux/bzip2 +executable=/arm/projectscratch/randd/systems/dist/cpu2000/binaries/arm/linux/bzip2 gid=100 input=cin kvmInSE=false @@ -250,10 +280,15 @@ transition_latency=100000000 [system.membus] type=CoherentXBar clk_domain=system.clk_domain +default_p_state=UNDEFINED eventq_index=0 forward_latency=4 frontend_latency=3 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 point_of_coherency=true +power_model=Null response_latency=2 snoop_filter=Null snoop_response_latency=4 @@ -268,11 +303,16 @@ type=SimpleMemory bandwidth=73.000000 clk_domain=system.clk_domain conf_table_reported=true +default_p_state=UNDEFINED eventq_index=0 in_addr_map=true latency=30000 latency_var=0 null=false +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null range=0:134217727 port=system.membus.master[0] diff --git a/tests/long/se/60.bzip2/ref/arm/linux/simple-atomic/simerr b/tests/long/se/60.bzip2/ref/arm/linux/simple-atomic/simerr index 1a4f96712..aadc3d011 100755 --- a/tests/long/se/60.bzip2/ref/arm/linux/simple-atomic/simerr +++ b/tests/long/se/60.bzip2/ref/arm/linux/simple-atomic/simerr @@ -1 +1,2 @@ warn: Sockets disabled, not accepting gdb connections +warn: ClockedObject: More than one power state change request encountered within the same simulation tick diff --git a/tests/long/se/60.bzip2/ref/arm/linux/simple-atomic/simout b/tests/long/se/60.bzip2/ref/arm/linux/simple-atomic/simout index 6fb821b07..c1b3d9c87 100755 --- 
a/tests/long/se/60.bzip2/ref/arm/linux/simple-atomic/simout +++ b/tests/long/se/60.bzip2/ref/arm/linux/simple-atomic/simout @@ -1,10 +1,13 @@ +Redirecting stdout to build/ARM/tests/opt/long/se/60.bzip2/arm/linux/simple-atomic/simout +Redirecting stderr to build/ARM/tests/opt/long/se/60.bzip2/arm/linux/simple-atomic/simerr gem5 Simulator System. http://gem5.org gem5 is copyrighted software; use the --copyright option for details. -gem5 compiled Jan 23 2014 12:08:08 -gem5 started Jan 23 2014 18:13:20 -gem5 executing on u200540-lin -command line: build/ARM/gem5.opt -d build/ARM/tests/opt/long/se/60.bzip2/arm/linux/simple-atomic -re tests/run.py build/ARM/tests/opt/long/se/60.bzip2/arm/linux/simple-atomic +gem5 compiled Jul 21 2016 14:37:41 +gem5 started Jul 21 2016 14:38:22 +gem5 executing on e108600-lin, pid 23077 +command line: /work/curdun01/gem5-external.hg/build/ARM/gem5.opt -d build/ARM/tests/opt/long/se/60.bzip2/arm/linux/simple-atomic -re /work/curdun01/gem5-external.hg/tests/testing/../run.py long/se/60.bzip2/arm/linux/simple-atomic + Global frequency set at 1000000000000 ticks per second info: Entering event queue @ 0. Starting simulation... spec_init diff --git a/tests/long/se/60.bzip2/ref/arm/linux/simple-atomic/stats.txt b/tests/long/se/60.bzip2/ref/arm/linux/simple-atomic/stats.txt index 9d26db066..a861bb889 100644 --- a/tests/long/se/60.bzip2/ref/arm/linux/simple-atomic/stats.txt +++ b/tests/long/se/60.bzip2/ref/arm/linux/simple-atomic/stats.txt @@ -4,11 +4,11 @@ sim_seconds 0.832017 # Nu sim_ticks 832017490500 # Number of ticks simulated final_tick 832017490500 # Number of ticks from beginning of simulation (restored from checkpoints and never reset) sim_freq 1000000000000 # Frequency of simulated ticks -host_inst_rate 2178318 # Simulator instruction rate (inst/s) -host_op_rate 2346807 # Simulator op (including micro ops) rate (op/s) -host_tick_rate 1173405208 # Simulator tick rate (ticks/s) -host_mem_usage 302320 # Number of bytes of host memory used -host_seconds 709.06 # Real time elapsed on the host +host_inst_rate 1008264 # Simulator instruction rate (inst/s) +host_op_rate 1086251 # Simulator op (including micro ops) rate (op/s) +host_tick_rate 543126570 # Simulator tick rate (ticks/s) +host_mem_usage 256604 # Number of bytes of host memory used +host_seconds 1531.90 # Real time elapsed on the host sim_insts 1544563042 # Number of instructions simulated sim_ops 1664032434 # Number of ops (including micro ops) simulated system.voltage_domain.voltage 1 # Voltage in Volts @@ -237,6 +237,7 @@ system.membus.pkt_size_system.cpu.icache_port::system.physmem.port 6178262360 system.membus.pkt_size_system.cpu.dcache_port::system.physmem.port 2205546063 # Cumulative packet size per connected master and slave (bytes) system.membus.pkt_size::total 8383808423 # Cumulative packet size per connected master and slave (bytes) system.membus.snoops 0 # Total snoops (count) +system.membus.snoopTraffic 0 # Total snoop traffic (bytes) system.membus.snoop_fanout::samples 2172060895 # Request fanout histogram system.membus.snoop_fanout::mean 0.711106 # Request fanout histogram system.membus.snoop_fanout::stdev 0.453249 # Request fanout histogram diff --git a/tests/long/se/60.bzip2/ref/arm/linux/simple-timing/config.ini b/tests/long/se/60.bzip2/ref/arm/linux/simple-timing/config.ini index d42bc7142..65c2bbf99 100644 --- a/tests/long/se/60.bzip2/ref/arm/linux/simple-timing/config.ini +++ b/tests/long/se/60.bzip2/ref/arm/linux/simple-timing/config.ini @@ -14,6 +14,7 @@ children=clk_domain cpu 
cpu_clk_domain dvfs_handler membus physmem voltage_domai boot_osflags=a cache_line_size=64 clk_domain=system.clk_domain +default_p_state=UNDEFINED eventq_index=0 exit_on_work_items=false init_param=0 @@ -27,6 +28,10 @@ memories=system.physmem mmap_using_noreserve=false multi_thread=false num_work_ids=16 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null readfile= symbolfile= thermal_components= @@ -55,6 +60,7 @@ branchPred=Null checker=Null clk_domain=system.cpu_clk_domain cpu_id=0 +default_p_state=UNDEFINED do_checkpoint_insts=true do_quiesce=true do_statistics_insts=true @@ -72,6 +78,10 @@ max_insts_any_thread=0 max_loads_all_threads=0 max_loads_any_thread=0 numThreads=1 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null profile=0 progress_interval=0 simpoint_start_insts= @@ -90,12 +100,17 @@ addr_ranges=0:18446744073709551615 assoc=2 clk_domain=system.cpu_clk_domain clusivity=mostly_incl +default_p_state=UNDEFINED demand_mshr_reserve=1 eventq_index=0 hit_latency=2 is_read_only=false max_miss_count=0 mshrs=4 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null prefetch_on_access=false prefetcher=Null response_latency=2 @@ -114,8 +129,13 @@ type=LRU assoc=2 block_size=64 clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 hit_latency=2 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sequential_access=false size=262144 @@ -138,9 +158,14 @@ walker=system.cpu.dstage2_mmu.stage2_tlb.walker [system.cpu.dstage2_mmu.stage2_tlb.walker] type=ArmTableWalker clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 is_stage2=true num_squash_per_cycle=2 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sys=system [system.cpu.dtb] @@ -154,9 +179,14 @@ walker=system.cpu.dtb.walker [system.cpu.dtb.walker] type=ArmTableWalker clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 is_stage2=false num_squash_per_cycle=2 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sys=system port=system.cpu.toL2Bus.slave[3] @@ -167,12 +197,17 @@ addr_ranges=0:18446744073709551615 assoc=2 clk_domain=system.cpu_clk_domain clusivity=mostly_incl +default_p_state=UNDEFINED demand_mshr_reserve=1 eventq_index=0 hit_latency=2 is_read_only=true max_miss_count=0 mshrs=4 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null prefetch_on_access=false prefetcher=Null response_latency=2 @@ -191,8 +226,13 @@ type=LRU assoc=2 block_size=64 clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 hit_latency=2 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sequential_access=false size=131072 @@ -250,9 +290,14 @@ walker=system.cpu.istage2_mmu.stage2_tlb.walker [system.cpu.istage2_mmu.stage2_tlb.walker] type=ArmTableWalker clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 is_stage2=true num_squash_per_cycle=2 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sys=system [system.cpu.itb] @@ -266,9 +311,14 @@ walker=system.cpu.itb.walker [system.cpu.itb.walker] type=ArmTableWalker clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED 
eventq_index=0 is_stage2=false num_squash_per_cycle=2 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sys=system port=system.cpu.toL2Bus.slave[2] @@ -279,12 +329,17 @@ addr_ranges=0:18446744073709551615 assoc=8 clk_domain=system.cpu_clk_domain clusivity=mostly_incl +default_p_state=UNDEFINED demand_mshr_reserve=1 eventq_index=0 hit_latency=20 is_read_only=false max_miss_count=0 mshrs=20 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null prefetch_on_access=false prefetcher=Null response_latency=20 @@ -303,8 +358,13 @@ type=LRU assoc=8 block_size=64 clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 hit_latency=20 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sequential_access=false size=2097152 @@ -312,10 +372,15 @@ size=2097152 type=CoherentXBar children=snoop_filter clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 forward_latency=0 frontend_latency=1 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 point_of_coherency=false +power_model=Null response_latency=1 snoop_filter=system.cpu.toL2Bus.snoop_filter snoop_response_latency=1 @@ -346,7 +411,7 @@ env= errout=cerr euid=100 eventq_index=0 -executable=/scratch/nilay/GEM5/dist/m5/cpu2000/binaries/arm/linux/bzip2 +executable=/arm/projectscratch/randd/systems/dist/cpu2000/binaries/arm/linux/bzip2 gid=100 input=cin kvmInSE=false @@ -378,10 +443,15 @@ transition_latency=100000000 [system.membus] type=CoherentXBar clk_domain=system.clk_domain +default_p_state=UNDEFINED eventq_index=0 forward_latency=4 frontend_latency=3 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 point_of_coherency=true +power_model=Null response_latency=2 snoop_filter=Null snoop_response_latency=4 @@ -396,11 +466,16 @@ type=SimpleMemory bandwidth=73.000000 clk_domain=system.clk_domain conf_table_reported=true +default_p_state=UNDEFINED eventq_index=0 in_addr_map=true latency=30000 latency_var=0 null=false +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null range=0:134217727 port=system.membus.master[0] diff --git a/tests/long/se/60.bzip2/ref/arm/linux/simple-timing/simerr b/tests/long/se/60.bzip2/ref/arm/linux/simple-timing/simerr index 1a4f96712..aadc3d011 100755 --- a/tests/long/se/60.bzip2/ref/arm/linux/simple-timing/simerr +++ b/tests/long/se/60.bzip2/ref/arm/linux/simple-timing/simerr @@ -1 +1,2 @@ warn: Sockets disabled, not accepting gdb connections +warn: ClockedObject: More than one power state change request encountered within the same simulation tick diff --git a/tests/long/se/60.bzip2/ref/arm/linux/simple-timing/simout b/tests/long/se/60.bzip2/ref/arm/linux/simple-timing/simout index 8064c269e..4382bd2ba 100755 --- a/tests/long/se/60.bzip2/ref/arm/linux/simple-timing/simout +++ b/tests/long/se/60.bzip2/ref/arm/linux/simple-timing/simout @@ -3,10 +3,11 @@ Redirecting stderr to build/ARM/tests/opt/long/se/60.bzip2/arm/linux/simple-timi gem5 Simulator System. http://gem5.org gem5 is copyrighted software; use the --copyright option for details. 
-gem5 compiled Jan 23 2014 12:08:08 -gem5 started Jan 23 2014 18:15:41 -gem5 executing on u200540-lin -command line: build/ARM/gem5.opt -d build/ARM/tests/opt/long/se/60.bzip2/arm/linux/simple-timing -re tests/run.py build/ARM/tests/opt/long/se/60.bzip2/arm/linux/simple-timing +gem5 compiled Jul 21 2016 14:37:41 +gem5 started Jul 21 2016 14:49:25 +gem5 executing on e108600-lin, pid 23292 +command line: /work/curdun01/gem5-external.hg/build/ARM/gem5.opt -d build/ARM/tests/opt/long/se/60.bzip2/arm/linux/simple-timing -re /work/curdun01/gem5-external.hg/tests/testing/../run.py long/se/60.bzip2/arm/linux/simple-timing + Global frequency set at 1000000000000 ticks per second info: Entering event queue @ 0. Starting simulation... spec_init diff --git a/tests/long/se/60.bzip2/ref/arm/linux/simple-timing/stats.txt b/tests/long/se/60.bzip2/ref/arm/linux/simple-timing/stats.txt index 59601069e..e3d403cda 100644 --- a/tests/long/se/60.bzip2/ref/arm/linux/simple-timing/stats.txt +++ b/tests/long/se/60.bzip2/ref/arm/linux/simple-timing/stats.txt @@ -4,11 +4,11 @@ sim_seconds 2.377030 # Nu sim_ticks 2377029670500 # Number of ticks simulated final_tick 2377029670500 # Number of ticks from beginning of simulation (restored from checkpoints and never reset) sim_freq 1000000000000 # Frequency of simulated ticks -host_inst_rate 1373046 # Simulator instruction rate (inst/s) -host_op_rate 1479650 # Simulator op (including micro ops) rate (op/s) -host_tick_rate 2121040557 # Simulator tick rate (ticks/s) -host_mem_usage 312336 # Number of bytes of host memory used -host_seconds 1120.69 # Real time elapsed on the host +host_inst_rate 744525 # Simulator instruction rate (inst/s) +host_op_rate 802329 # Simulator op (including micro ops) rate (op/s) +host_tick_rate 1150119113 # Simulator tick rate (ticks/s) +host_mem_usage 266344 # Number of bytes of host memory used +host_seconds 2066.77 # Real time elapsed on the host sim_insts 1538759602 # Number of instructions simulated sim_ops 1658228915 # Number of ops (including micro ops) simulated system.voltage_domain.voltage 1 # Voltage in Volts @@ -623,6 +623,7 @@ system.cpu.toL2Bus.pkt_size_system.cpu.icache.mem_side::system.cpu.l2cache.cpu_s system.cpu.toL2Bus.pkt_size_system.cpu.dcache.mem_side::system.cpu.l2cache.cpu_side 818983360 # Cumulative packet size per connected master and slave (bytes) system.cpu.toL2Bus.pkt_size::total 819024640 # Cumulative packet size per connected master and slave (bytes) system.cpu.toL2Bus.snoops 1919027 # Total snoops (count) +system.cpu.toL2Bus.snoopTraffic 65352128 # Total snoop traffic (bytes) system.cpu.toL2Bus.snoop_fanout::samples 11034901 # Request fanout histogram system.cpu.toL2Bus.snoop_fanout::mean 0.000201 # Request fanout histogram system.cpu.toL2Bus.snoop_fanout::stdev 0.014186 # Request fanout histogram @@ -652,6 +653,7 @@ system.membus.pkt_count::total 5821611 # Pa system.membus.pkt_size_system.cpu.l2cache.mem_side::system.physmem.port 190261824 # Cumulative packet size per connected master and slave (bytes) system.membus.pkt_size::total 190261824 # Cumulative packet size per connected master and slave (bytes) system.membus.snoops 0 # Total snoops (count) +system.membus.snoopTraffic 0 # Total snoop traffic (bytes) system.membus.snoop_fanout::samples 3869897 # Request fanout histogram system.membus.snoop_fanout::mean 0 # Request fanout histogram system.membus.snoop_fanout::stdev 0 # Request fanout histogram diff --git a/tests/long/se/60.bzip2/ref/x86/linux/simple-atomic/config.ini 
b/tests/long/se/60.bzip2/ref/x86/linux/simple-atomic/config.ini index 5a87f20e3..3f64cee84 100644 --- a/tests/long/se/60.bzip2/ref/x86/linux/simple-atomic/config.ini +++ b/tests/long/se/60.bzip2/ref/x86/linux/simple-atomic/config.ini @@ -14,7 +14,9 @@ children=clk_domain cpu cpu_clk_domain dvfs_handler membus physmem voltage_domai boot_osflags=a cache_line_size=64 clk_domain=system.clk_domain +default_p_state=UNDEFINED eventq_index=0 +exit_on_work_items=false init_param=0 kernel= kernel_addr_check=true @@ -24,9 +26,16 @@ mem_mode=atomic mem_ranges= memories=system.physmem mmap_using_noreserve=false +multi_thread=false num_work_ids=16 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null readfile= symbolfile= +thermal_components= +thermal_model=Null work_begin_ckpt_count=0 work_begin_cpu_id_exit=-1 work_begin_exit_count=0 @@ -51,6 +60,7 @@ branchPred=Null checker=Null clk_domain=system.cpu_clk_domain cpu_id=0 +default_p_state=UNDEFINED do_checkpoint_insts=true do_quiesce=true do_statistics_insts=true @@ -67,6 +77,10 @@ max_insts_any_thread=0 max_loads_all_threads=0 max_loads_any_thread=0 numThreads=1 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null profile=0 progress_interval=0 simpoint_start_insts= @@ -97,18 +111,28 @@ walker=system.cpu.dtb.walker [system.cpu.dtb.walker] type=X86PagetableWalker clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 num_squash_per_cycle=4 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null system=system port=system.membus.slave[4] [system.cpu.interrupts] type=X86LocalApic clk_domain=system.cpu.apic_clk_domain +default_p_state=UNDEFINED eventq_index=0 int_latency=1000 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 pio_addr=2305843009213693952 pio_latency=100000 +power_model=Null system=system int_master=system.membus.slave[5] int_slave=system.membus.master[2] @@ -128,8 +152,13 @@ walker=system.cpu.itb.walker [system.cpu.itb.walker] type=X86PagetableWalker clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 num_squash_per_cycle=4 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null system=system port=system.membus.slave[3] @@ -147,7 +176,7 @@ env= errout=cerr euid=100 eventq_index=0 -executable=/scratch/nilay/GEM5/dist/m5/cpu2000/binaries/x86/linux/bzip2 +executable=/arm/projectscratch/randd/systems/dist/cpu2000/binaries/x86/linux/bzip2 gid=100 input=cin kvmInSE=false @@ -179,9 +208,15 @@ transition_latency=100000000 [system.membus] type=CoherentXBar clk_domain=system.clk_domain +default_p_state=UNDEFINED eventq_index=0 forward_latency=4 frontend_latency=3 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +point_of_coherency=true +power_model=Null response_latency=2 snoop_filter=Null snoop_response_latency=4 @@ -196,11 +231,16 @@ type=SimpleMemory bandwidth=73.000000 clk_domain=system.clk_domain conf_table_reported=true +default_p_state=UNDEFINED eventq_index=0 in_addr_map=true latency=30000 latency_var=0 null=false +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null range=0:134217727 port=system.membus.master[0] diff --git a/tests/long/se/60.bzip2/ref/x86/linux/simple-atomic/simerr b/tests/long/se/60.bzip2/ref/x86/linux/simple-atomic/simerr index 1a4f96712..aadc3d011 
100755 --- a/tests/long/se/60.bzip2/ref/x86/linux/simple-atomic/simerr +++ b/tests/long/se/60.bzip2/ref/x86/linux/simple-atomic/simerr @@ -1 +1,2 @@ warn: Sockets disabled, not accepting gdb connections +warn: ClockedObject: More than one power state change request encountered within the same simulation tick diff --git a/tests/long/se/60.bzip2/ref/x86/linux/simple-atomic/simout b/tests/long/se/60.bzip2/ref/x86/linux/simple-atomic/simout index ff491d90e..715860400 100755 --- a/tests/long/se/60.bzip2/ref/x86/linux/simple-atomic/simout +++ b/tests/long/se/60.bzip2/ref/x86/linux/simple-atomic/simout @@ -1,10 +1,13 @@ +Redirecting stdout to build/X86/tests/opt/long/se/60.bzip2/x86/linux/simple-atomic/simout +Redirecting stderr to build/X86/tests/opt/long/se/60.bzip2/x86/linux/simple-atomic/simerr gem5 Simulator System. http://gem5.org gem5 is copyrighted software; use the --copyright option for details. -gem5 compiled Jan 22 2014 17:10:34 -gem5 started Jan 22 2014 21:14:55 -gem5 executing on u200540-lin -command line: build/X86/gem5.opt -d build/X86/tests/opt/long/se/60.bzip2/x86/linux/simple-atomic -re tests/run.py build/X86/tests/opt/long/se/60.bzip2/x86/linux/simple-atomic +gem5 compiled Jul 21 2016 14:35:23 +gem5 started Jul 21 2016 14:36:17 +gem5 executing on e108600-lin, pid 18539 +command line: /work/curdun01/gem5-external.hg/build/X86/gem5.opt -d build/X86/tests/opt/long/se/60.bzip2/x86/linux/simple-atomic -re /work/curdun01/gem5-external.hg/tests/testing/../run.py long/se/60.bzip2/x86/linux/simple-atomic + Global frequency set at 1000000000000 ticks per second info: Entering event queue @ 0. Starting simulation... spec_init diff --git a/tests/long/se/60.bzip2/ref/x86/linux/simple-atomic/stats.txt b/tests/long/se/60.bzip2/ref/x86/linux/simple-atomic/stats.txt index 3f6006735..907fd74ca 100644 --- a/tests/long/se/60.bzip2/ref/x86/linux/simple-atomic/stats.txt +++ b/tests/long/se/60.bzip2/ref/x86/linux/simple-atomic/stats.txt @@ -4,11 +4,11 @@ sim_seconds 2.846007 # Nu sim_ticks 2846007227500 # Number of ticks simulated final_tick 2846007227500 # Number of ticks from beginning of simulation (restored from checkpoints and never reset) sim_freq 1000000000000 # Frequency of simulated ticks -host_inst_rate 1672243 # Simulator instruction rate (inst/s) -host_op_rate 2605507 # Simulator op (including micro ops) rate (op/s) -host_tick_rate 1582143797 # Simulator tick rate (ticks/s) -host_mem_usage 305608 # Number of bytes of host memory used -host_seconds 1798.83 # Real time elapsed on the host +host_inst_rate 953043 # Simulator instruction rate (inst/s) +host_op_rate 1484927 # Simulator op (including micro ops) rate (op/s) +host_tick_rate 901693633 # Simulator tick rate (ticks/s) +host_mem_usage 259304 # Number of bytes of host memory used +host_seconds 3156.29 # Real time elapsed on the host sim_insts 3008081022 # Number of instructions simulated sim_ops 4686862596 # Number of ops (including micro ops) simulated system.voltage_domain.voltage 1 # Voltage in Volts @@ -120,6 +120,7 @@ system.membus.pkt_size_system.cpu.dcache_port::system.physmem.port 6568525137 system.membus.pkt_size_system.cpu.dcache_port::total 6568525137 # Cumulative packet size per connected master and slave (bytes) system.membus.pkt_size::total 38674388193 # Cumulative packet size per connected master and slave (bytes) system.membus.snoops 0 # Total snoops (count) +system.membus.snoopTraffic 0 # Total snoop traffic (bytes) system.membus.snoop_fanout::samples 5690945966 # Request fanout histogram 
system.membus.snoop_fanout::mean 0.705196 # Request fanout histogram system.membus.snoop_fanout::stdev 0.455955 # Request fanout histogram diff --git a/tests/long/se/60.bzip2/ref/x86/linux/simple-timing/config.ini b/tests/long/se/60.bzip2/ref/x86/linux/simple-timing/config.ini index 1497b3733..1048d999e 100644 --- a/tests/long/se/60.bzip2/ref/x86/linux/simple-timing/config.ini +++ b/tests/long/se/60.bzip2/ref/x86/linux/simple-timing/config.ini @@ -14,7 +14,9 @@ children=clk_domain cpu cpu_clk_domain dvfs_handler membus physmem voltage_domai boot_osflags=a cache_line_size=64 clk_domain=system.clk_domain +default_p_state=UNDEFINED eventq_index=0 +exit_on_work_items=false init_param=0 kernel= kernel_addr_check=true @@ -24,9 +26,16 @@ mem_mode=timing mem_ranges= memories=system.physmem mmap_using_noreserve=false +multi_thread=false num_work_ids=16 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null readfile= symbolfile= +thermal_components= +thermal_model=Null work_begin_ckpt_count=0 work_begin_cpu_id_exit=-1 work_begin_exit_count=0 @@ -51,6 +60,7 @@ branchPred=Null checker=Null clk_domain=system.cpu_clk_domain cpu_id=0 +default_p_state=UNDEFINED do_checkpoint_insts=true do_quiesce=true do_statistics_insts=true @@ -66,6 +76,10 @@ max_insts_any_thread=0 max_loads_all_threads=0 max_loads_any_thread=0 numThreads=1 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null profile=0 progress_interval=0 simpoint_start_insts= @@ -89,13 +103,18 @@ children=tags addr_ranges=0:18446744073709551615 assoc=2 clk_domain=system.cpu_clk_domain +clusivity=mostly_incl +default_p_state=UNDEFINED demand_mshr_reserve=1 eventq_index=0 -forward_snoops=true hit_latency=2 is_read_only=false max_miss_count=0 mshrs=4 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null prefetch_on_access=false prefetcher=Null response_latency=2 @@ -105,6 +124,7 @@ system=system tags=system.cpu.dcache.tags tgts_per_mshr=20 write_buffers=8 +writeback_clean=false cpu_side=system.cpu.dcache_port mem_side=system.cpu.toL2Bus.slave[1] @@ -113,8 +133,13 @@ type=LRU assoc=2 block_size=64 clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 hit_latency=2 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sequential_access=false size=262144 @@ -128,8 +153,13 @@ walker=system.cpu.dtb.walker [system.cpu.dtb.walker] type=X86PagetableWalker clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 num_squash_per_cycle=4 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null system=system port=system.cpu.toL2Bus.slave[3] @@ -139,13 +169,18 @@ children=tags addr_ranges=0:18446744073709551615 assoc=2 clk_domain=system.cpu_clk_domain +clusivity=mostly_incl +default_p_state=UNDEFINED demand_mshr_reserve=1 eventq_index=0 -forward_snoops=true hit_latency=2 is_read_only=true max_miss_count=0 mshrs=4 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null prefetch_on_access=false prefetcher=Null response_latency=2 @@ -155,6 +190,7 @@ system=system tags=system.cpu.icache.tags tgts_per_mshr=20 write_buffers=8 +writeback_clean=true cpu_side=system.cpu.icache_port mem_side=system.cpu.toL2Bus.slave[0] @@ -163,18 +199,28 @@ type=LRU assoc=2 block_size=64 clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED 
eventq_index=0 hit_latency=2 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sequential_access=false size=131072 [system.cpu.interrupts] type=X86LocalApic clk_domain=system.cpu.apic_clk_domain +default_p_state=UNDEFINED eventq_index=0 int_latency=1000 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 pio_addr=2305843009213693952 pio_latency=100000 +power_model=Null system=system int_master=system.membus.slave[2] int_slave=system.membus.master[2] @@ -194,8 +240,13 @@ walker=system.cpu.itb.walker [system.cpu.itb.walker] type=X86PagetableWalker clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 num_squash_per_cycle=4 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null system=system port=system.cpu.toL2Bus.slave[2] @@ -205,13 +256,18 @@ children=tags addr_ranges=0:18446744073709551615 assoc=8 clk_domain=system.cpu_clk_domain +clusivity=mostly_incl +default_p_state=UNDEFINED demand_mshr_reserve=1 eventq_index=0 -forward_snoops=true hit_latency=20 is_read_only=false max_miss_count=0 mshrs=20 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null prefetch_on_access=false prefetcher=Null response_latency=20 @@ -221,6 +277,7 @@ system=system tags=system.cpu.l2cache.tags tgts_per_mshr=12 write_buffers=8 +writeback_clean=false cpu_side=system.cpu.toL2Bus.master[0] mem_side=system.membus.slave[1] @@ -229,19 +286,31 @@ type=LRU assoc=8 block_size=64 clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 hit_latency=20 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sequential_access=false size=2097152 [system.cpu.toL2Bus] type=CoherentXBar +children=snoop_filter clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 forward_latency=0 frontend_latency=1 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +point_of_coherency=false +power_model=Null response_latency=1 -snoop_filter=Null +snoop_filter=system.cpu.toL2Bus.snoop_filter snoop_response_latency=1 system=system use_default_range=false @@ -249,6 +318,13 @@ width=32 master=system.cpu.l2cache.cpu_side slave=system.cpu.icache.mem_side system.cpu.dcache.mem_side system.cpu.itb.walker.port system.cpu.dtb.walker.port +[system.cpu.toL2Bus.snoop_filter] +type=SnoopFilter +eventq_index=0 +lookup_latency=0 +max_capacity=8388608 +system=system + [system.cpu.tracer] type=ExeTracer eventq_index=0 @@ -263,7 +339,7 @@ env= errout=cerr euid=100 eventq_index=0 -executable=/scratch/nilay/GEM5/dist/m5/cpu2000/binaries/x86/linux/bzip2 +executable=/arm/projectscratch/randd/systems/dist/cpu2000/binaries/x86/linux/bzip2 gid=100 input=cin kvmInSE=false @@ -295,9 +371,15 @@ transition_latency=100000000 [system.membus] type=CoherentXBar clk_domain=system.clk_domain +default_p_state=UNDEFINED eventq_index=0 forward_latency=4 frontend_latency=3 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +point_of_coherency=true +power_model=Null response_latency=2 snoop_filter=Null snoop_response_latency=4 @@ -312,11 +394,16 @@ type=SimpleMemory bandwidth=73.000000 clk_domain=system.clk_domain conf_table_reported=true +default_p_state=UNDEFINED eventq_index=0 in_addr_map=true latency=30000 latency_var=0 null=false +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 
+p_state_clk_gate_min=1000 +power_model=Null range=0:134217727 port=system.membus.master[0] diff --git a/tests/long/se/60.bzip2/ref/x86/linux/simple-timing/simerr b/tests/long/se/60.bzip2/ref/x86/linux/simple-timing/simerr index 1a4f96712..aadc3d011 100755 --- a/tests/long/se/60.bzip2/ref/x86/linux/simple-timing/simerr +++ b/tests/long/se/60.bzip2/ref/x86/linux/simple-timing/simerr @@ -1 +1,2 @@ warn: Sockets disabled, not accepting gdb connections +warn: ClockedObject: More than one power state change request encountered within the same simulation tick diff --git a/tests/long/se/60.bzip2/ref/x86/linux/simple-timing/simout b/tests/long/se/60.bzip2/ref/x86/linux/simple-timing/simout index d2167f766..0337bc6ef 100755 --- a/tests/long/se/60.bzip2/ref/x86/linux/simple-timing/simout +++ b/tests/long/se/60.bzip2/ref/x86/linux/simple-timing/simout @@ -1,10 +1,13 @@ +Redirecting stdout to build/X86/tests/opt/long/se/60.bzip2/x86/linux/simple-timing/simout +Redirecting stderr to build/X86/tests/opt/long/se/60.bzip2/x86/linux/simple-timing/simerr gem5 Simulator System. http://gem5.org gem5 is copyrighted software; use the --copyright option for details. -gem5 compiled Jan 22 2014 17:10:34 -gem5 started Jan 22 2014 21:25:13 -gem5 executing on u200540-lin -command line: build/X86/gem5.opt -d build/X86/tests/opt/long/se/60.bzip2/x86/linux/simple-timing -re tests/run.py build/X86/tests/opt/long/se/60.bzip2/x86/linux/simple-timing +gem5 compiled Jul 21 2016 14:35:23 +gem5 started Jul 21 2016 14:36:20 +gem5 executing on e108600-lin, pid 18569 +command line: /work/curdun01/gem5-external.hg/build/X86/gem5.opt -d build/X86/tests/opt/long/se/60.bzip2/x86/linux/simple-timing -re /work/curdun01/gem5-external.hg/tests/testing/../run.py long/se/60.bzip2/x86/linux/simple-timing + Global frequency set at 1000000000000 ticks per second info: Entering event queue @ 0. Starting simulation... spec_init @@ -24,4 +27,4 @@ Uncompressing Data Uncompressed data 1048576 bytes in length Uncompressed data compared correctly Tested 1MB buffer: OK! 
-Exiting @ tick 5882580526000 because target called exit() +Exiting @ tick 5895947852500 because target called exit() diff --git a/tests/long/se/60.bzip2/ref/x86/linux/simple-timing/stats.txt b/tests/long/se/60.bzip2/ref/x86/linux/simple-timing/stats.txt index 5b0c36dc3..33a716627 100644 --- a/tests/long/se/60.bzip2/ref/x86/linux/simple-timing/stats.txt +++ b/tests/long/se/60.bzip2/ref/x86/linux/simple-timing/stats.txt @@ -4,11 +4,11 @@ sim_seconds 5.895948 # Nu sim_ticks 5895947852500 # Number of ticks simulated final_tick 5895947852500 # Number of ticks from beginning of simulation (restored from checkpoints and never reset) sim_freq 1000000000000 # Frequency of simulated ticks -host_inst_rate 1001702 # Simulator instruction rate (inst/s) -host_op_rate 1560742 # Simulator op (including micro ops) rate (op/s) -host_tick_rate 1963371956 # Simulator tick rate (ticks/s) -host_mem_usage 316648 # Number of bytes of host memory used -host_seconds 3002.97 # Real time elapsed on the host +host_inst_rate 735742 # Simulator instruction rate (inst/s) +host_op_rate 1146353 # Simulator op (including micro ops) rate (op/s) +host_tick_rate 1442081312 # Simulator tick rate (ticks/s) +host_mem_usage 269296 # Number of bytes of host memory used +host_seconds 4088.50 # Real time elapsed on the host sim_insts 3008081022 # Number of instructions simulated sim_ops 4686862596 # Number of ops (including micro ops) simulated system.voltage_domain.voltage 1 # Voltage in Volts @@ -480,6 +480,7 @@ system.cpu.toL2Bus.pkt_size_system.cpu.icache.mem_side::system.cpu.l2cache.cpu_s system.cpu.toL2Bus.pkt_size_system.cpu.dcache.mem_side::system.cpu.l2cache.cpu_side 818905152 # Cumulative packet size per connected master and slave (bytes) system.cpu.toL2Bus.pkt_size::total 818948992 # Cumulative packet size per connected master and slave (bytes) system.cpu.toL2Bus.snoops 1919169 # Total snoops (count) +system.cpu.toL2Bus.snoopTraffic 65426496 # Total snoop traffic (bytes) system.cpu.toL2Bus.snoop_fanout::samples 11032521 # Request fanout histogram system.cpu.toL2Bus.snoop_fanout::mean 0.000091 # Request fanout histogram system.cpu.toL2Bus.snoop_fanout::stdev 0.009530 # Request fanout histogram @@ -511,6 +512,7 @@ system.membus.pkt_size_system.cpu.l2cache.mem_side::system.physmem.port 19034 system.membus.pkt_size_system.cpu.l2cache.mem_side::total 190346176 # Cumulative packet size per connected master and slave (bytes) system.membus.pkt_size::total 190346176 # Cumulative packet size per connected master and slave (bytes) system.membus.snoops 0 # Total snoops (count) +system.membus.snoopTraffic 0 # Total snoop traffic (bytes) system.membus.snoop_fanout::samples 3870249 # Request fanout histogram system.membus.snoop_fanout::mean 0 # Request fanout histogram system.membus.snoop_fanout::stdev 0 # Request fanout histogram diff --git a/tests/long/se/70.twolf/ref/alpha/tru64/minor-timing/config.ini b/tests/long/se/70.twolf/ref/alpha/tru64/minor-timing/config.ini index 5ec95ce79..5e0a983c6 100644 --- a/tests/long/se/70.twolf/ref/alpha/tru64/minor-timing/config.ini +++ b/tests/long/se/70.twolf/ref/alpha/tru64/minor-timing/config.ini @@ -14,7 +14,9 @@ children=clk_domain cpu cpu_clk_domain dvfs_handler membus physmem voltage_domai boot_osflags=a cache_line_size=64 clk_domain=system.clk_domain +default_p_state=UNDEFINED eventq_index=0 +exit_on_work_items=false init_param=0 kernel= kernel_addr_check=true @@ -24,9 +26,16 @@ mem_mode=timing mem_ranges= memories=system.physmem mmap_using_noreserve=false +multi_thread=false 
num_work_ids=16 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null readfile= symbolfile= +thermal_components= +thermal_model=Null work_begin_ckpt_count=0 work_begin_cpu_id_exit=-1 work_begin_exit_count=0 @@ -55,6 +64,7 @@ decodeCycleInput=true decodeInputBufferSize=3 decodeInputWidth=2 decodeToExecuteForwardDelay=1 +default_p_state=UNDEFINED do_checkpoint_insts=true do_quiesce=true do_statistics_insts=true @@ -97,12 +107,17 @@ max_insts_any_thread=0 max_loads_all_threads=0 max_loads_any_thread=0 numThreads=1 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null profile=0 progress_interval=0 simpoint_start_insts= socket_id=0 switched_out=false system=system +threadPolicy=RoundRobin tracer=system.cpu.tracer workload=system.cpu.workload dcache_port=system.cpu.dcache.cpu_side @@ -118,11 +133,18 @@ choicePredictorSize=8192 eventq_index=0 globalCtrBits=2 globalPredictorSize=8192 +indirectHashGHR=true +indirectHashTargets=true +indirectPathLength=3 +indirectSets=256 +indirectTagSize=16 +indirectWays=2 instShiftAmt=2 localCtrBits=2 localHistoryTableSize=2048 localPredictorSize=2048 numThreads=1 +useIndirect=true [system.cpu.dcache] type=Cache @@ -130,13 +152,18 @@ children=tags addr_ranges=0:18446744073709551615 assoc=2 clk_domain=system.cpu_clk_domain +clusivity=mostly_incl +default_p_state=UNDEFINED demand_mshr_reserve=1 eventq_index=0 -forward_snoops=true hit_latency=2 is_read_only=false max_miss_count=0 mshrs=4 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null prefetch_on_access=false prefetcher=Null response_latency=2 @@ -146,6 +173,7 @@ system=system tags=system.cpu.dcache.tags tgts_per_mshr=20 write_buffers=8 +writeback_clean=false cpu_side=system.cpu.dcache_port mem_side=system.cpu.toL2Bus.slave[1] @@ -154,8 +182,13 @@ type=LRU assoc=2 block_size=64 clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 hit_latency=2 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sequential_access=false size=262144 @@ -553,13 +586,18 @@ children=tags addr_ranges=0:18446744073709551615 assoc=2 clk_domain=system.cpu_clk_domain +clusivity=mostly_incl +default_p_state=UNDEFINED demand_mshr_reserve=1 eventq_index=0 -forward_snoops=true hit_latency=2 is_read_only=true max_miss_count=0 mshrs=4 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null prefetch_on_access=false prefetcher=Null response_latency=2 @@ -569,6 +607,7 @@ system=system tags=system.cpu.icache.tags tgts_per_mshr=20 write_buffers=8 +writeback_clean=true cpu_side=system.cpu.icache_port mem_side=system.cpu.toL2Bus.slave[0] @@ -577,8 +616,13 @@ type=LRU assoc=2 block_size=64 clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 hit_latency=2 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sequential_access=false size=131072 @@ -602,13 +646,18 @@ children=tags addr_ranges=0:18446744073709551615 assoc=8 clk_domain=system.cpu_clk_domain +clusivity=mostly_incl +default_p_state=UNDEFINED demand_mshr_reserve=1 eventq_index=0 -forward_snoops=true hit_latency=20 is_read_only=false max_miss_count=0 mshrs=20 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null prefetch_on_access=false prefetcher=Null response_latency=20 @@ -618,6 
+667,7 @@ system=system tags=system.cpu.l2cache.tags tgts_per_mshr=12 write_buffers=8 +writeback_clean=false cpu_side=system.cpu.toL2Bus.master[0] mem_side=system.membus.slave[1] @@ -626,19 +676,31 @@ type=LRU assoc=8 block_size=64 clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 hit_latency=20 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sequential_access=false size=2097152 [system.cpu.toL2Bus] type=CoherentXBar +children=snoop_filter clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 forward_latency=0 frontend_latency=1 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +point_of_coherency=false +power_model=Null response_latency=1 -snoop_filter=Null +snoop_filter=system.cpu.toL2Bus.snoop_filter snoop_response_latency=1 system=system use_default_range=false @@ -646,6 +708,13 @@ width=32 master=system.cpu.l2cache.cpu_side slave=system.cpu.icache.mem_side system.cpu.dcache.mem_side +[system.cpu.toL2Bus.snoop_filter] +type=SnoopFilter +eventq_index=0 +lookup_latency=0 +max_capacity=8388608 +system=system + [system.cpu.tracer] type=ExeTracer eventq_index=0 @@ -660,7 +729,7 @@ env= errout=cerr euid=100 eventq_index=0 -executable=/scratch/nilay/GEM5/dist/m5/cpu2000/binaries/alpha/tru64/twolf +executable=/arm/projectscratch/randd/systems/dist/cpu2000/binaries/alpha/tru64/twolf gid=100 input=cin kvmInSE=false @@ -692,9 +761,15 @@ transition_latency=100000000 [system.membus] type=CoherentXBar clk_domain=system.clk_domain +default_p_state=UNDEFINED eventq_index=0 forward_latency=4 frontend_latency=3 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +point_of_coherency=true +power_model=Null response_latency=2 snoop_filter=Null snoop_response_latency=4 @@ -738,6 +813,7 @@ burst_length=8 channels=1 clk_domain=system.clk_domain conf_table_reported=true +default_p_state=UNDEFINED device_bus_width=8 device_rowbuffer_size=1024 device_size=536870912 @@ -749,7 +825,11 @@ max_accesses_per_row=16 mem_sched_policy=frfcfs min_writes_per_switch=16 null=false +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 page_policy=open_adaptive +power_model=Null range=0:134217727 ranks_per_channel=2 read_buffer_size=32 diff --git a/tests/long/se/70.twolf/ref/alpha/tru64/minor-timing/simerr b/tests/long/se/70.twolf/ref/alpha/tru64/minor-timing/simerr index f0a9a7c93..e0bca4e4e 100755 --- a/tests/long/se/70.twolf/ref/alpha/tru64/minor-timing/simerr +++ b/tests/long/se/70.twolf/ref/alpha/tru64/minor-timing/simerr @@ -1,5 +1,6 @@ warn: DRAM device capacity (8192 Mbytes) does not match the address range assigned (128 Mbytes) warn: Sockets disabled, not accepting gdb connections +warn: ClockedObject: More than one power state change request encountered within the same simulation tick warn: Prefetch instructions in Alpha do not do anything warn: Prefetch instructions in Alpha do not do anything warn: Prefetch instructions in Alpha do not do anything diff --git a/tests/long/se/70.twolf/ref/alpha/tru64/minor-timing/simout b/tests/long/se/70.twolf/ref/alpha/tru64/minor-timing/simout index 606ce3744..9e68a8154 100755 --- a/tests/long/se/70.twolf/ref/alpha/tru64/minor-timing/simout +++ b/tests/long/se/70.twolf/ref/alpha/tru64/minor-timing/simout @@ -3,10 +3,10 @@ Redirecting stderr to build/ALPHA/tests/opt/long/se/70.twolf/alpha/tru64/minor-t gem5 Simulator System. 
http://gem5.org gem5 is copyrighted software; use the --copyright option for details. -gem5 compiled Sep 14 2015 20:54:01 -gem5 started Sep 14 2015 20:55:41 -gem5 executing on ribera.cs.wisc.edu -command line: build/ALPHA/gem5.opt -d build/ALPHA/tests/opt/long/se/70.twolf/alpha/tru64/minor-timing -re /scratch/nilay/GEM5/gem5/tests/run.py build/ALPHA/tests/opt/long/se/70.twolf/alpha/tru64/minor-timing +gem5 compiled Jul 19 2016 12:23:51 +gem5 started Jul 21 2016 14:09:29 +gem5 executing on e108600-lin, pid 4311 +command line: /work/curdun01/gem5-external.hg/build/ALPHA/gem5.opt -d build/ALPHA/tests/opt/long/se/70.twolf/alpha/tru64/minor-timing -re /work/curdun01/gem5-external.hg/tests/testing/../run.py long/se/70.twolf/alpha/tru64/minor-timing Global frequency set at 1000000000000 ticks per second info: Entering event queue @ 0. Starting simulation... @@ -24,4 +24,4 @@ Authors: Carl Sechen, Bill Swartz 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 -122 123 124 Exiting @ tick 51910606500 because target called exit() +122 123 124 Exiting @ tick 53344764500 because target called exit() diff --git a/tests/long/se/70.twolf/ref/alpha/tru64/minor-timing/stats.txt b/tests/long/se/70.twolf/ref/alpha/tru64/minor-timing/stats.txt index fcad1aab0..78502d1ca 100644 --- a/tests/long/se/70.twolf/ref/alpha/tru64/minor-timing/stats.txt +++ b/tests/long/se/70.twolf/ref/alpha/tru64/minor-timing/stats.txt @@ -1,48 +1,48 @@ ---------- Begin Simulation Statistics ---------- -sim_seconds 0.051906 # Number of seconds simulated -sim_ticks 51905634500 # Number of ticks simulated -final_tick 51905634500 # Number of ticks from beginning of simulation (restored from checkpoints and never reset) +sim_seconds 0.053345 # Number of seconds simulated +sim_ticks 53344764500 # Number of ticks simulated +final_tick 53344764500 # Number of ticks from beginning of simulation (restored from checkpoints and never reset) sim_freq 1000000000000 # Frequency of simulated ticks -host_inst_rate 509703 # Simulator instruction rate (inst/s) -host_op_rate 509703 # Simulator op (including micro ops) rate (op/s) -host_tick_rate 287873591 # Simulator tick rate (ticks/s) -host_mem_usage 300976 # Number of bytes of host memory used -host_seconds 180.31 # Real time elapsed on the host +host_inst_rate 260335 # Simulator instruction rate (inst/s) +host_op_rate 260335 # Simulator op (including micro ops) rate (op/s) +host_tick_rate 151110624 # Simulator tick rate (ticks/s) +host_mem_usage 253412 # Number of bytes of host memory used +host_seconds 353.02 # Real time elapsed on the host sim_insts 91903089 # Number of instructions simulated sim_ops 91903089 # Number of ops (including micro ops) simulated system.voltage_domain.voltage 1 # Voltage in Volts system.clk_domain.clock 1000 # Clock period in ticks -system.physmem.pwrStateResidencyTicks::UNDEFINED 51905634500 # Cumulative time (in ticks) in various power states -system.physmem.bytes_read::cpu.inst 202816 # Number of bytes read from this memory -system.physmem.bytes_read::cpu.data 137664 # Number of bytes read from this memory -system.physmem.bytes_read::total 340480 # Number of bytes read from this memory -system.physmem.bytes_inst_read::cpu.inst 202816 # Number of instructions bytes read from this memory -system.physmem.bytes_inst_read::total 202816 # Number of instructions bytes read from this memory -system.physmem.num_reads::cpu.inst 3169 # Number of read requests responded to by 
this memory -system.physmem.num_reads::cpu.data 2151 # Number of read requests responded to by this memory -system.physmem.num_reads::total 5320 # Number of read requests responded to by this memory -system.physmem.bw_read::cpu.inst 3907399 # Total read bandwidth from this memory (bytes/s) -system.physmem.bw_read::cpu.data 2652198 # Total read bandwidth from this memory (bytes/s) -system.physmem.bw_read::total 6559596 # Total read bandwidth from this memory (bytes/s) -system.physmem.bw_inst_read::cpu.inst 3907399 # Instruction read bandwidth from this memory (bytes/s) -system.physmem.bw_inst_read::total 3907399 # Instruction read bandwidth from this memory (bytes/s) -system.physmem.bw_total::cpu.inst 3907399 # Total bandwidth to/from this memory (bytes/s) -system.physmem.bw_total::cpu.data 2652198 # Total bandwidth to/from this memory (bytes/s) -system.physmem.bw_total::total 6559596 # Total bandwidth to/from this memory (bytes/s) -system.physmem.readReqs 5320 # Number of read requests accepted +system.physmem.pwrStateResidencyTicks::UNDEFINED 53344764500 # Cumulative time (in ticks) in various power states +system.physmem.bytes_read::cpu.inst 202880 # Number of bytes read from this memory +system.physmem.bytes_read::cpu.data 137728 # Number of bytes read from this memory +system.physmem.bytes_read::total 340608 # Number of bytes read from this memory +system.physmem.bytes_inst_read::cpu.inst 202880 # Number of instructions bytes read from this memory +system.physmem.bytes_inst_read::total 202880 # Number of instructions bytes read from this memory +system.physmem.num_reads::cpu.inst 3170 # Number of read requests responded to by this memory +system.physmem.num_reads::cpu.data 2152 # Number of read requests responded to by this memory +system.physmem.num_reads::total 5322 # Number of read requests responded to by this memory +system.physmem.bw_read::cpu.inst 3803185 # Total read bandwidth from this memory (bytes/s) +system.physmem.bw_read::cpu.data 2581847 # Total read bandwidth from this memory (bytes/s) +system.physmem.bw_read::total 6385031 # Total read bandwidth from this memory (bytes/s) +system.physmem.bw_inst_read::cpu.inst 3803185 # Instruction read bandwidth from this memory (bytes/s) +system.physmem.bw_inst_read::total 3803185 # Instruction read bandwidth from this memory (bytes/s) +system.physmem.bw_total::cpu.inst 3803185 # Total bandwidth to/from this memory (bytes/s) +system.physmem.bw_total::cpu.data 2581847 # Total bandwidth to/from this memory (bytes/s) +system.physmem.bw_total::total 6385031 # Total bandwidth to/from this memory (bytes/s) +system.physmem.readReqs 5322 # Number of read requests accepted system.physmem.writeReqs 0 # Number of write requests accepted -system.physmem.readBursts 5320 # Number of DRAM read bursts, including those serviced by the write queue +system.physmem.readBursts 5322 # Number of DRAM read bursts, including those serviced by the write queue system.physmem.writeBursts 0 # Number of DRAM write bursts, including those merged in the write queue -system.physmem.bytesReadDRAM 340480 # Total number of bytes read from DRAM +system.physmem.bytesReadDRAM 340608 # Total number of bytes read from DRAM system.physmem.bytesReadWrQ 0 # Total number of bytes read from write queue system.physmem.bytesWritten 0 # Total number of bytes written to DRAM -system.physmem.bytesReadSys 340480 # Total read bytes from the system interface side +system.physmem.bytesReadSys 340608 # Total read bytes from the system interface side system.physmem.bytesWrittenSys 0 # 
Total written bytes from the system interface side system.physmem.servicedByWrQ 0 # Number of DRAM read bursts serviced by the write queue system.physmem.mergedWrBursts 0 # Number of DRAM write bursts merged with an existing one system.physmem.neitherReadNorWriteReqs 0 # Number of requests that are neither read nor write -system.physmem.perBankRdBursts::0 469 # Per bank write bursts +system.physmem.perBankRdBursts::0 468 # Per bank write bursts system.physmem.perBankRdBursts::1 295 # Per bank write bursts system.physmem.perBankRdBursts::2 308 # Per bank write bursts system.physmem.perBankRdBursts::3 524 # Per bank write bursts @@ -50,13 +50,13 @@ system.physmem.perBankRdBursts::4 224 # Pe system.physmem.perBankRdBursts::5 238 # Per bank write bursts system.physmem.perBankRdBursts::6 222 # Per bank write bursts system.physmem.perBankRdBursts::7 289 # Per bank write bursts -system.physmem.perBankRdBursts::8 252 # Per bank write bursts +system.physmem.perBankRdBursts::8 254 # Per bank write bursts system.physmem.perBankRdBursts::9 282 # Per bank write bursts system.physmem.perBankRdBursts::10 254 # Per bank write bursts system.physmem.perBankRdBursts::11 261 # Per bank write bursts system.physmem.perBankRdBursts::12 410 # Per bank write bursts system.physmem.perBankRdBursts::13 344 # Per bank write bursts -system.physmem.perBankRdBursts::14 500 # Per bank write bursts +system.physmem.perBankRdBursts::14 501 # Per bank write bursts system.physmem.perBankRdBursts::15 448 # Per bank write bursts system.physmem.perBankWrBursts::0 0 # Per bank write bursts system.physmem.perBankWrBursts::1 0 # Per bank write bursts @@ -76,14 +76,14 @@ system.physmem.perBankWrBursts::14 0 # Pe system.physmem.perBankWrBursts::15 0 # Per bank write bursts system.physmem.numRdRetry 0 # Number of times read queue was full causing retry system.physmem.numWrRetry 0 # Number of times write queue was full causing retry -system.physmem.totGap 51905547000 # Total gap between requests +system.physmem.totGap 53344677500 # Total gap between requests system.physmem.readPktSize::0 0 # Read request sizes (log2) system.physmem.readPktSize::1 0 # Read request sizes (log2) system.physmem.readPktSize::2 0 # Read request sizes (log2) system.physmem.readPktSize::3 0 # Read request sizes (log2) system.physmem.readPktSize::4 0 # Read request sizes (log2) system.physmem.readPktSize::5 0 # Read request sizes (log2) -system.physmem.readPktSize::6 5320 # Read request sizes (log2) +system.physmem.readPktSize::6 5322 # Read request sizes (log2) system.physmem.writePktSize::0 0 # Write request sizes (log2) system.physmem.writePktSize::1 0 # Write request sizes (log2) system.physmem.writePktSize::2 0 # Write request sizes (log2) @@ -91,9 +91,9 @@ system.physmem.writePktSize::3 0 # Wr system.physmem.writePktSize::4 0 # Write request sizes (log2) system.physmem.writePktSize::5 0 # Write request sizes (log2) system.physmem.writePktSize::6 0 # Write request sizes (log2) -system.physmem.rdQLenPdf::0 4923 # What read queue length does an incoming req see -system.physmem.rdQLenPdf::1 378 # What read queue length does an incoming req see -system.physmem.rdQLenPdf::2 19 # What read queue length does an incoming req see +system.physmem.rdQLenPdf::0 4932 # What read queue length does an incoming req see +system.physmem.rdQLenPdf::1 379 # What read queue length does an incoming req see +system.physmem.rdQLenPdf::2 11 # What read queue length does an incoming req see system.physmem.rdQLenPdf::3 0 # What read queue length does an incoming req see 
system.physmem.rdQLenPdf::4 0 # What read queue length does an incoming req see system.physmem.rdQLenPdf::5 0 # What read queue length does an incoming req see @@ -187,29 +187,29 @@ system.physmem.wrQLenPdf::60 0 # Wh system.physmem.wrQLenPdf::61 0 # What write queue length does an incoming req see system.physmem.wrQLenPdf::62 0 # What write queue length does an incoming req see system.physmem.wrQLenPdf::63 0 # What write queue length does an incoming req see -system.physmem.bytesPerActivate::samples 982 # Bytes accessed per row activation -system.physmem.bytesPerActivate::mean 346.395112 # Bytes accessed per row activation -system.physmem.bytesPerActivate::gmean 212.989816 # Bytes accessed per row activation -system.physmem.bytesPerActivate::stdev 328.326928 # Bytes accessed per row activation -system.physmem.bytesPerActivate::0-127 308 31.36% 31.36% # Bytes accessed per row activation -system.physmem.bytesPerActivate::128-255 213 21.69% 53.05% # Bytes accessed per row activation -system.physmem.bytesPerActivate::256-383 101 10.29% 63.34% # Bytes accessed per row activation -system.physmem.bytesPerActivate::384-511 90 9.16% 72.51% # Bytes accessed per row activation -system.physmem.bytesPerActivate::512-639 71 7.23% 79.74% # Bytes accessed per row activation -system.physmem.bytesPerActivate::640-767 37 3.77% 83.50% # Bytes accessed per row activation -system.physmem.bytesPerActivate::768-895 21 2.14% 85.64% # Bytes accessed per row activation -system.physmem.bytesPerActivate::896-1023 29 2.95% 88.59% # Bytes accessed per row activation -system.physmem.bytesPerActivate::1024-1151 112 11.41% 100.00% # Bytes accessed per row activation -system.physmem.bytesPerActivate::total 982 # Bytes accessed per row activation -system.physmem.totQLat 32661000 # Total ticks spent queuing -system.physmem.totMemAccLat 132411000 # Total ticks spent from burst creation until serviced by the DRAM -system.physmem.totBusLat 26600000 # Total ticks spent in databus transfers -system.physmem.avgQLat 6139.29 # Average queueing delay per DRAM burst +system.physmem.bytesPerActivate::samples 989 # Bytes accessed per row activation +system.physmem.bytesPerActivate::mean 343.749242 # Bytes accessed per row activation +system.physmem.bytesPerActivate::gmean 211.692592 # Bytes accessed per row activation +system.physmem.bytesPerActivate::stdev 325.528362 # Bytes accessed per row activation +system.physmem.bytesPerActivate::0-127 314 31.75% 31.75% # Bytes accessed per row activation +system.physmem.bytesPerActivate::128-255 216 21.84% 53.59% # Bytes accessed per row activation +system.physmem.bytesPerActivate::256-383 88 8.90% 62.49% # Bytes accessed per row activation +system.physmem.bytesPerActivate::384-511 117 11.83% 74.32% # Bytes accessed per row activation +system.physmem.bytesPerActivate::512-639 52 5.26% 79.58% # Bytes accessed per row activation +system.physmem.bytesPerActivate::640-767 40 4.04% 83.62% # Bytes accessed per row activation +system.physmem.bytesPerActivate::768-895 29 2.93% 86.55% # Bytes accessed per row activation +system.physmem.bytesPerActivate::896-1023 21 2.12% 88.68% # Bytes accessed per row activation +system.physmem.bytesPerActivate::1024-1151 112 11.32% 100.00% # Bytes accessed per row activation +system.physmem.bytesPerActivate::total 989 # Bytes accessed per row activation +system.physmem.totQLat 40222250 # Total ticks spent queuing +system.physmem.totMemAccLat 140009750 # Total ticks spent from burst creation until serviced by the DRAM +system.physmem.totBusLat 26610000 # Total ticks 
spent in databus transfers +system.physmem.avgQLat 7557.73 # Average queueing delay per DRAM burst system.physmem.avgBusLat 5000.00 # Average bus latency per DRAM burst -system.physmem.avgMemAccLat 24889.29 # Average memory access latency per DRAM burst -system.physmem.avgRdBW 6.56 # Average DRAM read bandwidth in MiByte/s +system.physmem.avgMemAccLat 26307.73 # Average memory access latency per DRAM burst +system.physmem.avgRdBW 6.39 # Average DRAM read bandwidth in MiByte/s system.physmem.avgWrBW 0.00 # Average achieved write bandwidth in MiByte/s -system.physmem.avgRdBWSys 6.56 # Average system read bandwidth in MiByte/s +system.physmem.avgRdBWSys 6.39 # Average system read bandwidth in MiByte/s system.physmem.avgWrBWSys 0.00 # Average system write bandwidth in MiByte/s system.physmem.peakBW 12800.00 # Theoretical peak bandwidth in MiByte/s system.physmem.busUtil 0.05 # Data bus utilization in percentage @@ -217,75 +217,75 @@ system.physmem.busUtilRead 0.05 # Da system.physmem.busUtilWrite 0.00 # Data bus utilization in percentage for writes system.physmem.avgRdQLen 1.00 # Average read queue length when enqueuing system.physmem.avgWrQLen 0.00 # Average write queue length when enqueuing -system.physmem.readRowHits 4334 # Number of row buffer hits during reads +system.physmem.readRowHits 4331 # Number of row buffer hits during reads system.physmem.writeRowHits 0 # Number of row buffer hits during writes -system.physmem.readRowHitRate 81.47 # Row buffer hit rate for reads +system.physmem.readRowHitRate 81.38 # Row buffer hit rate for reads system.physmem.writeRowHitRate nan # Row buffer hit rate for writes -system.physmem.avgGap 9756681.77 # Average gap between requests -system.physmem.pageHitRate 81.47 # Row buffer hit rate, read and write combined -system.physmem_0.actEnergy 3515400 # Energy for activate commands per rank (pJ) -system.physmem_0.preEnergy 1918125 # Energy for precharge commands per rank (pJ) -system.physmem_0.readEnergy 19983600 # Energy for read commands per rank (pJ) +system.physmem.avgGap 10023426.81 # Average gap between requests +system.physmem.pageHitRate 81.38 # Row buffer hit rate, read and write combined +system.physmem_0.actEnergy 3538080 # Energy for activate commands per rank (pJ) +system.physmem_0.preEnergy 1930500 # Energy for precharge commands per rank (pJ) +system.physmem_0.readEnergy 20022600 # Energy for read commands per rank (pJ) system.physmem_0.writeEnergy 0 # Energy for write commands per rank (pJ) -system.physmem_0.refreshEnergy 3390060960 # Energy for refresh commands per rank (pJ) -system.physmem_0.actBackEnergy 1736098875 # Energy for active background per rank (pJ) -system.physmem_0.preBackEnergy 29619147750 # Energy for precharge background per rank (pJ) -system.physmem_0.totalEnergy 34770724710 # Total energy per rank (pJ) -system.physmem_0.averagePower 669.912241 # Core power per rank (mW) -system.physmem_0.memoryStateTime::IDLE 49270880000 # Time in different power states -system.physmem_0.memoryStateTime::REF 1733160000 # Time in different power states +system.physmem_0.refreshEnergy 3484144560 # Energy for refresh commands per rank (pJ) +system.physmem_0.actBackEnergy 1791514845 # Energy for active background per rank (pJ) +system.physmem_0.preBackEnergy 30434811000 # Energy for precharge background per rank (pJ) +system.physmem_0.totalEnergy 35735961585 # Total energy per rank (pJ) +system.physmem_0.averagePower 669.917071 # Core power per rank (mW) +system.physmem_0.memoryStateTime::IDLE 50627942250 # Time in different power states 
+system.physmem_0.memoryStateTime::REF 1781260000 # Time in different power states system.physmem_0.memoryStateTime::PRE_PDN 0 # Time in different power states -system.physmem_0.memoryStateTime::ACT 899376250 # Time in different power states +system.physmem_0.memoryStateTime::ACT 934855250 # Time in different power states system.physmem_0.memoryStateTime::ACT_PDN 0 # Time in different power states -system.physmem_1.actEnergy 3885840 # Energy for activate commands per rank (pJ) -system.physmem_1.preEnergy 2120250 # Energy for precharge commands per rank (pJ) -system.physmem_1.readEnergy 21309600 # Energy for read commands per rank (pJ) +system.physmem_1.actEnergy 3938760 # Energy for activate commands per rank (pJ) +system.physmem_1.preEnergy 2149125 # Energy for precharge commands per rank (pJ) +system.physmem_1.readEnergy 21411000 # Energy for read commands per rank (pJ) system.physmem_1.writeEnergy 0 # Energy for write commands per rank (pJ) -system.physmem_1.refreshEnergy 3390060960 # Energy for refresh commands per rank (pJ) -system.physmem_1.actBackEnergy 1812535875 # Energy for active background per rank (pJ) -system.physmem_1.preBackEnergy 29552097750 # Energy for precharge background per rank (pJ) -system.physmem_1.totalEnergy 34782010275 # Total energy per rank (pJ) -system.physmem_1.averagePower 670.129676 # Core power per rank (mW) -system.physmem_1.memoryStateTime::IDLE 49159142250 # Time in different power states -system.physmem_1.memoryStateTime::REF 1733160000 # Time in different power states +system.physmem_1.refreshEnergy 3484144560 # Energy for refresh commands per rank (pJ) +system.physmem_1.actBackEnergy 1835182260 # Energy for active background per rank (pJ) +system.physmem_1.preBackEnergy 30396506250 # Energy for precharge background per rank (pJ) +system.physmem_1.totalEnergy 35743331955 # Total energy per rank (pJ) +system.physmem_1.averagePower 670.055238 # Core power per rank (mW) +system.physmem_1.memoryStateTime::IDLE 50563679500 # Time in different power states +system.physmem_1.memoryStateTime::REF 1781260000 # Time in different power states system.physmem_1.memoryStateTime::PRE_PDN 0 # Time in different power states -system.physmem_1.memoryStateTime::ACT 1011440250 # Time in different power states +system.physmem_1.memoryStateTime::ACT 998933000 # Time in different power states system.physmem_1.memoryStateTime::ACT_PDN 0 # Time in different power states -system.pwrStateResidencyTicks::UNDEFINED 51905634500 # Cumulative time (in ticks) in various power states -system.cpu.branchPred.lookups 11440185 # Number of BP lookups -system.cpu.branchPred.condPredicted 8207191 # Number of conditional branches predicted -system.cpu.branchPred.condIncorrect 765027 # Number of conditional branches incorrect -system.cpu.branchPred.BTBLookups 6076858 # Number of BTB lookups -system.cpu.branchPred.BTBHits 5316207 # Number of BTB hits +system.pwrStateResidencyTicks::UNDEFINED 53344764500 # Cumulative time (in ticks) in various power states +system.cpu.branchPred.lookups 11450644 # Number of BP lookups +system.cpu.branchPred.condPredicted 8210940 # Number of conditional branches predicted +system.cpu.branchPred.condIncorrect 765018 # Number of conditional branches incorrect +system.cpu.branchPred.BTBLookups 6085193 # Number of BTB lookups +system.cpu.branchPred.BTBHits 5320740 # Number of BTB hits system.cpu.branchPred.BTBCorrect 0 # Number of correct BTB predictions (this stat may not work properly. 
-system.cpu.branchPred.BTBHitPct 87.482824 # BTB Hit Percentage -system.cpu.branchPred.usedRAS 1173724 # Number of times the RAS was used to get a target. +system.cpu.branchPred.BTBHitPct 87.437490 # BTB Hit Percentage +system.cpu.branchPred.usedRAS 1176675 # Number of times the RAS was used to get a target. system.cpu.branchPred.RASInCorrect 216 # Number of incorrect RAS predictions. -system.cpu.branchPred.indirectLookups 26312 # Number of indirect predictor lookups. -system.cpu.branchPred.indirectHits 24255 # Number of indirect target hits. -system.cpu.branchPred.indirectMisses 2057 # Number of indirect misses. +system.cpu.branchPred.indirectLookups 26315 # Number of indirect predictor lookups. +system.cpu.branchPred.indirectHits 24242 # Number of indirect target hits. +system.cpu.branchPred.indirectMisses 2073 # Number of indirect misses. system.cpu.branchPredindirectMispredicted 983 # Number of mispredicted indirect branches. system.cpu_clk_domain.clock 500 # Clock period in ticks system.cpu.dtb.fetch_hits 0 # ITB hits system.cpu.dtb.fetch_misses 0 # ITB misses system.cpu.dtb.fetch_acv 0 # ITB acv system.cpu.dtb.fetch_accesses 0 # ITB accesses -system.cpu.dtb.read_hits 20416195 # DTB read hits -system.cpu.dtb.read_misses 43360 # DTB read misses +system.cpu.dtb.read_hits 20415220 # DTB read hits +system.cpu.dtb.read_misses 43383 # DTB read misses system.cpu.dtb.read_acv 0 # DTB read access violations -system.cpu.dtb.read_accesses 20459555 # DTB read accesses -system.cpu.dtb.write_hits 6579893 # DTB write hits -system.cpu.dtb.write_misses 278 # DTB write misses +system.cpu.dtb.read_accesses 20458603 # DTB read accesses +system.cpu.dtb.write_hits 6579912 # DTB write hits +system.cpu.dtb.write_misses 276 # DTB write misses system.cpu.dtb.write_acv 0 # DTB write access violations -system.cpu.dtb.write_accesses 6580171 # DTB write accesses -system.cpu.dtb.data_hits 26996088 # DTB hits -system.cpu.dtb.data_misses 43638 # DTB misses +system.cpu.dtb.write_accesses 6580188 # DTB write accesses +system.cpu.dtb.data_hits 26995132 # DTB hits +system.cpu.dtb.data_misses 43659 # DTB misses system.cpu.dtb.data_acv 0 # DTB access violations -system.cpu.dtb.data_accesses 27039726 # DTB accesses -system.cpu.itb.fetch_hits 22951506 # ITB hits +system.cpu.dtb.data_accesses 27038791 # DTB accesses +system.cpu.itb.fetch_hits 22968620 # ITB hits system.cpu.itb.fetch_misses 90 # ITB misses system.cpu.itb.fetch_acv 0 # ITB acv -system.cpu.itb.fetch_accesses 22951596 # ITB accesses +system.cpu.itb.fetch_accesses 22968710 # ITB accesses system.cpu.itb.read_hits 0 # DTB read hits system.cpu.itb.read_misses 0 # DTB read misses system.cpu.itb.read_acv 0 # DTB read access violations @@ -299,16 +299,16 @@ system.cpu.itb.data_misses 0 # DT system.cpu.itb.data_acv 0 # DTB access violations system.cpu.itb.data_accesses 0 # DTB accesses system.cpu.workload.num_syscalls 389 # Number of system calls -system.cpu.pwrStateResidencyTicks::ON 51905634500 # Cumulative time (in ticks) in various power states -system.cpu.numCycles 103811269 # number of cpu cycles simulated +system.cpu.pwrStateResidencyTicks::ON 53344764500 # Cumulative time (in ticks) in various power states +system.cpu.numCycles 106689529 # number of cpu cycles simulated system.cpu.numWorkItemsStarted 0 # number of work items this cpu started system.cpu.numWorkItemsCompleted 0 # number of work items this cpu completed system.cpu.committedInsts 91903089 # Number of instructions committed system.cpu.committedOps 91903089 # Number of ops (including micro ops) 
committed -system.cpu.discardedOps 2181586 # Number of ops (including micro ops) which were discarded before commit +system.cpu.discardedOps 2191325 # Number of ops (including micro ops) which were discarded before commit system.cpu.numFetchSuspends 0 # Number of times Execute suspended instruction fetching -system.cpu.cpi 1.129573 # CPI: cycles per instruction -system.cpu.ipc 0.885290 # IPC: instructions per cycle +system.cpu.cpi 1.160892 # CPI: cycles per instruction +system.cpu.ipc 0.861407 # IPC: instructions per cycle system.cpu.op_class_0::No_OpClass 7723353 8.40% 8.40% # Class of committed instruction system.cpu.op_class_0::IntAlu 51001454 55.49% 63.90% # Class of committed instruction system.cpu.op_class_0::IntMult 458252 0.50% 64.40% # Class of committed instruction @@ -344,76 +344,76 @@ system.cpu.op_class_0::MemWrite 6501126 7.07% 100.00% # Cl system.cpu.op_class_0::IprAccess 0 0.00% 100.00% # Class of committed instruction system.cpu.op_class_0::InstPrefetch 0 0.00% 100.00% # Class of committed instruction system.cpu.op_class_0::total 91903089 # Class of committed instruction -system.cpu.tickCycles 102098443 # Number of cycles that the object actually ticked -system.cpu.idleCycles 1712826 # Total number of cycles that the object has spent stopped -system.cpu.dcache.tags.pwrStateResidencyTicks::UNDEFINED 51905634500 # Cumulative time (in ticks) in various power states +system.cpu.tickCycles 103791732 # Number of cycles that the object actually ticked +system.cpu.idleCycles 2897797 # Total number of cycles that the object has spent stopped +system.cpu.dcache.tags.pwrStateResidencyTicks::UNDEFINED 53344764500 # Cumulative time (in ticks) in various power states system.cpu.dcache.tags.replacements 157 # number of replacements -system.cpu.dcache.tags.tagsinuse 1447.414267 # Cycle average of tags in use -system.cpu.dcache.tags.total_refs 26572424 # Total number of references to valid blocks. -system.cpu.dcache.tags.sampled_refs 2230 # Sample count of references to valid blocks. -system.cpu.dcache.tags.avg_refs 11915.885202 # Average number of references to valid blocks. +system.cpu.dcache.tags.tagsinuse 1447.584436 # Cycle average of tags in use +system.cpu.dcache.tags.total_refs 26572205 # Total number of references to valid blocks. +system.cpu.dcache.tags.sampled_refs 2231 # Sample count of references to valid blocks. +system.cpu.dcache.tags.avg_refs 11910.445988 # Average number of references to valid blocks. system.cpu.dcache.tags.warmup_cycle 0 # Cycle when the warmup percentage was hit. 
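The CPI/IPC pair and the cycle breakdown above are internally consistent; a minimal sketch (Python, not part of the diff; values copied from the updated stats) makes the arithmetic explicit:

    # Hypothetical cross-check, not part of gem5 or this diff.
    num_cycles      = 106_689_529   # system.cpu.numCycles
    tick_cycles     = 103_791_732   # system.cpu.tickCycles
    idle_cycles     =   2_897_797   # system.cpu.idleCycles
    committed_insts =  91_903_089   # system.cpu.committedInsts

    assert tick_cycles + idle_cycles == num_cycles   # ticked + idle cycles add up exactly

    print(f"CPI ~= {num_cycles / committed_insts:.6f}")   # ~1.160892
    print(f"IPC ~= {committed_insts / num_cycles:.6f}")   # ~0.861407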
-system.cpu.dcache.tags.occ_blocks::cpu.data 1447.414267 # Average occupied blocks per requestor -system.cpu.dcache.tags.occ_percent::cpu.data 0.353373 # Average percentage of cache occupancy -system.cpu.dcache.tags.occ_percent::total 0.353373 # Average percentage of cache occupancy -system.cpu.dcache.tags.occ_task_id_blocks::1024 2073 # Occupied blocks per task id +system.cpu.dcache.tags.occ_blocks::cpu.data 1447.584436 # Average occupied blocks per requestor +system.cpu.dcache.tags.occ_percent::cpu.data 0.353414 # Average percentage of cache occupancy +system.cpu.dcache.tags.occ_percent::total 0.353414 # Average percentage of cache occupancy +system.cpu.dcache.tags.occ_task_id_blocks::1024 2074 # Occupied blocks per task id system.cpu.dcache.tags.age_task_id_blocks_1024::0 19 # Occupied blocks per task id system.cpu.dcache.tags.age_task_id_blocks_1024::1 43 # Occupied blocks per task id -system.cpu.dcache.tags.age_task_id_blocks_1024::2 227 # Occupied blocks per task id +system.cpu.dcache.tags.age_task_id_blocks_1024::2 228 # Occupied blocks per task id system.cpu.dcache.tags.age_task_id_blocks_1024::3 405 # Occupied blocks per task id system.cpu.dcache.tags.age_task_id_blocks_1024::4 1379 # Occupied blocks per task id -system.cpu.dcache.tags.occ_task_id_percent::1024 0.506104 # Percentage of cache occupancy per task id -system.cpu.dcache.tags.tag_accesses 53153936 # Number of tag accesses -system.cpu.dcache.tags.data_accesses 53153936 # Number of data accesses -system.cpu.dcache.pwrStateResidencyTicks::UNDEFINED 51905634500 # Cumulative time (in ticks) in various power states -system.cpu.dcache.ReadReq_hits::cpu.data 20074229 # number of ReadReq hits -system.cpu.dcache.ReadReq_hits::total 20074229 # number of ReadReq hits -system.cpu.dcache.WriteReq_hits::cpu.data 6498195 # number of WriteReq hits -system.cpu.dcache.WriteReq_hits::total 6498195 # number of WriteReq hits -system.cpu.dcache.demand_hits::cpu.data 26572424 # number of demand (read+write) hits -system.cpu.dcache.demand_hits::total 26572424 # number of demand (read+write) hits -system.cpu.dcache.overall_hits::cpu.data 26572424 # number of overall hits -system.cpu.dcache.overall_hits::total 26572424 # number of overall hits -system.cpu.dcache.ReadReq_misses::cpu.data 521 # number of ReadReq misses -system.cpu.dcache.ReadReq_misses::total 521 # number of ReadReq misses -system.cpu.dcache.WriteReq_misses::cpu.data 2908 # number of WriteReq misses -system.cpu.dcache.WriteReq_misses::total 2908 # number of WriteReq misses -system.cpu.dcache.demand_misses::cpu.data 3429 # number of demand (read+write) misses -system.cpu.dcache.demand_misses::total 3429 # number of demand (read+write) misses -system.cpu.dcache.overall_misses::cpu.data 3429 # number of overall misses -system.cpu.dcache.overall_misses::total 3429 # number of overall misses -system.cpu.dcache.ReadReq_miss_latency::cpu.data 40464500 # number of ReadReq miss cycles -system.cpu.dcache.ReadReq_miss_latency::total 40464500 # number of ReadReq miss cycles -system.cpu.dcache.WriteReq_miss_latency::cpu.data 214055500 # number of WriteReq miss cycles -system.cpu.dcache.WriteReq_miss_latency::total 214055500 # number of WriteReq miss cycles -system.cpu.dcache.demand_miss_latency::cpu.data 254520000 # number of demand (read+write) miss cycles -system.cpu.dcache.demand_miss_latency::total 254520000 # number of demand (read+write) miss cycles -system.cpu.dcache.overall_miss_latency::cpu.data 254520000 # number of overall miss cycles -system.cpu.dcache.overall_miss_latency::total 
254520000 # number of overall miss cycles -system.cpu.dcache.ReadReq_accesses::cpu.data 20074750 # number of ReadReq accesses(hits+misses) -system.cpu.dcache.ReadReq_accesses::total 20074750 # number of ReadReq accesses(hits+misses) +system.cpu.dcache.tags.occ_task_id_percent::1024 0.506348 # Percentage of cache occupancy per task id +system.cpu.dcache.tags.tag_accesses 53153443 # Number of tag accesses +system.cpu.dcache.tags.data_accesses 53153443 # Number of data accesses +system.cpu.dcache.pwrStateResidencyTicks::UNDEFINED 53344764500 # Cumulative time (in ticks) in various power states +system.cpu.dcache.ReadReq_hits::cpu.data 20074007 # number of ReadReq hits +system.cpu.dcache.ReadReq_hits::total 20074007 # number of ReadReq hits +system.cpu.dcache.WriteReq_hits::cpu.data 6498198 # number of WriteReq hits +system.cpu.dcache.WriteReq_hits::total 6498198 # number of WriteReq hits +system.cpu.dcache.demand_hits::cpu.data 26572205 # number of demand (read+write) hits +system.cpu.dcache.demand_hits::total 26572205 # number of demand (read+write) hits +system.cpu.dcache.overall_hits::cpu.data 26572205 # number of overall hits +system.cpu.dcache.overall_hits::total 26572205 # number of overall hits +system.cpu.dcache.ReadReq_misses::cpu.data 496 # number of ReadReq misses +system.cpu.dcache.ReadReq_misses::total 496 # number of ReadReq misses +system.cpu.dcache.WriteReq_misses::cpu.data 2905 # number of WriteReq misses +system.cpu.dcache.WriteReq_misses::total 2905 # number of WriteReq misses +system.cpu.dcache.demand_misses::cpu.data 3401 # number of demand (read+write) misses +system.cpu.dcache.demand_misses::total 3401 # number of demand (read+write) misses +system.cpu.dcache.overall_misses::cpu.data 3401 # number of overall misses +system.cpu.dcache.overall_misses::total 3401 # number of overall misses +system.cpu.dcache.ReadReq_miss_latency::cpu.data 37448500 # number of ReadReq miss cycles +system.cpu.dcache.ReadReq_miss_latency::total 37448500 # number of ReadReq miss cycles +system.cpu.dcache.WriteReq_miss_latency::cpu.data 219755500 # number of WriteReq miss cycles +system.cpu.dcache.WriteReq_miss_latency::total 219755500 # number of WriteReq miss cycles +system.cpu.dcache.demand_miss_latency::cpu.data 257204000 # number of demand (read+write) miss cycles +system.cpu.dcache.demand_miss_latency::total 257204000 # number of demand (read+write) miss cycles +system.cpu.dcache.overall_miss_latency::cpu.data 257204000 # number of overall miss cycles +system.cpu.dcache.overall_miss_latency::total 257204000 # number of overall miss cycles +system.cpu.dcache.ReadReq_accesses::cpu.data 20074503 # number of ReadReq accesses(hits+misses) +system.cpu.dcache.ReadReq_accesses::total 20074503 # number of ReadReq accesses(hits+misses) system.cpu.dcache.WriteReq_accesses::cpu.data 6501103 # number of WriteReq accesses(hits+misses) system.cpu.dcache.WriteReq_accesses::total 6501103 # number of WriteReq accesses(hits+misses) -system.cpu.dcache.demand_accesses::cpu.data 26575853 # number of demand (read+write) accesses -system.cpu.dcache.demand_accesses::total 26575853 # number of demand (read+write) accesses -system.cpu.dcache.overall_accesses::cpu.data 26575853 # number of overall (read+write) accesses -system.cpu.dcache.overall_accesses::total 26575853 # number of overall (read+write) accesses -system.cpu.dcache.ReadReq_miss_rate::cpu.data 0.000026 # miss rate for ReadReq accesses -system.cpu.dcache.ReadReq_miss_rate::total 0.000026 # miss rate for ReadReq accesses 
+system.cpu.dcache.demand_accesses::cpu.data 26575606 # number of demand (read+write) accesses +system.cpu.dcache.demand_accesses::total 26575606 # number of demand (read+write) accesses +system.cpu.dcache.overall_accesses::cpu.data 26575606 # number of overall (read+write) accesses +system.cpu.dcache.overall_accesses::total 26575606 # number of overall (read+write) accesses +system.cpu.dcache.ReadReq_miss_rate::cpu.data 0.000025 # miss rate for ReadReq accesses +system.cpu.dcache.ReadReq_miss_rate::total 0.000025 # miss rate for ReadReq accesses system.cpu.dcache.WriteReq_miss_rate::cpu.data 0.000447 # miss rate for WriteReq accesses system.cpu.dcache.WriteReq_miss_rate::total 0.000447 # miss rate for WriteReq accesses -system.cpu.dcache.demand_miss_rate::cpu.data 0.000129 # miss rate for demand accesses -system.cpu.dcache.demand_miss_rate::total 0.000129 # miss rate for demand accesses -system.cpu.dcache.overall_miss_rate::cpu.data 0.000129 # miss rate for overall accesses -system.cpu.dcache.overall_miss_rate::total 0.000129 # miss rate for overall accesses -system.cpu.dcache.ReadReq_avg_miss_latency::cpu.data 77666.986564 # average ReadReq miss latency -system.cpu.dcache.ReadReq_avg_miss_latency::total 77666.986564 # average ReadReq miss latency -system.cpu.dcache.WriteReq_avg_miss_latency::cpu.data 73609.181568 # average WriteReq miss latency -system.cpu.dcache.WriteReq_avg_miss_latency::total 73609.181568 # average WriteReq miss latency -system.cpu.dcache.demand_avg_miss_latency::cpu.data 74225.721785 # average overall miss latency -system.cpu.dcache.demand_avg_miss_latency::total 74225.721785 # average overall miss latency -system.cpu.dcache.overall_avg_miss_latency::cpu.data 74225.721785 # average overall miss latency -system.cpu.dcache.overall_avg_miss_latency::total 74225.721785 # average overall miss latency +system.cpu.dcache.demand_miss_rate::cpu.data 0.000128 # miss rate for demand accesses +system.cpu.dcache.demand_miss_rate::total 0.000128 # miss rate for demand accesses +system.cpu.dcache.overall_miss_rate::cpu.data 0.000128 # miss rate for overall accesses +system.cpu.dcache.overall_miss_rate::total 0.000128 # miss rate for overall accesses +system.cpu.dcache.ReadReq_avg_miss_latency::cpu.data 75501.008065 # average ReadReq miss latency +system.cpu.dcache.ReadReq_avg_miss_latency::total 75501.008065 # average ReadReq miss latency +system.cpu.dcache.WriteReq_avg_miss_latency::cpu.data 75647.332186 # average WriteReq miss latency +system.cpu.dcache.WriteReq_avg_miss_latency::total 75647.332186 # average WriteReq miss latency +system.cpu.dcache.demand_avg_miss_latency::cpu.data 75625.992355 # average overall miss latency +system.cpu.dcache.demand_avg_miss_latency::total 75625.992355 # average overall miss latency +system.cpu.dcache.overall_avg_miss_latency::cpu.data 75625.992355 # average overall miss latency +system.cpu.dcache.overall_avg_miss_latency::total 75625.992355 # average overall miss latency system.cpu.dcache.blocked_cycles::no_mshrs 0 # number of cycles access was blocked system.cpu.dcache.blocked_cycles::no_targets 0 # number of cycles access was blocked system.cpu.dcache.blocked::no_mshrs 0 # number of cycles access was blocked @@ -422,30 +422,30 @@ system.cpu.dcache.avg_blocked_cycles::no_mshrs nan system.cpu.dcache.avg_blocked_cycles::no_targets nan # average number of cycles each access was blocked system.cpu.dcache.writebacks::writebacks 107 # number of writebacks system.cpu.dcache.writebacks::total 107 # number of writebacks 
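The dcache miss-rate and average-latency rows are likewise ratios of the counters above them. A small sketch (Python, not part of the diff; values copied from the new reference output, latencies in ticks) shows the derivation:

    # Hypothetical check, not part of gem5 or this diff.
    demand_accesses = 26_575_606    # system.cpu.dcache.demand_accesses::total
    demand_misses   =      3_401    # system.cpu.dcache.demand_misses::total
    demand_miss_lat = 257_204_000   # system.cpu.dcache.demand_miss_latency::total (ticks)

    print(f"demand_miss_rate        ~= {demand_misses / demand_accesses:.6f}")   # ~0.000128
    print(f"demand_avg_miss_latency ~= {demand_miss_lat / demand_misses:.6f}")   # ~75625.992355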
-system.cpu.dcache.ReadReq_mshr_hits::cpu.data 36 # number of ReadReq MSHR hits -system.cpu.dcache.ReadReq_mshr_hits::total 36 # number of ReadReq MSHR hits -system.cpu.dcache.WriteReq_mshr_hits::cpu.data 1163 # number of WriteReq MSHR hits -system.cpu.dcache.WriteReq_mshr_hits::total 1163 # number of WriteReq MSHR hits -system.cpu.dcache.demand_mshr_hits::cpu.data 1199 # number of demand (read+write) MSHR hits -system.cpu.dcache.demand_mshr_hits::total 1199 # number of demand (read+write) MSHR hits -system.cpu.dcache.overall_mshr_hits::cpu.data 1199 # number of overall MSHR hits -system.cpu.dcache.overall_mshr_hits::total 1199 # number of overall MSHR hits -system.cpu.dcache.ReadReq_mshr_misses::cpu.data 485 # number of ReadReq MSHR misses -system.cpu.dcache.ReadReq_mshr_misses::total 485 # number of ReadReq MSHR misses -system.cpu.dcache.WriteReq_mshr_misses::cpu.data 1745 # number of WriteReq MSHR misses -system.cpu.dcache.WriteReq_mshr_misses::total 1745 # number of WriteReq MSHR misses -system.cpu.dcache.demand_mshr_misses::cpu.data 2230 # number of demand (read+write) MSHR misses -system.cpu.dcache.demand_mshr_misses::total 2230 # number of demand (read+write) MSHR misses -system.cpu.dcache.overall_mshr_misses::cpu.data 2230 # number of overall MSHR misses -system.cpu.dcache.overall_mshr_misses::total 2230 # number of overall MSHR misses -system.cpu.dcache.ReadReq_mshr_miss_latency::cpu.data 36953000 # number of ReadReq MSHR miss cycles -system.cpu.dcache.ReadReq_mshr_miss_latency::total 36953000 # number of ReadReq MSHR miss cycles -system.cpu.dcache.WriteReq_mshr_miss_latency::cpu.data 131397000 # number of WriteReq MSHR miss cycles -system.cpu.dcache.WriteReq_mshr_miss_latency::total 131397000 # number of WriteReq MSHR miss cycles -system.cpu.dcache.demand_mshr_miss_latency::cpu.data 168350000 # number of demand (read+write) MSHR miss cycles -system.cpu.dcache.demand_mshr_miss_latency::total 168350000 # number of demand (read+write) MSHR miss cycles -system.cpu.dcache.overall_mshr_miss_latency::cpu.data 168350000 # number of overall MSHR miss cycles -system.cpu.dcache.overall_mshr_miss_latency::total 168350000 # number of overall MSHR miss cycles +system.cpu.dcache.ReadReq_mshr_hits::cpu.data 8 # number of ReadReq MSHR hits +system.cpu.dcache.ReadReq_mshr_hits::total 8 # number of ReadReq MSHR hits +system.cpu.dcache.WriteReq_mshr_hits::cpu.data 1162 # number of WriteReq MSHR hits +system.cpu.dcache.WriteReq_mshr_hits::total 1162 # number of WriteReq MSHR hits +system.cpu.dcache.demand_mshr_hits::cpu.data 1170 # number of demand (read+write) MSHR hits +system.cpu.dcache.demand_mshr_hits::total 1170 # number of demand (read+write) MSHR hits +system.cpu.dcache.overall_mshr_hits::cpu.data 1170 # number of overall MSHR hits +system.cpu.dcache.overall_mshr_hits::total 1170 # number of overall MSHR hits +system.cpu.dcache.ReadReq_mshr_misses::cpu.data 488 # number of ReadReq MSHR misses +system.cpu.dcache.ReadReq_mshr_misses::total 488 # number of ReadReq MSHR misses +system.cpu.dcache.WriteReq_mshr_misses::cpu.data 1743 # number of WriteReq MSHR misses +system.cpu.dcache.WriteReq_mshr_misses::total 1743 # number of WriteReq MSHR misses +system.cpu.dcache.demand_mshr_misses::cpu.data 2231 # number of demand (read+write) MSHR misses +system.cpu.dcache.demand_mshr_misses::total 2231 # number of demand (read+write) MSHR misses +system.cpu.dcache.overall_mshr_misses::cpu.data 2231 # number of overall MSHR misses +system.cpu.dcache.overall_mshr_misses::total 2231 # number of overall MSHR 
misses +system.cpu.dcache.ReadReq_mshr_miss_latency::cpu.data 36544000 # number of ReadReq MSHR miss cycles +system.cpu.dcache.ReadReq_mshr_miss_latency::total 36544000 # number of ReadReq MSHR miss cycles +system.cpu.dcache.WriteReq_mshr_miss_latency::cpu.data 137282000 # number of WriteReq MSHR miss cycles +system.cpu.dcache.WriteReq_mshr_miss_latency::total 137282000 # number of WriteReq MSHR miss cycles +system.cpu.dcache.demand_mshr_miss_latency::cpu.data 173826000 # number of demand (read+write) MSHR miss cycles +system.cpu.dcache.demand_mshr_miss_latency::total 173826000 # number of demand (read+write) MSHR miss cycles +system.cpu.dcache.overall_mshr_miss_latency::cpu.data 173826000 # number of overall MSHR miss cycles +system.cpu.dcache.overall_mshr_miss_latency::total 173826000 # number of overall MSHR miss cycles system.cpu.dcache.ReadReq_mshr_miss_rate::cpu.data 0.000024 # mshr miss rate for ReadReq accesses system.cpu.dcache.ReadReq_mshr_miss_rate::total 0.000024 # mshr miss rate for ReadReq accesses system.cpu.dcache.WriteReq_mshr_miss_rate::cpu.data 0.000268 # mshr miss rate for WriteReq accesses @@ -454,322 +454,324 @@ system.cpu.dcache.demand_mshr_miss_rate::cpu.data 0.000084 system.cpu.dcache.demand_mshr_miss_rate::total 0.000084 # mshr miss rate for demand accesses system.cpu.dcache.overall_mshr_miss_rate::cpu.data 0.000084 # mshr miss rate for overall accesses system.cpu.dcache.overall_mshr_miss_rate::total 0.000084 # mshr miss rate for overall accesses -system.cpu.dcache.ReadReq_avg_mshr_miss_latency::cpu.data 76191.752577 # average ReadReq mshr miss latency -system.cpu.dcache.ReadReq_avg_mshr_miss_latency::total 76191.752577 # average ReadReq mshr miss latency -system.cpu.dcache.WriteReq_avg_mshr_miss_latency::cpu.data 75299.140401 # average WriteReq mshr miss latency -system.cpu.dcache.WriteReq_avg_mshr_miss_latency::total 75299.140401 # average WriteReq mshr miss latency -system.cpu.dcache.demand_avg_mshr_miss_latency::cpu.data 75493.273543 # average overall mshr miss latency -system.cpu.dcache.demand_avg_mshr_miss_latency::total 75493.273543 # average overall mshr miss latency -system.cpu.dcache.overall_avg_mshr_miss_latency::cpu.data 75493.273543 # average overall mshr miss latency -system.cpu.dcache.overall_avg_mshr_miss_latency::total 75493.273543 # average overall mshr miss latency -system.cpu.icache.tags.pwrStateResidencyTicks::UNDEFINED 51905634500 # Cumulative time (in ticks) in various power states -system.cpu.icache.tags.replacements 13853 # number of replacements -system.cpu.icache.tags.tagsinuse 1642.330146 # Cycle average of tags in use -system.cpu.icache.tags.total_refs 22935687 # Total number of references to valid blocks. -system.cpu.icache.tags.sampled_refs 15818 # Sample count of references to valid blocks. -system.cpu.icache.tags.avg_refs 1449.973891 # Average number of references to valid blocks. 
+system.cpu.dcache.ReadReq_avg_mshr_miss_latency::cpu.data 74885.245902 # average ReadReq mshr miss latency +system.cpu.dcache.ReadReq_avg_mshr_miss_latency::total 74885.245902 # average ReadReq mshr miss latency +system.cpu.dcache.WriteReq_avg_mshr_miss_latency::cpu.data 78761.904762 # average WriteReq mshr miss latency +system.cpu.dcache.WriteReq_avg_mshr_miss_latency::total 78761.904762 # average WriteReq mshr miss latency +system.cpu.dcache.demand_avg_mshr_miss_latency::cpu.data 77913.939937 # average overall mshr miss latency +system.cpu.dcache.demand_avg_mshr_miss_latency::total 77913.939937 # average overall mshr miss latency +system.cpu.dcache.overall_avg_mshr_miss_latency::cpu.data 77913.939937 # average overall mshr miss latency +system.cpu.dcache.overall_avg_mshr_miss_latency::total 77913.939937 # average overall mshr miss latency +system.cpu.icache.tags.pwrStateResidencyTicks::UNDEFINED 53344764500 # Cumulative time (in ticks) in various power states +system.cpu.icache.tags.replacements 13865 # number of replacements +system.cpu.icache.tags.tagsinuse 1642.714068 # Cycle average of tags in use +system.cpu.icache.tags.total_refs 22952789 # Total number of references to valid blocks. +system.cpu.icache.tags.sampled_refs 15830 # Sample count of references to valid blocks. +system.cpu.icache.tags.avg_refs 1449.955085 # Average number of references to valid blocks. system.cpu.icache.tags.warmup_cycle 0 # Cycle when the warmup percentage was hit. -system.cpu.icache.tags.occ_blocks::cpu.inst 1642.330146 # Average occupied blocks per requestor -system.cpu.icache.tags.occ_percent::cpu.inst 0.801919 # Average percentage of cache occupancy -system.cpu.icache.tags.occ_percent::total 0.801919 # Average percentage of cache occupancy +system.cpu.icache.tags.occ_blocks::cpu.inst 1642.714068 # Average occupied blocks per requestor +system.cpu.icache.tags.occ_percent::cpu.inst 0.802106 # Average percentage of cache occupancy +system.cpu.icache.tags.occ_percent::total 0.802106 # Average percentage of cache occupancy system.cpu.icache.tags.occ_task_id_blocks::1024 1965 # Occupied blocks per task id system.cpu.icache.tags.age_task_id_blocks_1024::0 54 # Occupied blocks per task id -system.cpu.icache.tags.age_task_id_blocks_1024::1 143 # Occupied blocks per task id -system.cpu.icache.tags.age_task_id_blocks_1024::2 672 # Occupied blocks per task id +system.cpu.icache.tags.age_task_id_blocks_1024::1 144 # Occupied blocks per task id +system.cpu.icache.tags.age_task_id_blocks_1024::2 670 # Occupied blocks per task id system.cpu.icache.tags.age_task_id_blocks_1024::3 150 # Occupied blocks per task id -system.cpu.icache.tags.age_task_id_blocks_1024::4 946 # Occupied blocks per task id +system.cpu.icache.tags.age_task_id_blocks_1024::4 947 # Occupied blocks per task id system.cpu.icache.tags.occ_task_id_percent::1024 0.959473 # Percentage of cache occupancy per task id -system.cpu.icache.tags.tag_accesses 45918830 # Number of tag accesses -system.cpu.icache.tags.data_accesses 45918830 # Number of data accesses -system.cpu.icache.pwrStateResidencyTicks::UNDEFINED 51905634500 # Cumulative time (in ticks) in various power states -system.cpu.icache.ReadReq_hits::cpu.inst 22935687 # number of ReadReq hits -system.cpu.icache.ReadReq_hits::total 22935687 # number of ReadReq hits -system.cpu.icache.demand_hits::cpu.inst 22935687 # number of demand (read+write) hits -system.cpu.icache.demand_hits::total 22935687 # number of demand (read+write) hits -system.cpu.icache.overall_hits::cpu.inst 22935687 # number of 
overall hits -system.cpu.icache.overall_hits::total 22935687 # number of overall hits -system.cpu.icache.ReadReq_misses::cpu.inst 15819 # number of ReadReq misses -system.cpu.icache.ReadReq_misses::total 15819 # number of ReadReq misses -system.cpu.icache.demand_misses::cpu.inst 15819 # number of demand (read+write) misses -system.cpu.icache.demand_misses::total 15819 # number of demand (read+write) misses -system.cpu.icache.overall_misses::cpu.inst 15819 # number of overall misses -system.cpu.icache.overall_misses::total 15819 # number of overall misses -system.cpu.icache.ReadReq_miss_latency::cpu.inst 406827000 # number of ReadReq miss cycles -system.cpu.icache.ReadReq_miss_latency::total 406827000 # number of ReadReq miss cycles -system.cpu.icache.demand_miss_latency::cpu.inst 406827000 # number of demand (read+write) miss cycles -system.cpu.icache.demand_miss_latency::total 406827000 # number of demand (read+write) miss cycles -system.cpu.icache.overall_miss_latency::cpu.inst 406827000 # number of overall miss cycles -system.cpu.icache.overall_miss_latency::total 406827000 # number of overall miss cycles -system.cpu.icache.ReadReq_accesses::cpu.inst 22951506 # number of ReadReq accesses(hits+misses) -system.cpu.icache.ReadReq_accesses::total 22951506 # number of ReadReq accesses(hits+misses) -system.cpu.icache.demand_accesses::cpu.inst 22951506 # number of demand (read+write) accesses -system.cpu.icache.demand_accesses::total 22951506 # number of demand (read+write) accesses -system.cpu.icache.overall_accesses::cpu.inst 22951506 # number of overall (read+write) accesses -system.cpu.icache.overall_accesses::total 22951506 # number of overall (read+write) accesses +system.cpu.icache.tags.tag_accesses 45953070 # Number of tag accesses +system.cpu.icache.tags.data_accesses 45953070 # Number of data accesses +system.cpu.icache.pwrStateResidencyTicks::UNDEFINED 53344764500 # Cumulative time (in ticks) in various power states +system.cpu.icache.ReadReq_hits::cpu.inst 22952789 # number of ReadReq hits +system.cpu.icache.ReadReq_hits::total 22952789 # number of ReadReq hits +system.cpu.icache.demand_hits::cpu.inst 22952789 # number of demand (read+write) hits +system.cpu.icache.demand_hits::total 22952789 # number of demand (read+write) hits +system.cpu.icache.overall_hits::cpu.inst 22952789 # number of overall hits +system.cpu.icache.overall_hits::total 22952789 # number of overall hits +system.cpu.icache.ReadReq_misses::cpu.inst 15831 # number of ReadReq misses +system.cpu.icache.ReadReq_misses::total 15831 # number of ReadReq misses +system.cpu.icache.demand_misses::cpu.inst 15831 # number of demand (read+write) misses +system.cpu.icache.demand_misses::total 15831 # number of demand (read+write) misses +system.cpu.icache.overall_misses::cpu.inst 15831 # number of overall misses +system.cpu.icache.overall_misses::total 15831 # number of overall misses +system.cpu.icache.ReadReq_miss_latency::cpu.inst 409090000 # number of ReadReq miss cycles +system.cpu.icache.ReadReq_miss_latency::total 409090000 # number of ReadReq miss cycles +system.cpu.icache.demand_miss_latency::cpu.inst 409090000 # number of demand (read+write) miss cycles +system.cpu.icache.demand_miss_latency::total 409090000 # number of demand (read+write) miss cycles +system.cpu.icache.overall_miss_latency::cpu.inst 409090000 # number of overall miss cycles +system.cpu.icache.overall_miss_latency::total 409090000 # number of overall miss cycles +system.cpu.icache.ReadReq_accesses::cpu.inst 22968620 # number of ReadReq 
accesses(hits+misses) +system.cpu.icache.ReadReq_accesses::total 22968620 # number of ReadReq accesses(hits+misses) +system.cpu.icache.demand_accesses::cpu.inst 22968620 # number of demand (read+write) accesses +system.cpu.icache.demand_accesses::total 22968620 # number of demand (read+write) accesses +system.cpu.icache.overall_accesses::cpu.inst 22968620 # number of overall (read+write) accesses +system.cpu.icache.overall_accesses::total 22968620 # number of overall (read+write) accesses system.cpu.icache.ReadReq_miss_rate::cpu.inst 0.000689 # miss rate for ReadReq accesses system.cpu.icache.ReadReq_miss_rate::total 0.000689 # miss rate for ReadReq accesses system.cpu.icache.demand_miss_rate::cpu.inst 0.000689 # miss rate for demand accesses system.cpu.icache.demand_miss_rate::total 0.000689 # miss rate for demand accesses system.cpu.icache.overall_miss_rate::cpu.inst 0.000689 # miss rate for overall accesses system.cpu.icache.overall_miss_rate::total 0.000689 # miss rate for overall accesses -system.cpu.icache.ReadReq_avg_miss_latency::cpu.inst 25717.618054 # average ReadReq miss latency -system.cpu.icache.ReadReq_avg_miss_latency::total 25717.618054 # average ReadReq miss latency -system.cpu.icache.demand_avg_miss_latency::cpu.inst 25717.618054 # average overall miss latency -system.cpu.icache.demand_avg_miss_latency::total 25717.618054 # average overall miss latency -system.cpu.icache.overall_avg_miss_latency::cpu.inst 25717.618054 # average overall miss latency -system.cpu.icache.overall_avg_miss_latency::total 25717.618054 # average overall miss latency +system.cpu.icache.ReadReq_avg_miss_latency::cpu.inst 25841.071316 # average ReadReq miss latency +system.cpu.icache.ReadReq_avg_miss_latency::total 25841.071316 # average ReadReq miss latency +system.cpu.icache.demand_avg_miss_latency::cpu.inst 25841.071316 # average overall miss latency +system.cpu.icache.demand_avg_miss_latency::total 25841.071316 # average overall miss latency +system.cpu.icache.overall_avg_miss_latency::cpu.inst 25841.071316 # average overall miss latency +system.cpu.icache.overall_avg_miss_latency::total 25841.071316 # average overall miss latency system.cpu.icache.blocked_cycles::no_mshrs 0 # number of cycles access was blocked system.cpu.icache.blocked_cycles::no_targets 0 # number of cycles access was blocked system.cpu.icache.blocked::no_mshrs 0 # number of cycles access was blocked system.cpu.icache.blocked::no_targets 0 # number of cycles access was blocked system.cpu.icache.avg_blocked_cycles::no_mshrs nan # average number of cycles each access was blocked system.cpu.icache.avg_blocked_cycles::no_targets nan # average number of cycles each access was blocked -system.cpu.icache.writebacks::writebacks 13853 # number of writebacks -system.cpu.icache.writebacks::total 13853 # number of writebacks -system.cpu.icache.ReadReq_mshr_misses::cpu.inst 15819 # number of ReadReq MSHR misses -system.cpu.icache.ReadReq_mshr_misses::total 15819 # number of ReadReq MSHR misses -system.cpu.icache.demand_mshr_misses::cpu.inst 15819 # number of demand (read+write) MSHR misses -system.cpu.icache.demand_mshr_misses::total 15819 # number of demand (read+write) MSHR misses -system.cpu.icache.overall_mshr_misses::cpu.inst 15819 # number of overall MSHR misses -system.cpu.icache.overall_mshr_misses::total 15819 # number of overall MSHR misses -system.cpu.icache.ReadReq_mshr_miss_latency::cpu.inst 391009000 # number of ReadReq MSHR miss cycles -system.cpu.icache.ReadReq_mshr_miss_latency::total 391009000 # number of ReadReq MSHR 
miss cycles -system.cpu.icache.demand_mshr_miss_latency::cpu.inst 391009000 # number of demand (read+write) MSHR miss cycles -system.cpu.icache.demand_mshr_miss_latency::total 391009000 # number of demand (read+write) MSHR miss cycles -system.cpu.icache.overall_mshr_miss_latency::cpu.inst 391009000 # number of overall MSHR miss cycles -system.cpu.icache.overall_mshr_miss_latency::total 391009000 # number of overall MSHR miss cycles +system.cpu.icache.writebacks::writebacks 13865 # number of writebacks +system.cpu.icache.writebacks::total 13865 # number of writebacks +system.cpu.icache.ReadReq_mshr_misses::cpu.inst 15831 # number of ReadReq MSHR misses +system.cpu.icache.ReadReq_mshr_misses::total 15831 # number of ReadReq MSHR misses +system.cpu.icache.demand_mshr_misses::cpu.inst 15831 # number of demand (read+write) MSHR misses +system.cpu.icache.demand_mshr_misses::total 15831 # number of demand (read+write) MSHR misses +system.cpu.icache.overall_mshr_misses::cpu.inst 15831 # number of overall MSHR misses +system.cpu.icache.overall_mshr_misses::total 15831 # number of overall MSHR misses +system.cpu.icache.ReadReq_mshr_miss_latency::cpu.inst 393260000 # number of ReadReq MSHR miss cycles +system.cpu.icache.ReadReq_mshr_miss_latency::total 393260000 # number of ReadReq MSHR miss cycles +system.cpu.icache.demand_mshr_miss_latency::cpu.inst 393260000 # number of demand (read+write) MSHR miss cycles +system.cpu.icache.demand_mshr_miss_latency::total 393260000 # number of demand (read+write) MSHR miss cycles +system.cpu.icache.overall_mshr_miss_latency::cpu.inst 393260000 # number of overall MSHR miss cycles +system.cpu.icache.overall_mshr_miss_latency::total 393260000 # number of overall MSHR miss cycles system.cpu.icache.ReadReq_mshr_miss_rate::cpu.inst 0.000689 # mshr miss rate for ReadReq accesses system.cpu.icache.ReadReq_mshr_miss_rate::total 0.000689 # mshr miss rate for ReadReq accesses system.cpu.icache.demand_mshr_miss_rate::cpu.inst 0.000689 # mshr miss rate for demand accesses system.cpu.icache.demand_mshr_miss_rate::total 0.000689 # mshr miss rate for demand accesses system.cpu.icache.overall_mshr_miss_rate::cpu.inst 0.000689 # mshr miss rate for overall accesses system.cpu.icache.overall_mshr_miss_rate::total 0.000689 # mshr miss rate for overall accesses -system.cpu.icache.ReadReq_avg_mshr_miss_latency::cpu.inst 24717.681269 # average ReadReq mshr miss latency -system.cpu.icache.ReadReq_avg_mshr_miss_latency::total 24717.681269 # average ReadReq mshr miss latency -system.cpu.icache.demand_avg_mshr_miss_latency::cpu.inst 24717.681269 # average overall mshr miss latency -system.cpu.icache.demand_avg_mshr_miss_latency::total 24717.681269 # average overall mshr miss latency -system.cpu.icache.overall_avg_mshr_miss_latency::cpu.inst 24717.681269 # average overall mshr miss latency -system.cpu.icache.overall_avg_mshr_miss_latency::total 24717.681269 # average overall mshr miss latency -system.cpu.l2cache.tags.pwrStateResidencyTicks::UNDEFINED 51905634500 # Cumulative time (in ticks) in various power states +system.cpu.icache.ReadReq_avg_mshr_miss_latency::cpu.inst 24841.134483 # average ReadReq mshr miss latency +system.cpu.icache.ReadReq_avg_mshr_miss_latency::total 24841.134483 # average ReadReq mshr miss latency +system.cpu.icache.demand_avg_mshr_miss_latency::cpu.inst 24841.134483 # average overall mshr miss latency +system.cpu.icache.demand_avg_mshr_miss_latency::total 24841.134483 # average overall mshr miss latency +system.cpu.icache.overall_avg_mshr_miss_latency::cpu.inst 
24841.134483 # average overall mshr miss latency +system.cpu.icache.overall_avg_mshr_miss_latency::total 24841.134483 # average overall mshr miss latency +system.cpu.l2cache.tags.pwrStateResidencyTicks::UNDEFINED 53344764500 # Cumulative time (in ticks) in various power states system.cpu.l2cache.tags.replacements 0 # number of replacements -system.cpu.l2cache.tags.tagsinuse 2479.710860 # Cycle average of tags in use -system.cpu.l2cache.tags.total_refs 26619 # Total number of references to valid blocks. -system.cpu.l2cache.tags.sampled_refs 3667 # Sample count of references to valid blocks. -system.cpu.l2cache.tags.avg_refs 7.259067 # Average number of references to valid blocks. +system.cpu.l2cache.tags.tagsinuse 2482.282304 # Cycle average of tags in use +system.cpu.l2cache.tags.total_refs 26642 # Total number of references to valid blocks. +system.cpu.l2cache.tags.sampled_refs 3671 # Sample count of references to valid blocks. +system.cpu.l2cache.tags.avg_refs 7.257423 # Average number of references to valid blocks. system.cpu.l2cache.tags.warmup_cycle 0 # Cycle when the warmup percentage was hit. -system.cpu.l2cache.tags.occ_blocks::writebacks 17.780381 # Average occupied blocks per requestor -system.cpu.l2cache.tags.occ_blocks::cpu.inst 2101.965355 # Average occupied blocks per requestor -system.cpu.l2cache.tags.occ_blocks::cpu.data 359.965124 # Average occupied blocks per requestor -system.cpu.l2cache.tags.occ_percent::writebacks 0.000543 # Average percentage of cache occupancy -system.cpu.l2cache.tags.occ_percent::cpu.inst 0.064147 # Average percentage of cache occupancy -system.cpu.l2cache.tags.occ_percent::cpu.data 0.010985 # Average percentage of cache occupancy -system.cpu.l2cache.tags.occ_percent::total 0.075675 # Average percentage of cache occupancy -system.cpu.l2cache.tags.occ_task_id_blocks::1024 3667 # Occupied blocks per task id +system.cpu.l2cache.tags.occ_blocks::writebacks 17.761061 # Average occupied blocks per requestor +system.cpu.l2cache.tags.occ_blocks::cpu.inst 2102.458659 # Average occupied blocks per requestor +system.cpu.l2cache.tags.occ_blocks::cpu.data 362.062585 # Average occupied blocks per requestor +system.cpu.l2cache.tags.occ_percent::writebacks 0.000542 # Average percentage of cache occupancy +system.cpu.l2cache.tags.occ_percent::cpu.inst 0.064162 # Average percentage of cache occupancy +system.cpu.l2cache.tags.occ_percent::cpu.data 0.011049 # Average percentage of cache occupancy +system.cpu.l2cache.tags.occ_percent::total 0.075753 # Average percentage of cache occupancy +system.cpu.l2cache.tags.occ_task_id_blocks::1024 3671 # Occupied blocks per task id system.cpu.l2cache.tags.age_task_id_blocks_1024::0 65 # Occupied blocks per task id -system.cpu.l2cache.tags.age_task_id_blocks_1024::1 142 # Occupied blocks per task id -system.cpu.l2cache.tags.age_task_id_blocks_1024::2 770 # Occupied blocks per task id +system.cpu.l2cache.tags.age_task_id_blocks_1024::1 143 # Occupied blocks per task id +system.cpu.l2cache.tags.age_task_id_blocks_1024::2 771 # Occupied blocks per task id system.cpu.l2cache.tags.age_task_id_blocks_1024::3 183 # Occupied blocks per task id -system.cpu.l2cache.tags.age_task_id_blocks_1024::4 2507 # Occupied blocks per task id -system.cpu.l2cache.tags.occ_task_id_percent::1024 0.111908 # Percentage of cache occupancy per task id -system.cpu.l2cache.tags.tag_accesses 261876 # Number of tag accesses -system.cpu.l2cache.tags.data_accesses 261876 # Number of data accesses -system.cpu.l2cache.pwrStateResidencyTicks::UNDEFINED 51905634500 # 
Cumulative time (in ticks) in various power states +system.cpu.l2cache.tags.age_task_id_blocks_1024::4 2509 # Occupied blocks per task id +system.cpu.l2cache.tags.occ_task_id_percent::1024 0.112030 # Percentage of cache occupancy per task id +system.cpu.l2cache.tags.tag_accesses 262078 # Number of tag accesses +system.cpu.l2cache.tags.data_accesses 262078 # Number of data accesses +system.cpu.l2cache.pwrStateResidencyTicks::UNDEFINED 53344764500 # Cumulative time (in ticks) in various power states system.cpu.l2cache.WritebackDirty_hits::writebacks 107 # number of WritebackDirty hits system.cpu.l2cache.WritebackDirty_hits::total 107 # number of WritebackDirty hits -system.cpu.l2cache.WritebackClean_hits::writebacks 13853 # number of WritebackClean hits -system.cpu.l2cache.WritebackClean_hits::total 13853 # number of WritebackClean hits +system.cpu.l2cache.WritebackClean_hits::writebacks 13865 # number of WritebackClean hits +system.cpu.l2cache.WritebackClean_hits::total 13865 # number of WritebackClean hits system.cpu.l2cache.ReadExReq_hits::cpu.data 26 # number of ReadExReq hits system.cpu.l2cache.ReadExReq_hits::total 26 # number of ReadExReq hits -system.cpu.l2cache.ReadCleanReq_hits::cpu.inst 12649 # number of ReadCleanReq hits -system.cpu.l2cache.ReadCleanReq_hits::total 12649 # number of ReadCleanReq hits +system.cpu.l2cache.ReadCleanReq_hits::cpu.inst 12660 # number of ReadCleanReq hits +system.cpu.l2cache.ReadCleanReq_hits::total 12660 # number of ReadCleanReq hits system.cpu.l2cache.ReadSharedReq_hits::cpu.data 53 # number of ReadSharedReq hits system.cpu.l2cache.ReadSharedReq_hits::total 53 # number of ReadSharedReq hits -system.cpu.l2cache.demand_hits::cpu.inst 12649 # number of demand (read+write) hits +system.cpu.l2cache.demand_hits::cpu.inst 12660 # number of demand (read+write) hits system.cpu.l2cache.demand_hits::cpu.data 79 # number of demand (read+write) hits -system.cpu.l2cache.demand_hits::total 12728 # number of demand (read+write) hits -system.cpu.l2cache.overall_hits::cpu.inst 12649 # number of overall hits +system.cpu.l2cache.demand_hits::total 12739 # number of demand (read+write) hits +system.cpu.l2cache.overall_hits::cpu.inst 12660 # number of overall hits system.cpu.l2cache.overall_hits::cpu.data 79 # number of overall hits -system.cpu.l2cache.overall_hits::total 12728 # number of overall hits -system.cpu.l2cache.ReadExReq_misses::cpu.data 1719 # number of ReadExReq misses -system.cpu.l2cache.ReadExReq_misses::total 1719 # number of ReadExReq misses -system.cpu.l2cache.ReadCleanReq_misses::cpu.inst 3169 # number of ReadCleanReq misses -system.cpu.l2cache.ReadCleanReq_misses::total 3169 # number of ReadCleanReq misses -system.cpu.l2cache.ReadSharedReq_misses::cpu.data 432 # number of ReadSharedReq misses -system.cpu.l2cache.ReadSharedReq_misses::total 432 # number of ReadSharedReq misses -system.cpu.l2cache.demand_misses::cpu.inst 3169 # number of demand (read+write) misses -system.cpu.l2cache.demand_misses::cpu.data 2151 # number of demand (read+write) misses -system.cpu.l2cache.demand_misses::total 5320 # number of demand (read+write) misses -system.cpu.l2cache.overall_misses::cpu.inst 3169 # number of overall misses -system.cpu.l2cache.overall_misses::cpu.data 2151 # number of overall misses -system.cpu.l2cache.overall_misses::total 5320 # number of overall misses -system.cpu.l2cache.ReadExReq_miss_latency::cpu.data 128506000 # number of ReadExReq miss cycles -system.cpu.l2cache.ReadExReq_miss_latency::total 128506000 # number of ReadExReq miss cycles 
-system.cpu.l2cache.ReadCleanReq_miss_latency::cpu.inst 234465500 # number of ReadCleanReq miss cycles -system.cpu.l2cache.ReadCleanReq_miss_latency::total 234465500 # number of ReadCleanReq miss cycles -system.cpu.l2cache.ReadSharedReq_miss_latency::cpu.data 35663000 # number of ReadSharedReq miss cycles -system.cpu.l2cache.ReadSharedReq_miss_latency::total 35663000 # number of ReadSharedReq miss cycles -system.cpu.l2cache.demand_miss_latency::cpu.inst 234465500 # number of demand (read+write) miss cycles -system.cpu.l2cache.demand_miss_latency::cpu.data 164169000 # number of demand (read+write) miss cycles -system.cpu.l2cache.demand_miss_latency::total 398634500 # number of demand (read+write) miss cycles -system.cpu.l2cache.overall_miss_latency::cpu.inst 234465500 # number of overall miss cycles -system.cpu.l2cache.overall_miss_latency::cpu.data 164169000 # number of overall miss cycles -system.cpu.l2cache.overall_miss_latency::total 398634500 # number of overall miss cycles +system.cpu.l2cache.overall_hits::total 12739 # number of overall hits +system.cpu.l2cache.ReadExReq_misses::cpu.data 1717 # number of ReadExReq misses +system.cpu.l2cache.ReadExReq_misses::total 1717 # number of ReadExReq misses +system.cpu.l2cache.ReadCleanReq_misses::cpu.inst 3170 # number of ReadCleanReq misses +system.cpu.l2cache.ReadCleanReq_misses::total 3170 # number of ReadCleanReq misses +system.cpu.l2cache.ReadSharedReq_misses::cpu.data 435 # number of ReadSharedReq misses +system.cpu.l2cache.ReadSharedReq_misses::total 435 # number of ReadSharedReq misses +system.cpu.l2cache.demand_misses::cpu.inst 3170 # number of demand (read+write) misses +system.cpu.l2cache.demand_misses::cpu.data 2152 # number of demand (read+write) misses +system.cpu.l2cache.demand_misses::total 5322 # number of demand (read+write) misses +system.cpu.l2cache.overall_misses::cpu.inst 3170 # number of overall misses +system.cpu.l2cache.overall_misses::cpu.data 2152 # number of overall misses +system.cpu.l2cache.overall_misses::total 5322 # number of overall misses +system.cpu.l2cache.ReadExReq_miss_latency::cpu.data 134394000 # number of ReadExReq miss cycles +system.cpu.l2cache.ReadExReq_miss_latency::total 134394000 # number of ReadExReq miss cycles +system.cpu.l2cache.ReadCleanReq_miss_latency::cpu.inst 236583500 # number of ReadCleanReq miss cycles +system.cpu.l2cache.ReadCleanReq_miss_latency::total 236583500 # number of ReadCleanReq miss cycles +system.cpu.l2cache.ReadSharedReq_miss_latency::cpu.data 35249500 # number of ReadSharedReq miss cycles +system.cpu.l2cache.ReadSharedReq_miss_latency::total 35249500 # number of ReadSharedReq miss cycles +system.cpu.l2cache.demand_miss_latency::cpu.inst 236583500 # number of demand (read+write) miss cycles +system.cpu.l2cache.demand_miss_latency::cpu.data 169643500 # number of demand (read+write) miss cycles +system.cpu.l2cache.demand_miss_latency::total 406227000 # number of demand (read+write) miss cycles +system.cpu.l2cache.overall_miss_latency::cpu.inst 236583500 # number of overall miss cycles +system.cpu.l2cache.overall_miss_latency::cpu.data 169643500 # number of overall miss cycles +system.cpu.l2cache.overall_miss_latency::total 406227000 # number of overall miss cycles system.cpu.l2cache.WritebackDirty_accesses::writebacks 107 # number of WritebackDirty accesses(hits+misses) system.cpu.l2cache.WritebackDirty_accesses::total 107 # number of WritebackDirty accesses(hits+misses) -system.cpu.l2cache.WritebackClean_accesses::writebacks 13853 # number of WritebackClean 
accesses(hits+misses) -system.cpu.l2cache.WritebackClean_accesses::total 13853 # number of WritebackClean accesses(hits+misses) -system.cpu.l2cache.ReadExReq_accesses::cpu.data 1745 # number of ReadExReq accesses(hits+misses) -system.cpu.l2cache.ReadExReq_accesses::total 1745 # number of ReadExReq accesses(hits+misses) -system.cpu.l2cache.ReadCleanReq_accesses::cpu.inst 15818 # number of ReadCleanReq accesses(hits+misses) -system.cpu.l2cache.ReadCleanReq_accesses::total 15818 # number of ReadCleanReq accesses(hits+misses) -system.cpu.l2cache.ReadSharedReq_accesses::cpu.data 485 # number of ReadSharedReq accesses(hits+misses) -system.cpu.l2cache.ReadSharedReq_accesses::total 485 # number of ReadSharedReq accesses(hits+misses) -system.cpu.l2cache.demand_accesses::cpu.inst 15818 # number of demand (read+write) accesses -system.cpu.l2cache.demand_accesses::cpu.data 2230 # number of demand (read+write) accesses -system.cpu.l2cache.demand_accesses::total 18048 # number of demand (read+write) accesses -system.cpu.l2cache.overall_accesses::cpu.inst 15818 # number of overall (read+write) accesses -system.cpu.l2cache.overall_accesses::cpu.data 2230 # number of overall (read+write) accesses -system.cpu.l2cache.overall_accesses::total 18048 # number of overall (read+write) accesses -system.cpu.l2cache.ReadExReq_miss_rate::cpu.data 0.985100 # miss rate for ReadExReq accesses -system.cpu.l2cache.ReadExReq_miss_rate::total 0.985100 # miss rate for ReadExReq accesses -system.cpu.l2cache.ReadCleanReq_miss_rate::cpu.inst 0.200341 # miss rate for ReadCleanReq accesses -system.cpu.l2cache.ReadCleanReq_miss_rate::total 0.200341 # miss rate for ReadCleanReq accesses -system.cpu.l2cache.ReadSharedReq_miss_rate::cpu.data 0.890722 # miss rate for ReadSharedReq accesses -system.cpu.l2cache.ReadSharedReq_miss_rate::total 0.890722 # miss rate for ReadSharedReq accesses -system.cpu.l2cache.demand_miss_rate::cpu.inst 0.200341 # miss rate for demand accesses -system.cpu.l2cache.demand_miss_rate::cpu.data 0.964574 # miss rate for demand accesses -system.cpu.l2cache.demand_miss_rate::total 0.294770 # miss rate for demand accesses -system.cpu.l2cache.overall_miss_rate::cpu.inst 0.200341 # miss rate for overall accesses -system.cpu.l2cache.overall_miss_rate::cpu.data 0.964574 # miss rate for overall accesses -system.cpu.l2cache.overall_miss_rate::total 0.294770 # miss rate for overall accesses -system.cpu.l2cache.ReadExReq_avg_miss_latency::cpu.data 74756.253636 # average ReadExReq miss latency -system.cpu.l2cache.ReadExReq_avg_miss_latency::total 74756.253636 # average ReadExReq miss latency -system.cpu.l2cache.ReadCleanReq_avg_miss_latency::cpu.inst 73987.219943 # average ReadCleanReq miss latency -system.cpu.l2cache.ReadCleanReq_avg_miss_latency::total 73987.219943 # average ReadCleanReq miss latency -system.cpu.l2cache.ReadSharedReq_avg_miss_latency::cpu.data 82553.240741 # average ReadSharedReq miss latency -system.cpu.l2cache.ReadSharedReq_avg_miss_latency::total 82553.240741 # average ReadSharedReq miss latency -system.cpu.l2cache.demand_avg_miss_latency::cpu.inst 73987.219943 # average overall miss latency -system.cpu.l2cache.demand_avg_miss_latency::cpu.data 76322.175732 # average overall miss latency -system.cpu.l2cache.demand_avg_miss_latency::total 74931.296992 # average overall miss latency -system.cpu.l2cache.overall_avg_miss_latency::cpu.inst 73987.219943 # average overall miss latency -system.cpu.l2cache.overall_avg_miss_latency::cpu.data 76322.175732 # average overall miss latency 
-system.cpu.l2cache.overall_avg_miss_latency::total 74931.296992 # average overall miss latency +system.cpu.l2cache.WritebackClean_accesses::writebacks 13865 # number of WritebackClean accesses(hits+misses) +system.cpu.l2cache.WritebackClean_accesses::total 13865 # number of WritebackClean accesses(hits+misses) +system.cpu.l2cache.ReadExReq_accesses::cpu.data 1743 # number of ReadExReq accesses(hits+misses) +system.cpu.l2cache.ReadExReq_accesses::total 1743 # number of ReadExReq accesses(hits+misses) +system.cpu.l2cache.ReadCleanReq_accesses::cpu.inst 15830 # number of ReadCleanReq accesses(hits+misses) +system.cpu.l2cache.ReadCleanReq_accesses::total 15830 # number of ReadCleanReq accesses(hits+misses) +system.cpu.l2cache.ReadSharedReq_accesses::cpu.data 488 # number of ReadSharedReq accesses(hits+misses) +system.cpu.l2cache.ReadSharedReq_accesses::total 488 # number of ReadSharedReq accesses(hits+misses) +system.cpu.l2cache.demand_accesses::cpu.inst 15830 # number of demand (read+write) accesses +system.cpu.l2cache.demand_accesses::cpu.data 2231 # number of demand (read+write) accesses +system.cpu.l2cache.demand_accesses::total 18061 # number of demand (read+write) accesses +system.cpu.l2cache.overall_accesses::cpu.inst 15830 # number of overall (read+write) accesses +system.cpu.l2cache.overall_accesses::cpu.data 2231 # number of overall (read+write) accesses +system.cpu.l2cache.overall_accesses::total 18061 # number of overall (read+write) accesses +system.cpu.l2cache.ReadExReq_miss_rate::cpu.data 0.985083 # miss rate for ReadExReq accesses +system.cpu.l2cache.ReadExReq_miss_rate::total 0.985083 # miss rate for ReadExReq accesses +system.cpu.l2cache.ReadCleanReq_miss_rate::cpu.inst 0.200253 # miss rate for ReadCleanReq accesses +system.cpu.l2cache.ReadCleanReq_miss_rate::total 0.200253 # miss rate for ReadCleanReq accesses +system.cpu.l2cache.ReadSharedReq_miss_rate::cpu.data 0.891393 # miss rate for ReadSharedReq accesses +system.cpu.l2cache.ReadSharedReq_miss_rate::total 0.891393 # miss rate for ReadSharedReq accesses +system.cpu.l2cache.demand_miss_rate::cpu.inst 0.200253 # miss rate for demand accesses +system.cpu.l2cache.demand_miss_rate::cpu.data 0.964590 # miss rate for demand accesses +system.cpu.l2cache.demand_miss_rate::total 0.294668 # miss rate for demand accesses +system.cpu.l2cache.overall_miss_rate::cpu.inst 0.200253 # miss rate for overall accesses +system.cpu.l2cache.overall_miss_rate::cpu.data 0.964590 # miss rate for overall accesses +system.cpu.l2cache.overall_miss_rate::total 0.294668 # miss rate for overall accesses +system.cpu.l2cache.ReadExReq_avg_miss_latency::cpu.data 78272.568433 # average ReadExReq miss latency +system.cpu.l2cache.ReadExReq_avg_miss_latency::total 78272.568433 # average ReadExReq miss latency +system.cpu.l2cache.ReadCleanReq_avg_miss_latency::cpu.inst 74632.018927 # average ReadCleanReq miss latency +system.cpu.l2cache.ReadCleanReq_avg_miss_latency::total 74632.018927 # average ReadCleanReq miss latency +system.cpu.l2cache.ReadSharedReq_avg_miss_latency::cpu.data 81033.333333 # average ReadSharedReq miss latency +system.cpu.l2cache.ReadSharedReq_avg_miss_latency::total 81033.333333 # average ReadSharedReq miss latency +system.cpu.l2cache.demand_avg_miss_latency::cpu.inst 74632.018927 # average overall miss latency +system.cpu.l2cache.demand_avg_miss_latency::cpu.data 78830.622677 # average overall miss latency +system.cpu.l2cache.demand_avg_miss_latency::total 76329.763247 # average overall miss latency 
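The L2 overall miss rates reported above split cleanly by requestor; a short sketch (Python, not part of the diff; counters copied from the new reference output) recomputes them:

    # Hypothetical check, not part of gem5 or this diff.
    accesses = {"cpu.inst": 15_830, "cpu.data": 2_231}   # l2cache.overall_accesses::*
    misses   = {"cpu.inst":  3_170, "cpu.data": 2_152}   # l2cache.overall_misses::*

    for src in accesses:
        print(f"{src}: miss rate ~= {misses[src] / accesses[src]:.6f}")
        # cpu.inst ~0.200253, cpu.data ~0.964590
    print(f"total: miss rate ~= {sum(misses.values()) / sum(accesses.values()):.6f}")  # ~0.294668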
+system.cpu.l2cache.overall_avg_miss_latency::cpu.inst 74632.018927 # average overall miss latency +system.cpu.l2cache.overall_avg_miss_latency::cpu.data 78830.622677 # average overall miss latency +system.cpu.l2cache.overall_avg_miss_latency::total 76329.763247 # average overall miss latency system.cpu.l2cache.blocked_cycles::no_mshrs 0 # number of cycles access was blocked system.cpu.l2cache.blocked_cycles::no_targets 0 # number of cycles access was blocked system.cpu.l2cache.blocked::no_mshrs 0 # number of cycles access was blocked system.cpu.l2cache.blocked::no_targets 0 # number of cycles access was blocked system.cpu.l2cache.avg_blocked_cycles::no_mshrs nan # average number of cycles each access was blocked system.cpu.l2cache.avg_blocked_cycles::no_targets nan # average number of cycles each access was blocked -system.cpu.l2cache.ReadExReq_mshr_misses::cpu.data 1719 # number of ReadExReq MSHR misses -system.cpu.l2cache.ReadExReq_mshr_misses::total 1719 # number of ReadExReq MSHR misses -system.cpu.l2cache.ReadCleanReq_mshr_misses::cpu.inst 3169 # number of ReadCleanReq MSHR misses -system.cpu.l2cache.ReadCleanReq_mshr_misses::total 3169 # number of ReadCleanReq MSHR misses -system.cpu.l2cache.ReadSharedReq_mshr_misses::cpu.data 432 # number of ReadSharedReq MSHR misses -system.cpu.l2cache.ReadSharedReq_mshr_misses::total 432 # number of ReadSharedReq MSHR misses -system.cpu.l2cache.demand_mshr_misses::cpu.inst 3169 # number of demand (read+write) MSHR misses -system.cpu.l2cache.demand_mshr_misses::cpu.data 2151 # number of demand (read+write) MSHR misses -system.cpu.l2cache.demand_mshr_misses::total 5320 # number of demand (read+write) MSHR misses -system.cpu.l2cache.overall_mshr_misses::cpu.inst 3169 # number of overall MSHR misses -system.cpu.l2cache.overall_mshr_misses::cpu.data 2151 # number of overall MSHR misses -system.cpu.l2cache.overall_mshr_misses::total 5320 # number of overall MSHR misses -system.cpu.l2cache.ReadExReq_mshr_miss_latency::cpu.data 111316000 # number of ReadExReq MSHR miss cycles -system.cpu.l2cache.ReadExReq_mshr_miss_latency::total 111316000 # number of ReadExReq MSHR miss cycles -system.cpu.l2cache.ReadCleanReq_mshr_miss_latency::cpu.inst 202775500 # number of ReadCleanReq MSHR miss cycles -system.cpu.l2cache.ReadCleanReq_mshr_miss_latency::total 202775500 # number of ReadCleanReq MSHR miss cycles -system.cpu.l2cache.ReadSharedReq_mshr_miss_latency::cpu.data 31343000 # number of ReadSharedReq MSHR miss cycles -system.cpu.l2cache.ReadSharedReq_mshr_miss_latency::total 31343000 # number of ReadSharedReq MSHR miss cycles -system.cpu.l2cache.demand_mshr_miss_latency::cpu.inst 202775500 # number of demand (read+write) MSHR miss cycles -system.cpu.l2cache.demand_mshr_miss_latency::cpu.data 142659000 # number of demand (read+write) MSHR miss cycles -system.cpu.l2cache.demand_mshr_miss_latency::total 345434500 # number of demand (read+write) MSHR miss cycles -system.cpu.l2cache.overall_mshr_miss_latency::cpu.inst 202775500 # number of overall MSHR miss cycles -system.cpu.l2cache.overall_mshr_miss_latency::cpu.data 142659000 # number of overall MSHR miss cycles -system.cpu.l2cache.overall_mshr_miss_latency::total 345434500 # number of overall MSHR miss cycles -system.cpu.l2cache.ReadExReq_mshr_miss_rate::cpu.data 0.985100 # mshr miss rate for ReadExReq accesses -system.cpu.l2cache.ReadExReq_mshr_miss_rate::total 0.985100 # mshr miss rate for ReadExReq accesses -system.cpu.l2cache.ReadCleanReq_mshr_miss_rate::cpu.inst 0.200341 # mshr miss rate for ReadCleanReq 
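The per-request-type L2 MSHR counters above also aggregate into the demand/overall rows (cpu.data issues ReadExReq and ReadSharedReq, cpu.inst issues ReadCleanReq). A brief sketch (Python, not part of the diff; values copied from the new reference output) confirms the sums:

    # Hypothetical aggregation check, not part of gem5 or this diff.
    mshr_misses = {"ReadCleanReq": 3_170, "ReadExReq": 1_717, "ReadSharedReq": 435}
    mshr_lat    = {"ReadCleanReq": 204_883_500, "ReadExReq": 117_224_000,
                   "ReadSharedReq": 30_899_500}           # ticks

    data_misses  = mshr_misses["ReadExReq"] + mshr_misses["ReadSharedReq"]   # 2152      (::cpu.data)
    total_misses = data_misses + mshr_misses["ReadCleanReq"]                 # 5322      (::total)
    data_lat     = mshr_lat["ReadExReq"] + mshr_lat["ReadSharedReq"]         # 148123500 (::cpu.data)
    total_lat    = data_lat + mshr_lat["ReadCleanReq"]                       # 353007000 (::total)
    print(data_misses, total_misses, data_lat, total_lat)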
accesses -system.cpu.l2cache.ReadCleanReq_mshr_miss_rate::total 0.200341 # mshr miss rate for ReadCleanReq accesses -system.cpu.l2cache.ReadSharedReq_mshr_miss_rate::cpu.data 0.890722 # mshr miss rate for ReadSharedReq accesses -system.cpu.l2cache.ReadSharedReq_mshr_miss_rate::total 0.890722 # mshr miss rate for ReadSharedReq accesses -system.cpu.l2cache.demand_mshr_miss_rate::cpu.inst 0.200341 # mshr miss rate for demand accesses -system.cpu.l2cache.demand_mshr_miss_rate::cpu.data 0.964574 # mshr miss rate for demand accesses -system.cpu.l2cache.demand_mshr_miss_rate::total 0.294770 # mshr miss rate for demand accesses -system.cpu.l2cache.overall_mshr_miss_rate::cpu.inst 0.200341 # mshr miss rate for overall accesses -system.cpu.l2cache.overall_mshr_miss_rate::cpu.data 0.964574 # mshr miss rate for overall accesses -system.cpu.l2cache.overall_mshr_miss_rate::total 0.294770 # mshr miss rate for overall accesses -system.cpu.l2cache.ReadExReq_avg_mshr_miss_latency::cpu.data 64756.253636 # average ReadExReq mshr miss latency -system.cpu.l2cache.ReadExReq_avg_mshr_miss_latency::total 64756.253636 # average ReadExReq mshr miss latency -system.cpu.l2cache.ReadCleanReq_avg_mshr_miss_latency::cpu.inst 63987.219943 # average ReadCleanReq mshr miss latency -system.cpu.l2cache.ReadCleanReq_avg_mshr_miss_latency::total 63987.219943 # average ReadCleanReq mshr miss latency -system.cpu.l2cache.ReadSharedReq_avg_mshr_miss_latency::cpu.data 72553.240741 # average ReadSharedReq mshr miss latency -system.cpu.l2cache.ReadSharedReq_avg_mshr_miss_latency::total 72553.240741 # average ReadSharedReq mshr miss latency -system.cpu.l2cache.demand_avg_mshr_miss_latency::cpu.inst 63987.219943 # average overall mshr miss latency -system.cpu.l2cache.demand_avg_mshr_miss_latency::cpu.data 66322.175732 # average overall mshr miss latency -system.cpu.l2cache.demand_avg_mshr_miss_latency::total 64931.296992 # average overall mshr miss latency -system.cpu.l2cache.overall_avg_mshr_miss_latency::cpu.inst 63987.219943 # average overall mshr miss latency -system.cpu.l2cache.overall_avg_mshr_miss_latency::cpu.data 66322.175732 # average overall mshr miss latency -system.cpu.l2cache.overall_avg_mshr_miss_latency::total 64931.296992 # average overall mshr miss latency -system.cpu.toL2Bus.snoop_filter.tot_requests 32058 # Total number of requests made to the snoop filter. -system.cpu.toL2Bus.snoop_filter.hit_single_requests 14010 # Number of requests hitting in the snoop filter with a single holder of the requested data. 
+system.cpu.l2cache.ReadExReq_mshr_misses::cpu.data 1717 # number of ReadExReq MSHR misses +system.cpu.l2cache.ReadExReq_mshr_misses::total 1717 # number of ReadExReq MSHR misses +system.cpu.l2cache.ReadCleanReq_mshr_misses::cpu.inst 3170 # number of ReadCleanReq MSHR misses +system.cpu.l2cache.ReadCleanReq_mshr_misses::total 3170 # number of ReadCleanReq MSHR misses +system.cpu.l2cache.ReadSharedReq_mshr_misses::cpu.data 435 # number of ReadSharedReq MSHR misses +system.cpu.l2cache.ReadSharedReq_mshr_misses::total 435 # number of ReadSharedReq MSHR misses +system.cpu.l2cache.demand_mshr_misses::cpu.inst 3170 # number of demand (read+write) MSHR misses +system.cpu.l2cache.demand_mshr_misses::cpu.data 2152 # number of demand (read+write) MSHR misses +system.cpu.l2cache.demand_mshr_misses::total 5322 # number of demand (read+write) MSHR misses +system.cpu.l2cache.overall_mshr_misses::cpu.inst 3170 # number of overall MSHR misses +system.cpu.l2cache.overall_mshr_misses::cpu.data 2152 # number of overall MSHR misses +system.cpu.l2cache.overall_mshr_misses::total 5322 # number of overall MSHR misses +system.cpu.l2cache.ReadExReq_mshr_miss_latency::cpu.data 117224000 # number of ReadExReq MSHR miss cycles +system.cpu.l2cache.ReadExReq_mshr_miss_latency::total 117224000 # number of ReadExReq MSHR miss cycles +system.cpu.l2cache.ReadCleanReq_mshr_miss_latency::cpu.inst 204883500 # number of ReadCleanReq MSHR miss cycles +system.cpu.l2cache.ReadCleanReq_mshr_miss_latency::total 204883500 # number of ReadCleanReq MSHR miss cycles +system.cpu.l2cache.ReadSharedReq_mshr_miss_latency::cpu.data 30899500 # number of ReadSharedReq MSHR miss cycles +system.cpu.l2cache.ReadSharedReq_mshr_miss_latency::total 30899500 # number of ReadSharedReq MSHR miss cycles +system.cpu.l2cache.demand_mshr_miss_latency::cpu.inst 204883500 # number of demand (read+write) MSHR miss cycles +system.cpu.l2cache.demand_mshr_miss_latency::cpu.data 148123500 # number of demand (read+write) MSHR miss cycles +system.cpu.l2cache.demand_mshr_miss_latency::total 353007000 # number of demand (read+write) MSHR miss cycles +system.cpu.l2cache.overall_mshr_miss_latency::cpu.inst 204883500 # number of overall MSHR miss cycles +system.cpu.l2cache.overall_mshr_miss_latency::cpu.data 148123500 # number of overall MSHR miss cycles +system.cpu.l2cache.overall_mshr_miss_latency::total 353007000 # number of overall MSHR miss cycles +system.cpu.l2cache.ReadExReq_mshr_miss_rate::cpu.data 0.985083 # mshr miss rate for ReadExReq accesses +system.cpu.l2cache.ReadExReq_mshr_miss_rate::total 0.985083 # mshr miss rate for ReadExReq accesses +system.cpu.l2cache.ReadCleanReq_mshr_miss_rate::cpu.inst 0.200253 # mshr miss rate for ReadCleanReq accesses +system.cpu.l2cache.ReadCleanReq_mshr_miss_rate::total 0.200253 # mshr miss rate for ReadCleanReq accesses +system.cpu.l2cache.ReadSharedReq_mshr_miss_rate::cpu.data 0.891393 # mshr miss rate for ReadSharedReq accesses +system.cpu.l2cache.ReadSharedReq_mshr_miss_rate::total 0.891393 # mshr miss rate for ReadSharedReq accesses +system.cpu.l2cache.demand_mshr_miss_rate::cpu.inst 0.200253 # mshr miss rate for demand accesses +system.cpu.l2cache.demand_mshr_miss_rate::cpu.data 0.964590 # mshr miss rate for demand accesses +system.cpu.l2cache.demand_mshr_miss_rate::total 0.294668 # mshr miss rate for demand accesses +system.cpu.l2cache.overall_mshr_miss_rate::cpu.inst 0.200253 # mshr miss rate for overall accesses +system.cpu.l2cache.overall_mshr_miss_rate::cpu.data 0.964590 # mshr miss rate for overall accesses 
+system.cpu.l2cache.overall_mshr_miss_rate::total 0.294668 # mshr miss rate for overall accesses +system.cpu.l2cache.ReadExReq_avg_mshr_miss_latency::cpu.data 68272.568433 # average ReadExReq mshr miss latency +system.cpu.l2cache.ReadExReq_avg_mshr_miss_latency::total 68272.568433 # average ReadExReq mshr miss latency +system.cpu.l2cache.ReadCleanReq_avg_mshr_miss_latency::cpu.inst 64632.018927 # average ReadCleanReq mshr miss latency +system.cpu.l2cache.ReadCleanReq_avg_mshr_miss_latency::total 64632.018927 # average ReadCleanReq mshr miss latency +system.cpu.l2cache.ReadSharedReq_avg_mshr_miss_latency::cpu.data 71033.333333 # average ReadSharedReq mshr miss latency +system.cpu.l2cache.ReadSharedReq_avg_mshr_miss_latency::total 71033.333333 # average ReadSharedReq mshr miss latency +system.cpu.l2cache.demand_avg_mshr_miss_latency::cpu.inst 64632.018927 # average overall mshr miss latency +system.cpu.l2cache.demand_avg_mshr_miss_latency::cpu.data 68830.622677 # average overall mshr miss latency +system.cpu.l2cache.demand_avg_mshr_miss_latency::total 66329.763247 # average overall mshr miss latency +system.cpu.l2cache.overall_avg_mshr_miss_latency::cpu.inst 64632.018927 # average overall mshr miss latency +system.cpu.l2cache.overall_avg_mshr_miss_latency::cpu.data 68830.622677 # average overall mshr miss latency +system.cpu.l2cache.overall_avg_mshr_miss_latency::total 66329.763247 # average overall mshr miss latency +system.cpu.toL2Bus.snoop_filter.tot_requests 32083 # Total number of requests made to the snoop filter. +system.cpu.toL2Bus.snoop_filter.hit_single_requests 14022 # Number of requests hitting in the snoop filter with a single holder of the requested data. system.cpu.toL2Bus.snoop_filter.hit_multi_requests 0 # Number of requests hitting in the snoop filter with multiple (>1) holders of the requested data. system.cpu.toL2Bus.snoop_filter.tot_snoops 0 # Total number of snoops made to the snoop filter. system.cpu.toL2Bus.snoop_filter.hit_single_snoops 0 # Number of snoops hitting in the snoop filter with a single holder of the requested data. system.cpu.toL2Bus.snoop_filter.hit_multi_snoops 0 # Number of snoops hitting in the snoop filter with multiple (>1) holders of the requested data. 
-system.cpu.toL2Bus.pwrStateResidencyTicks::UNDEFINED 51905634500 # Cumulative time (in ticks) in various power states -system.cpu.toL2Bus.trans_dist::ReadResp 16303 # Transaction distribution +system.cpu.toL2Bus.pwrStateResidencyTicks::UNDEFINED 53344764500 # Cumulative time (in ticks) in various power states +system.cpu.toL2Bus.trans_dist::ReadResp 16318 # Transaction distribution system.cpu.toL2Bus.trans_dist::WritebackDirty 107 # Transaction distribution -system.cpu.toL2Bus.trans_dist::WritebackClean 13853 # Transaction distribution +system.cpu.toL2Bus.trans_dist::WritebackClean 13865 # Transaction distribution system.cpu.toL2Bus.trans_dist::CleanEvict 50 # Transaction distribution -system.cpu.toL2Bus.trans_dist::ReadExReq 1745 # Transaction distribution -system.cpu.toL2Bus.trans_dist::ReadExResp 1745 # Transaction distribution -system.cpu.toL2Bus.trans_dist::ReadCleanReq 15818 # Transaction distribution -system.cpu.toL2Bus.trans_dist::ReadSharedReq 485 # Transaction distribution -system.cpu.toL2Bus.pkt_count_system.cpu.icache.mem_side::system.cpu.l2cache.cpu_side 45489 # Packet count per connected master and slave (bytes) -system.cpu.toL2Bus.pkt_count_system.cpu.dcache.mem_side::system.cpu.l2cache.cpu_side 4617 # Packet count per connected master and slave (bytes) -system.cpu.toL2Bus.pkt_count::total 50106 # Packet count per connected master and slave (bytes) -system.cpu.toL2Bus.pkt_size_system.cpu.icache.mem_side::system.cpu.l2cache.cpu_side 1898944 # Cumulative packet size per connected master and slave (bytes) -system.cpu.toL2Bus.pkt_size_system.cpu.dcache.mem_side::system.cpu.l2cache.cpu_side 149568 # Cumulative packet size per connected master and slave (bytes) -system.cpu.toL2Bus.pkt_size::total 2048512 # Cumulative packet size per connected master and slave (bytes) +system.cpu.toL2Bus.trans_dist::ReadExReq 1743 # Transaction distribution +system.cpu.toL2Bus.trans_dist::ReadExResp 1743 # Transaction distribution +system.cpu.toL2Bus.trans_dist::ReadCleanReq 15830 # Transaction distribution +system.cpu.toL2Bus.trans_dist::ReadSharedReq 488 # Transaction distribution +system.cpu.toL2Bus.pkt_count_system.cpu.icache.mem_side::system.cpu.l2cache.cpu_side 45525 # Packet count per connected master and slave (bytes) +system.cpu.toL2Bus.pkt_count_system.cpu.dcache.mem_side::system.cpu.l2cache.cpu_side 4619 # Packet count per connected master and slave (bytes) +system.cpu.toL2Bus.pkt_count::total 50144 # Packet count per connected master and slave (bytes) +system.cpu.toL2Bus.pkt_size_system.cpu.icache.mem_side::system.cpu.l2cache.cpu_side 1900480 # Cumulative packet size per connected master and slave (bytes) +system.cpu.toL2Bus.pkt_size_system.cpu.dcache.mem_side::system.cpu.l2cache.cpu_side 149632 # Cumulative packet size per connected master and slave (bytes) +system.cpu.toL2Bus.pkt_size::total 2050112 # Cumulative packet size per connected master and slave (bytes) system.cpu.toL2Bus.snoops 0 # Total snoops (count) -system.cpu.toL2Bus.snoop_fanout::samples 18048 # Request fanout histogram +system.cpu.toL2Bus.snoopTraffic 0 # Total snoop traffic (bytes) +system.cpu.toL2Bus.snoop_fanout::samples 18061 # Request fanout histogram system.cpu.toL2Bus.snoop_fanout::mean 0 # Request fanout histogram system.cpu.toL2Bus.snoop_fanout::stdev 0 # Request fanout histogram system.cpu.toL2Bus.snoop_fanout::underflows 0 0.00% 0.00% # Request fanout histogram -system.cpu.toL2Bus.snoop_fanout::0 18048 100.00% 100.00% # Request fanout histogram +system.cpu.toL2Bus.snoop_fanout::0 18061 100.00% 100.00% # 
Request fanout histogram system.cpu.toL2Bus.snoop_fanout::1 0 0.00% 100.00% # Request fanout histogram system.cpu.toL2Bus.snoop_fanout::2 0 0.00% 100.00% # Request fanout histogram system.cpu.toL2Bus.snoop_fanout::overflows 0 0.00% 100.00% # Request fanout histogram system.cpu.toL2Bus.snoop_fanout::min_value 0 # Request fanout histogram system.cpu.toL2Bus.snoop_fanout::max_value 0 # Request fanout histogram -system.cpu.toL2Bus.snoop_fanout::total 18048 # Request fanout histogram -system.cpu.toL2Bus.reqLayer0.occupancy 29989000 # Layer occupancy (ticks) +system.cpu.toL2Bus.snoop_fanout::total 18061 # Request fanout histogram +system.cpu.toL2Bus.reqLayer0.occupancy 30013500 # Layer occupancy (ticks) system.cpu.toL2Bus.reqLayer0.utilization 0.1 # Layer utilization (%) -system.cpu.toL2Bus.respLayer0.occupancy 23727000 # Layer occupancy (ticks) +system.cpu.toL2Bus.respLayer0.occupancy 23745000 # Layer occupancy (ticks) system.cpu.toL2Bus.respLayer0.utilization 0.0 # Layer utilization (%) -system.cpu.toL2Bus.respLayer1.occupancy 3345000 # Layer occupancy (ticks) +system.cpu.toL2Bus.respLayer1.occupancy 3346500 # Layer occupancy (ticks) system.cpu.toL2Bus.respLayer1.utilization 0.0 # Layer utilization (%) -system.membus.pwrStateResidencyTicks::UNDEFINED 51905634500 # Cumulative time (in ticks) in various power states -system.membus.trans_dist::ReadResp 3601 # Transaction distribution -system.membus.trans_dist::ReadExReq 1719 # Transaction distribution -system.membus.trans_dist::ReadExResp 1719 # Transaction distribution -system.membus.trans_dist::ReadSharedReq 3601 # Transaction distribution -system.membus.pkt_count_system.cpu.l2cache.mem_side::system.physmem.port 10640 # Packet count per connected master and slave (bytes) -system.membus.pkt_count::total 10640 # Packet count per connected master and slave (bytes) -system.membus.pkt_size_system.cpu.l2cache.mem_side::system.physmem.port 340480 # Cumulative packet size per connected master and slave (bytes) -system.membus.pkt_size::total 340480 # Cumulative packet size per connected master and slave (bytes) +system.membus.pwrStateResidencyTicks::UNDEFINED 53344764500 # Cumulative time (in ticks) in various power states +system.membus.trans_dist::ReadResp 3605 # Transaction distribution +system.membus.trans_dist::ReadExReq 1717 # Transaction distribution +system.membus.trans_dist::ReadExResp 1717 # Transaction distribution +system.membus.trans_dist::ReadSharedReq 3605 # Transaction distribution +system.membus.pkt_count_system.cpu.l2cache.mem_side::system.physmem.port 10644 # Packet count per connected master and slave (bytes) +system.membus.pkt_count::total 10644 # Packet count per connected master and slave (bytes) +system.membus.pkt_size_system.cpu.l2cache.mem_side::system.physmem.port 340608 # Cumulative packet size per connected master and slave (bytes) +system.membus.pkt_size::total 340608 # Cumulative packet size per connected master and slave (bytes) system.membus.snoops 0 # Total snoops (count) -system.membus.snoop_fanout::samples 5320 # Request fanout histogram +system.membus.snoopTraffic 0 # Total snoop traffic (bytes) +system.membus.snoop_fanout::samples 5322 # Request fanout histogram system.membus.snoop_fanout::mean 0 # Request fanout histogram system.membus.snoop_fanout::stdev 0 # Request fanout histogram system.membus.snoop_fanout::underflows 0 0.00% 0.00% # Request fanout histogram -system.membus.snoop_fanout::0 5320 100.00% 100.00% # Request fanout histogram +system.membus.snoop_fanout::0 5322 100.00% 100.00% # Request fanout 
histogram system.membus.snoop_fanout::1 0 0.00% 100.00% # Request fanout histogram system.membus.snoop_fanout::overflows 0 0.00% 100.00% # Request fanout histogram system.membus.snoop_fanout::min_value 0 # Request fanout histogram system.membus.snoop_fanout::max_value 0 # Request fanout histogram -system.membus.snoop_fanout::total 5320 # Request fanout histogram -system.membus.reqLayer0.occupancy 6419000 # Layer occupancy (ticks) +system.membus.snoop_fanout::total 5322 # Request fanout histogram +system.membus.reqLayer0.occupancy 6419500 # Layer occupancy (ticks) system.membus.reqLayer0.utilization 0.0 # Layer utilization (%) -system.membus.respLayer1.occupancy 28167750 # Layer occupancy (ticks) +system.membus.respLayer1.occupancy 28179750 # Layer occupancy (ticks) system.membus.respLayer1.utilization 0.1 # Layer utilization (%) ---------- End Simulation Statistics ---------- diff --git a/tests/long/se/70.twolf/ref/alpha/tru64/o3-timing/config.ini b/tests/long/se/70.twolf/ref/alpha/tru64/o3-timing/config.ini index 1d39a1715..d82573b75 100644 --- a/tests/long/se/70.twolf/ref/alpha/tru64/o3-timing/config.ini +++ b/tests/long/se/70.twolf/ref/alpha/tru64/o3-timing/config.ini @@ -14,7 +14,9 @@ children=clk_domain cpu cpu_clk_domain dvfs_handler membus physmem voltage_domai boot_osflags=a cache_line_size=64 clk_domain=system.clk_domain +default_p_state=UNDEFINED eventq_index=0 +exit_on_work_items=false init_param=0 kernel= kernel_addr_check=true @@ -24,9 +26,16 @@ mem_mode=timing mem_ranges= memories=system.physmem mmap_using_noreserve=false +multi_thread=false num_work_ids=16 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null readfile= symbolfile= +thermal_components= +thermal_model=Null work_begin_ckpt_count=0 work_begin_cpu_id_exit=-1 work_begin_exit_count=0 @@ -68,6 +77,7 @@ cpu_id=0 decodeToFetchDelay=1 decodeToRenameDelay=1 decodeWidth=8 +default_p_state=UNDEFINED dispatchWidth=8 do_checkpoint_insts=true do_quiesce=true @@ -104,6 +114,10 @@ numPhysIntRegs=256 numROBEntries=192 numRobs=1 numThreads=1 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null profile=0 progress_interval=0 renameToDecodeDelay=1 @@ -143,11 +157,18 @@ choicePredictorSize=8192 eventq_index=0 globalCtrBits=2 globalPredictorSize=8192 +indirectHashGHR=true +indirectHashTargets=true +indirectPathLength=3 +indirectSets=256 +indirectTagSize=16 +indirectWays=2 instShiftAmt=2 localCtrBits=2 localHistoryTableSize=2048 localPredictorSize=2048 numThreads=1 +useIndirect=true [system.cpu.dcache] type=Cache @@ -155,13 +176,18 @@ children=tags addr_ranges=0:18446744073709551615 assoc=2 clk_domain=system.cpu_clk_domain +clusivity=mostly_incl +default_p_state=UNDEFINED demand_mshr_reserve=1 eventq_index=0 -forward_snoops=true hit_latency=2 is_read_only=false max_miss_count=0 mshrs=4 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null prefetch_on_access=false prefetcher=Null response_latency=2 @@ -171,6 +197,7 @@ system=system tags=system.cpu.dcache.tags tgts_per_mshr=20 write_buffers=8 +writeback_clean=false cpu_side=system.cpu.dcache_port mem_side=system.cpu.toL2Bus.slave[1] @@ -179,8 +206,13 @@ type=LRU assoc=2 block_size=64 clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 hit_latency=2 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sequential_access=false size=262144 @@ -502,13 +534,18 
@@ children=tags addr_ranges=0:18446744073709551615 assoc=2 clk_domain=system.cpu_clk_domain +clusivity=mostly_incl +default_p_state=UNDEFINED demand_mshr_reserve=1 eventq_index=0 -forward_snoops=true hit_latency=2 is_read_only=true max_miss_count=0 mshrs=4 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null prefetch_on_access=false prefetcher=Null response_latency=2 @@ -518,6 +555,7 @@ system=system tags=system.cpu.icache.tags tgts_per_mshr=20 write_buffers=8 +writeback_clean=true cpu_side=system.cpu.icache_port mem_side=system.cpu.toL2Bus.slave[0] @@ -526,8 +564,13 @@ type=LRU assoc=2 block_size=64 clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 hit_latency=2 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sequential_access=false size=131072 @@ -551,13 +594,18 @@ children=tags addr_ranges=0:18446744073709551615 assoc=8 clk_domain=system.cpu_clk_domain +clusivity=mostly_incl +default_p_state=UNDEFINED demand_mshr_reserve=1 eventq_index=0 -forward_snoops=true hit_latency=20 is_read_only=false max_miss_count=0 mshrs=20 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null prefetch_on_access=false prefetcher=Null response_latency=20 @@ -567,6 +615,7 @@ system=system tags=system.cpu.l2cache.tags tgts_per_mshr=12 write_buffers=8 +writeback_clean=false cpu_side=system.cpu.toL2Bus.master[0] mem_side=system.membus.slave[1] @@ -575,19 +624,31 @@ type=LRU assoc=8 block_size=64 clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 hit_latency=20 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sequential_access=false size=2097152 [system.cpu.toL2Bus] type=CoherentXBar +children=snoop_filter clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 forward_latency=0 frontend_latency=1 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +point_of_coherency=false +power_model=Null response_latency=1 -snoop_filter=Null +snoop_filter=system.cpu.toL2Bus.snoop_filter snoop_response_latency=1 system=system use_default_range=false @@ -595,6 +656,13 @@ width=32 master=system.cpu.l2cache.cpu_side slave=system.cpu.icache.mem_side system.cpu.dcache.mem_side +[system.cpu.toL2Bus.snoop_filter] +type=SnoopFilter +eventq_index=0 +lookup_latency=0 +max_capacity=8388608 +system=system + [system.cpu.tracer] type=ExeTracer eventq_index=0 @@ -609,7 +677,7 @@ env= errout=cerr euid=100 eventq_index=0 -executable=/scratch/nilay/GEM5/dist/m5/cpu2000/binaries/alpha/tru64/twolf +executable=/arm/projectscratch/randd/systems/dist/cpu2000/binaries/alpha/tru64/twolf gid=100 input=cin kvmInSE=false @@ -641,9 +709,15 @@ transition_latency=100000000 [system.membus] type=CoherentXBar clk_domain=system.clk_domain +default_p_state=UNDEFINED eventq_index=0 forward_latency=4 frontend_latency=3 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +point_of_coherency=true +power_model=Null response_latency=2 snoop_filter=Null snoop_response_latency=4 @@ -687,6 +761,7 @@ burst_length=8 channels=1 clk_domain=system.clk_domain conf_table_reported=true +default_p_state=UNDEFINED device_bus_width=8 device_rowbuffer_size=1024 device_size=536870912 @@ -698,7 +773,11 @@ max_accesses_per_row=16 mem_sched_policy=frfcfs min_writes_per_switch=16 null=false +p_state_clk_gate_bins=20 
+p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 page_policy=open_adaptive +power_model=Null range=0:134217727 ranks_per_channel=2 read_buffer_size=32 diff --git a/tests/long/se/70.twolf/ref/alpha/tru64/o3-timing/simerr b/tests/long/se/70.twolf/ref/alpha/tru64/o3-timing/simerr index f0a9a7c93..e0bca4e4e 100755 --- a/tests/long/se/70.twolf/ref/alpha/tru64/o3-timing/simerr +++ b/tests/long/se/70.twolf/ref/alpha/tru64/o3-timing/simerr @@ -1,5 +1,6 @@ warn: DRAM device capacity (8192 Mbytes) does not match the address range assigned (128 Mbytes) warn: Sockets disabled, not accepting gdb connections +warn: ClockedObject: More than one power state change request encountered within the same simulation tick warn: Prefetch instructions in Alpha do not do anything warn: Prefetch instructions in Alpha do not do anything warn: Prefetch instructions in Alpha do not do anything diff --git a/tests/long/se/70.twolf/ref/alpha/tru64/o3-timing/simout b/tests/long/se/70.twolf/ref/alpha/tru64/o3-timing/simout index a140d0429..1d7fd9550 100755 --- a/tests/long/se/70.twolf/ref/alpha/tru64/o3-timing/simout +++ b/tests/long/se/70.twolf/ref/alpha/tru64/o3-timing/simout @@ -3,11 +3,13 @@ Redirecting stderr to build/ALPHA/tests/opt/long/se/70.twolf/alpha/tru64/o3-timi gem5 Simulator System. http://gem5.org gem5 is copyrighted software; use the --copyright option for details. -gem5 compiled Sep 14 2015 20:54:01 -gem5 started Sep 14 2015 21:18:12 -gem5 executing on ribera.cs.wisc.edu -command line: build/ALPHA/gem5.opt -d build/ALPHA/tests/opt/long/se/70.twolf/alpha/tru64/o3-timing -re /scratch/nilay/GEM5/gem5/tests/run.py build/ALPHA/tests/opt/long/se/70.twolf/alpha/tru64/o3-timing +gem5 compiled Jul 19 2016 12:23:51 +gem5 started Jul 21 2016 14:09:29 +gem5 executing on e108600-lin, pid 4313 +command line: /work/curdun01/gem5-external.hg/build/ALPHA/gem5.opt -d build/ALPHA/tests/opt/long/se/70.twolf/alpha/tru64/o3-timing -re /work/curdun01/gem5-external.hg/tests/testing/../run.py long/se/70.twolf/alpha/tru64/o3-timing +Couldn't unlink build/ALPHA/tests/opt/long/se/70.twolf/alpha/tru64/o3-timing/smred.sav +Couldn't unlink build/ALPHA/tests/opt/long/se/70.twolf/alpha/tru64/o3-timing/smred.sv2 Global frequency set at 1000000000000 ticks per second info: Entering event queue @ 0. Starting simulation... info: Increasing stack size by one page. 
@@ -24,4 +26,4 @@ Authors: Carl Sechen, Bill Swartz 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 -122 123 124 Exiting @ tick 21919473500 because target called exit() +122 123 124 Exiting @ tick 21909208500 because target called exit() diff --git a/tests/long/se/70.twolf/ref/alpha/tru64/o3-timing/stats.txt b/tests/long/se/70.twolf/ref/alpha/tru64/o3-timing/stats.txt index 1294dcd91..002e3eec9 100644 --- a/tests/long/se/70.twolf/ref/alpha/tru64/o3-timing/stats.txt +++ b/tests/long/se/70.twolf/ref/alpha/tru64/o3-timing/stats.txt @@ -4,11 +4,11 @@ sim_seconds 0.021909 # Nu sim_ticks 21909208500 # Number of ticks simulated final_tick 21909208500 # Number of ticks from beginning of simulation (restored from checkpoints and never reset) sim_freq 1000000000000 # Frequency of simulated ticks -host_inst_rate 299674 # Simulator instruction rate (inst/s) -host_op_rate 299674 # Simulator op (including micro ops) rate (op/s) -host_tick_rate 77995222 # Simulator tick rate (ticks/s) -host_mem_usage 302008 # Number of bytes of host memory used -host_seconds 280.90 # Real time elapsed on the host +host_inst_rate 183723 # Simulator instruction rate (inst/s) +host_op_rate 183723 # Simulator op (including micro ops) rate (op/s) +host_tick_rate 47816944 # Simulator tick rate (ticks/s) +host_mem_usage 254944 # Number of bytes of host memory used +host_seconds 458.19 # Real time elapsed on the host sim_insts 84179709 # Number of instructions simulated sim_ops 84179709 # Number of ops (including micro ops) simulated system.voltage_domain.voltage 1 # Voltage in Volts @@ -1001,6 +1001,7 @@ system.cpu.toL2Bus.pkt_size_system.cpu.icache.mem_side::system.cpu.l2cache.cpu_s system.cpu.toL2Bus.pkt_size_system.cpu.dcache.mem_side::system.cpu.l2cache.cpu_side 150592 # Cumulative packet size per connected master and slave (bytes) system.cpu.toL2Bus.pkt_size::total 1492544 # Cumulative packet size per connected master and slave (bytes) system.cpu.toL2Bus.snoops 0 # Total snoops (count) +system.cpu.toL2Bus.snoopTraffic 0 # Total snoop traffic (bytes) system.cpu.toL2Bus.snoop_fanout::samples 13699 # Request fanout histogram system.cpu.toL2Bus.snoop_fanout::mean 0 # Request fanout histogram system.cpu.toL2Bus.snoop_fanout::stdev 0 # Request fanout histogram @@ -1028,6 +1029,7 @@ system.membus.pkt_count::total 10454 # Pa system.membus.pkt_size_system.cpu.l2cache.mem_side::system.physmem.port 334528 # Cumulative packet size per connected master and slave (bytes) system.membus.pkt_size::total 334528 # Cumulative packet size per connected master and slave (bytes) system.membus.snoops 0 # Total snoops (count) +system.membus.snoopTraffic 0 # Total snoop traffic (bytes) system.membus.snoop_fanout::samples 5227 # Request fanout histogram system.membus.snoop_fanout::mean 0 # Request fanout histogram system.membus.snoop_fanout::stdev 0 # Request fanout histogram diff --git a/tests/long/se/70.twolf/ref/arm/linux/minor-timing/config.ini b/tests/long/se/70.twolf/ref/arm/linux/minor-timing/config.ini index 5611a7dae..cdcb110c1 100644 --- a/tests/long/se/70.twolf/ref/arm/linux/minor-timing/config.ini +++ b/tests/long/se/70.twolf/ref/arm/linux/minor-timing/config.ini @@ -14,7 +14,9 @@ children=clk_domain cpu cpu_clk_domain dvfs_handler membus physmem voltage_domai boot_osflags=a cache_line_size=64 clk_domain=system.clk_domain +default_p_state=UNDEFINED eventq_index=0 +exit_on_work_items=false init_param=0 kernel= kernel_addr_check=true 
@@ -24,9 +26,16 @@ mem_mode=timing mem_ranges= memories=system.physmem mmap_using_noreserve=false +multi_thread=false num_work_ids=16 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null readfile= symbolfile= +thermal_components= +thermal_model=Null work_begin_ckpt_count=0 work_begin_cpu_id_exit=-1 work_begin_exit_count=0 @@ -55,6 +64,7 @@ decodeCycleInput=true decodeInputBufferSize=3 decodeInputWidth=2 decodeToExecuteForwardDelay=1 +default_p_state=UNDEFINED do_checkpoint_insts=true do_quiesce=true do_statistics_insts=true @@ -99,12 +109,17 @@ max_insts_any_thread=0 max_loads_all_threads=0 max_loads_any_thread=0 numThreads=1 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null profile=0 progress_interval=0 simpoint_start_insts= socket_id=0 switched_out=false system=system +threadPolicy=RoundRobin tracer=system.cpu.tracer workload=system.cpu.workload dcache_port=system.cpu.dcache.cpu_side @@ -120,11 +135,18 @@ choicePredictorSize=8192 eventq_index=0 globalCtrBits=2 globalPredictorSize=8192 +indirectHashGHR=true +indirectHashTargets=true +indirectPathLength=3 +indirectSets=256 +indirectTagSize=16 +indirectWays=2 instShiftAmt=2 localCtrBits=2 localHistoryTableSize=2048 localPredictorSize=2048 numThreads=1 +useIndirect=true [system.cpu.dcache] type=Cache @@ -132,13 +154,18 @@ children=tags addr_ranges=0:18446744073709551615 assoc=2 clk_domain=system.cpu_clk_domain +clusivity=mostly_incl +default_p_state=UNDEFINED demand_mshr_reserve=1 eventq_index=0 -forward_snoops=true hit_latency=2 is_read_only=false max_miss_count=0 mshrs=4 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null prefetch_on_access=false prefetcher=Null response_latency=2 @@ -148,6 +175,7 @@ system=system tags=system.cpu.dcache.tags tgts_per_mshr=20 write_buffers=8 +writeback_clean=false cpu_side=system.cpu.dcache_port mem_side=system.cpu.toL2Bus.slave[1] @@ -156,8 +184,13 @@ type=LRU assoc=2 block_size=64 clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 hit_latency=2 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sequential_access=false size=262144 @@ -180,9 +213,14 @@ walker=system.cpu.dstage2_mmu.stage2_tlb.walker [system.cpu.dstage2_mmu.stage2_tlb.walker] type=ArmTableWalker clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 is_stage2=true num_squash_per_cycle=2 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sys=system [system.cpu.dtb] @@ -196,9 +234,14 @@ walker=system.cpu.dtb.walker [system.cpu.dtb.walker] type=ArmTableWalker clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 is_stage2=false num_squash_per_cycle=2 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sys=system port=system.cpu.toL2Bus.slave[3] @@ -591,13 +634,18 @@ children=tags addr_ranges=0:18446744073709551615 assoc=2 clk_domain=system.cpu_clk_domain +clusivity=mostly_incl +default_p_state=UNDEFINED demand_mshr_reserve=1 eventq_index=0 -forward_snoops=true hit_latency=2 is_read_only=true max_miss_count=0 mshrs=4 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null prefetch_on_access=false prefetcher=Null response_latency=2 @@ -607,6 +655,7 @@ system=system tags=system.cpu.icache.tags 
tgts_per_mshr=20 write_buffers=8 +writeback_clean=true cpu_side=system.cpu.icache_port mem_side=system.cpu.toL2Bus.slave[0] @@ -615,8 +664,13 @@ type=LRU assoc=2 block_size=64 clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 hit_latency=2 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sequential_access=false size=131072 @@ -626,6 +680,7 @@ eventq_index=0 [system.cpu.isa] type=ArmISA +decoderFlavour=Generic eventq_index=0 fpsid=1090793632 id_aa64afr0_el1=0 @@ -673,9 +728,14 @@ walker=system.cpu.istage2_mmu.stage2_tlb.walker [system.cpu.istage2_mmu.stage2_tlb.walker] type=ArmTableWalker clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 is_stage2=true num_squash_per_cycle=2 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sys=system [system.cpu.itb] @@ -689,9 +749,14 @@ walker=system.cpu.itb.walker [system.cpu.itb.walker] type=ArmTableWalker clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 is_stage2=false num_squash_per_cycle=2 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sys=system port=system.cpu.toL2Bus.slave[2] @@ -701,13 +766,18 @@ children=tags addr_ranges=0:18446744073709551615 assoc=8 clk_domain=system.cpu_clk_domain +clusivity=mostly_incl +default_p_state=UNDEFINED demand_mshr_reserve=1 eventq_index=0 -forward_snoops=true hit_latency=20 is_read_only=false max_miss_count=0 mshrs=20 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null prefetch_on_access=false prefetcher=Null response_latency=20 @@ -717,6 +787,7 @@ system=system tags=system.cpu.l2cache.tags tgts_per_mshr=12 write_buffers=8 +writeback_clean=false cpu_side=system.cpu.toL2Bus.master[0] mem_side=system.membus.slave[1] @@ -725,19 +796,31 @@ type=LRU assoc=8 block_size=64 clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 hit_latency=20 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sequential_access=false size=2097152 [system.cpu.toL2Bus] type=CoherentXBar +children=snoop_filter clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 forward_latency=0 frontend_latency=1 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +point_of_coherency=false +power_model=Null response_latency=1 -snoop_filter=Null +snoop_filter=system.cpu.toL2Bus.snoop_filter snoop_response_latency=1 system=system use_default_range=false @@ -745,6 +828,13 @@ width=32 master=system.cpu.l2cache.cpu_side slave=system.cpu.icache.mem_side system.cpu.dcache.mem_side system.cpu.itb.walker.port system.cpu.dtb.walker.port +[system.cpu.toL2Bus.snoop_filter] +type=SnoopFilter +eventq_index=0 +lookup_latency=0 +max_capacity=8388608 +system=system + [system.cpu.tracer] type=ExeTracer eventq_index=0 @@ -759,7 +849,7 @@ env= errout=cerr euid=100 eventq_index=0 -executable=/scratch/nilay/GEM5/dist/m5/cpu2000/binaries/arm/linux/twolf +executable=/arm/projectscratch/randd/systems/dist/cpu2000/binaries/arm/linux/twolf gid=100 input=cin kvmInSE=false @@ -791,9 +881,15 @@ transition_latency=100000000 [system.membus] type=CoherentXBar clk_domain=system.clk_domain +default_p_state=UNDEFINED eventq_index=0 forward_latency=4 frontend_latency=3 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 
+point_of_coherency=true +power_model=Null response_latency=2 snoop_filter=Null snoop_response_latency=4 @@ -837,6 +933,7 @@ burst_length=8 channels=1 clk_domain=system.clk_domain conf_table_reported=true +default_p_state=UNDEFINED device_bus_width=8 device_rowbuffer_size=1024 device_size=536870912 @@ -848,7 +945,11 @@ max_accesses_per_row=16 mem_sched_policy=frfcfs min_writes_per_switch=16 null=false +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 page_policy=open_adaptive +power_model=Null range=0:134217727 ranks_per_channel=2 read_buffer_size=32 diff --git a/tests/long/se/70.twolf/ref/arm/linux/minor-timing/simerr b/tests/long/se/70.twolf/ref/arm/linux/minor-timing/simerr index 341b479f7..bbcd9d751 100755 --- a/tests/long/se/70.twolf/ref/arm/linux/minor-timing/simerr +++ b/tests/long/se/70.twolf/ref/arm/linux/minor-timing/simerr @@ -1,2 +1,3 @@ warn: DRAM device capacity (8192 Mbytes) does not match the address range assigned (128 Mbytes) warn: Sockets disabled, not accepting gdb connections +warn: ClockedObject: More than one power state change request encountered within the same simulation tick diff --git a/tests/long/se/70.twolf/ref/arm/linux/minor-timing/simout b/tests/long/se/70.twolf/ref/arm/linux/minor-timing/simout index 87bca4e9e..90ea58e8e 100755 --- a/tests/long/se/70.twolf/ref/arm/linux/minor-timing/simout +++ b/tests/long/se/70.twolf/ref/arm/linux/minor-timing/simout @@ -3,11 +3,13 @@ Redirecting stderr to build/ARM/tests/opt/long/se/70.twolf/arm/linux/minor-timin gem5 Simulator System. http://gem5.org gem5 is copyrighted software; use the --copyright option for details. -gem5 compiled Sep 14 2015 23:29:19 -gem5 started Sep 15 2015 04:10:24 -gem5 executing on ribera.cs.wisc.edu -command line: build/ARM/gem5.opt -d build/ARM/tests/opt/long/se/70.twolf/arm/linux/minor-timing -re /scratch/nilay/GEM5/gem5/tests/run.py build/ARM/tests/opt/long/se/70.twolf/arm/linux/minor-timing +gem5 compiled Jul 21 2016 14:37:41 +gem5 started Jul 21 2016 14:40:38 +gem5 executing on e108600-lin, pid 23114 +command line: /work/curdun01/gem5-external.hg/build/ARM/gem5.opt -d build/ARM/tests/opt/long/se/70.twolf/arm/linux/minor-timing -re /work/curdun01/gem5-external.hg/tests/testing/../run.py long/se/70.twolf/arm/linux/minor-timing +Couldn't unlink build/ARM/tests/opt/long/se/70.twolf/arm/linux/minor-timing/smred.sav +Couldn't unlink build/ARM/tests/opt/long/se/70.twolf/arm/linux/minor-timing/smred.sv2 Global frequency set at 1000000000000 ticks per second info: Entering event queue @ 0. Starting simulation... @@ -24,4 +26,4 @@ info: Increasing stack size by one page. 
76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 -122 123 124 Exiting @ tick 130772636500 because target called exit() +122 123 124 Exiting @ tick 132485848500 because target called exit() diff --git a/tests/long/se/70.twolf/ref/arm/linux/minor-timing/stats.txt b/tests/long/se/70.twolf/ref/arm/linux/minor-timing/stats.txt index 31e90a11a..91b6b6b0a 100644 --- a/tests/long/se/70.twolf/ref/arm/linux/minor-timing/stats.txt +++ b/tests/long/se/70.twolf/ref/arm/linux/minor-timing/stats.txt @@ -1,43 +1,43 @@ ---------- Begin Simulation Statistics ---------- -sim_seconds 0.130383 # Number of seconds simulated -sim_ticks 130382890500 # Number of ticks simulated -final_tick 130382890500 # Number of ticks from beginning of simulation (restored from checkpoints and never reset) +sim_seconds 0.132486 # Number of seconds simulated +sim_ticks 132485848500 # Number of ticks simulated +final_tick 132485848500 # Number of ticks from beginning of simulation (restored from checkpoints and never reset) sim_freq 1000000000000 # Frequency of simulated ticks -host_inst_rate 369340 # Simulator instruction rate (inst/s) -host_op_rate 389344 # Simulator op (including micro ops) rate (op/s) -host_tick_rate 279457902 # Simulator tick rate (ticks/s) -host_mem_usage 317800 # Number of bytes of host memory used -host_seconds 466.56 # Real time elapsed on the host +host_inst_rate 159309 # Simulator instruction rate (inst/s) +host_op_rate 167937 # Simulator op (including micro ops) rate (op/s) +host_tick_rate 122483807 # Simulator tick rate (ticks/s) +host_mem_usage 270152 # Number of bytes of host memory used +host_seconds 1081.66 # Real time elapsed on the host sim_insts 172317810 # Number of instructions simulated sim_ops 181650743 # Number of ops (including micro ops) simulated system.voltage_domain.voltage 1 # Voltage in Volts system.clk_domain.clock 1000 # Clock period in ticks -system.physmem.pwrStateResidencyTicks::UNDEFINED 130382890500 # Cumulative time (in ticks) in various power states -system.physmem.bytes_read::cpu.inst 138112 # Number of bytes read from this memory +system.physmem.pwrStateResidencyTicks::UNDEFINED 132485848500 # Cumulative time (in ticks) in various power states +system.physmem.bytes_read::cpu.inst 138240 # Number of bytes read from this memory system.physmem.bytes_read::cpu.data 109312 # Number of bytes read from this memory -system.physmem.bytes_read::total 247424 # Number of bytes read from this memory -system.physmem.bytes_inst_read::cpu.inst 138112 # Number of instructions bytes read from this memory -system.physmem.bytes_inst_read::total 138112 # Number of instructions bytes read from this memory -system.physmem.num_reads::cpu.inst 2158 # Number of read requests responded to by this memory +system.physmem.bytes_read::total 247552 # Number of bytes read from this memory +system.physmem.bytes_inst_read::cpu.inst 138240 # Number of instructions bytes read from this memory +system.physmem.bytes_inst_read::total 138240 # Number of instructions bytes read from this memory +system.physmem.num_reads::cpu.inst 2160 # Number of read requests responded to by this memory system.physmem.num_reads::cpu.data 1708 # Number of read requests responded to by this memory -system.physmem.num_reads::total 3866 # Number of read requests responded to by this memory -system.physmem.bw_read::cpu.inst 1059280 # Total read bandwidth from this memory (bytes/s) -system.physmem.bw_read::cpu.data 838392 # Total 
read bandwidth from this memory (bytes/s) -system.physmem.bw_read::total 1897672 # Total read bandwidth from this memory (bytes/s) -system.physmem.bw_inst_read::cpu.inst 1059280 # Instruction read bandwidth from this memory (bytes/s) -system.physmem.bw_inst_read::total 1059280 # Instruction read bandwidth from this memory (bytes/s) -system.physmem.bw_total::cpu.inst 1059280 # Total bandwidth to/from this memory (bytes/s) -system.physmem.bw_total::cpu.data 838392 # Total bandwidth to/from this memory (bytes/s) -system.physmem.bw_total::total 1897672 # Total bandwidth to/from this memory (bytes/s) -system.physmem.readReqs 3866 # Number of read requests accepted +system.physmem.num_reads::total 3868 # Number of read requests responded to by this memory +system.physmem.bw_read::cpu.inst 1043432 # Total read bandwidth from this memory (bytes/s) +system.physmem.bw_read::cpu.data 825084 # Total read bandwidth from this memory (bytes/s) +system.physmem.bw_read::total 1868517 # Total read bandwidth from this memory (bytes/s) +system.physmem.bw_inst_read::cpu.inst 1043432 # Instruction read bandwidth from this memory (bytes/s) +system.physmem.bw_inst_read::total 1043432 # Instruction read bandwidth from this memory (bytes/s) +system.physmem.bw_total::cpu.inst 1043432 # Total bandwidth to/from this memory (bytes/s) +system.physmem.bw_total::cpu.data 825084 # Total bandwidth to/from this memory (bytes/s) +system.physmem.bw_total::total 1868517 # Total bandwidth to/from this memory (bytes/s) +system.physmem.readReqs 3868 # Number of read requests accepted system.physmem.writeReqs 0 # Number of write requests accepted -system.physmem.readBursts 3866 # Number of DRAM read bursts, including those serviced by the write queue +system.physmem.readBursts 3868 # Number of DRAM read bursts, including those serviced by the write queue system.physmem.writeBursts 0 # Number of DRAM write bursts, including those merged in the write queue -system.physmem.bytesReadDRAM 247424 # Total number of bytes read from DRAM +system.physmem.bytesReadDRAM 247552 # Total number of bytes read from DRAM system.physmem.bytesReadWrQ 0 # Total number of bytes read from write queue system.physmem.bytesWritten 0 # Total number of bytes written to DRAM -system.physmem.bytesReadSys 247424 # Total read bytes from the system interface side +system.physmem.bytesReadSys 247552 # Total read bytes from the system interface side system.physmem.bytesWrittenSys 0 # Total written bytes from the system interface side system.physmem.servicedByWrQ 0 # Number of DRAM read bursts serviced by the write queue system.physmem.mergedWrBursts 0 # Number of DRAM write bursts merged with an existing one @@ -52,12 +52,12 @@ system.physmem.perBankRdBursts::6 273 # Pe system.physmem.perBankRdBursts::7 222 # Per bank write bursts system.physmem.perBankRdBursts::8 248 # Per bank write bursts system.physmem.perBankRdBursts::9 218 # Per bank write bursts -system.physmem.perBankRdBursts::10 295 # Per bank write bursts +system.physmem.perBankRdBursts::10 296 # Per bank write bursts system.physmem.perBankRdBursts::11 200 # Per bank write bursts system.physmem.perBankRdBursts::12 183 # Per bank write bursts system.physmem.perBankRdBursts::13 218 # Per bank write bursts system.physmem.perBankRdBursts::14 224 # Per bank write bursts -system.physmem.perBankRdBursts::15 204 # Per bank write bursts +system.physmem.perBankRdBursts::15 205 # Per bank write bursts system.physmem.perBankWrBursts::0 0 # Per bank write bursts system.physmem.perBankWrBursts::1 0 # Per bank write 
bursts system.physmem.perBankWrBursts::2 0 # Per bank write bursts @@ -76,14 +76,14 @@ system.physmem.perBankWrBursts::14 0 # Pe system.physmem.perBankWrBursts::15 0 # Per bank write bursts system.physmem.numRdRetry 0 # Number of times read queue was full causing retry system.physmem.numWrRetry 0 # Number of times write queue was full causing retry -system.physmem.totGap 130382796000 # Total gap between requests +system.physmem.totGap 132485754500 # Total gap between requests system.physmem.readPktSize::0 0 # Read request sizes (log2) system.physmem.readPktSize::1 0 # Read request sizes (log2) system.physmem.readPktSize::2 0 # Read request sizes (log2) system.physmem.readPktSize::3 0 # Read request sizes (log2) system.physmem.readPktSize::4 0 # Read request sizes (log2) system.physmem.readPktSize::5 0 # Read request sizes (log2) -system.physmem.readPktSize::6 3866 # Read request sizes (log2) +system.physmem.readPktSize::6 3868 # Read request sizes (log2) system.physmem.writePktSize::0 0 # Write request sizes (log2) system.physmem.writePktSize::1 0 # Write request sizes (log2) system.physmem.writePktSize::2 0 # Write request sizes (log2) @@ -91,9 +91,9 @@ system.physmem.writePktSize::3 0 # Wr system.physmem.writePktSize::4 0 # Write request sizes (log2) system.physmem.writePktSize::5 0 # Write request sizes (log2) system.physmem.writePktSize::6 0 # Write request sizes (log2) -system.physmem.rdQLenPdf::0 3618 # What read queue length does an incoming req see -system.physmem.rdQLenPdf::1 236 # What read queue length does an incoming req see -system.physmem.rdQLenPdf::2 12 # What read queue length does an incoming req see +system.physmem.rdQLenPdf::0 3621 # What read queue length does an incoming req see +system.physmem.rdQLenPdf::1 238 # What read queue length does an incoming req see +system.physmem.rdQLenPdf::2 9 # What read queue length does an incoming req see system.physmem.rdQLenPdf::3 0 # What read queue length does an incoming req see system.physmem.rdQLenPdf::4 0 # What read queue length does an incoming req see system.physmem.rdQLenPdf::5 0 # What read queue length does an incoming req see @@ -187,29 +187,29 @@ system.physmem.wrQLenPdf::60 0 # Wh system.physmem.wrQLenPdf::61 0 # What write queue length does an incoming req see system.physmem.wrQLenPdf::62 0 # What write queue length does an incoming req see system.physmem.wrQLenPdf::63 0 # What write queue length does an incoming req see -system.physmem.bytesPerActivate::samples 915 # Bytes accessed per row activation -system.physmem.bytesPerActivate::mean 268.939891 # Bytes accessed per row activation -system.physmem.bytesPerActivate::gmean 176.781102 # Bytes accessed per row activation -system.physmem.bytesPerActivate::stdev 276.529935 # Bytes accessed per row activation -system.physmem.bytesPerActivate::0-127 273 29.84% 29.84% # Bytes accessed per row activation -system.physmem.bytesPerActivate::128-255 347 37.92% 67.76% # Bytes accessed per row activation -system.physmem.bytesPerActivate::256-383 83 9.07% 76.83% # Bytes accessed per row activation -system.physmem.bytesPerActivate::384-511 59 6.45% 83.28% # Bytes accessed per row activation -system.physmem.bytesPerActivate::512-639 35 3.83% 87.10% # Bytes accessed per row activation -system.physmem.bytesPerActivate::640-767 24 2.62% 89.73% # Bytes accessed per row activation -system.physmem.bytesPerActivate::768-895 16 1.75% 91.48% # Bytes accessed per row activation -system.physmem.bytesPerActivate::896-1023 20 2.19% 93.66% # Bytes accessed per row activation 
-system.physmem.bytesPerActivate::1024-1151 58 6.34% 100.00% # Bytes accessed per row activation -system.physmem.bytesPerActivate::total 915 # Bytes accessed per row activation -system.physmem.totQLat 27071500 # Total ticks spent queuing -system.physmem.totMemAccLat 99559000 # Total ticks spent from burst creation until serviced by the DRAM -system.physmem.totBusLat 19330000 # Total ticks spent in databus transfers -system.physmem.avgQLat 7002.46 # Average queueing delay per DRAM burst +system.physmem.bytesPerActivate::samples 929 # Bytes accessed per row activation +system.physmem.bytesPerActivate::mean 264.680301 # Bytes accessed per row activation +system.physmem.bytesPerActivate::gmean 173.140302 # Bytes accessed per row activation +system.physmem.bytesPerActivate::stdev 275.634226 # Bytes accessed per row activation +system.physmem.bytesPerActivate::0-127 285 30.68% 30.68% # Bytes accessed per row activation +system.physmem.bytesPerActivate::128-255 355 38.21% 68.89% # Bytes accessed per row activation +system.physmem.bytesPerActivate::256-383 86 9.26% 78.15% # Bytes accessed per row activation +system.physmem.bytesPerActivate::384-511 48 5.17% 83.32% # Bytes accessed per row activation +system.physmem.bytesPerActivate::512-639 35 3.77% 87.08% # Bytes accessed per row activation +system.physmem.bytesPerActivate::640-767 24 2.58% 89.67% # Bytes accessed per row activation +system.physmem.bytesPerActivate::768-895 21 2.26% 91.93% # Bytes accessed per row activation +system.physmem.bytesPerActivate::896-1023 19 2.05% 93.97% # Bytes accessed per row activation +system.physmem.bytesPerActivate::1024-1151 56 6.03% 100.00% # Bytes accessed per row activation +system.physmem.bytesPerActivate::total 929 # Bytes accessed per row activation +system.physmem.totQLat 30291250 # Total ticks spent queuing +system.physmem.totMemAccLat 102816250 # Total ticks spent from burst creation until serviced by the DRAM +system.physmem.totBusLat 19340000 # Total ticks spent in databus transfers +system.physmem.avgQLat 7831.24 # Average queueing delay per DRAM burst system.physmem.avgBusLat 5000.00 # Average bus latency per DRAM burst -system.physmem.avgMemAccLat 25752.46 # Average memory access latency per DRAM burst -system.physmem.avgRdBW 1.90 # Average DRAM read bandwidth in MiByte/s +system.physmem.avgMemAccLat 26581.24 # Average memory access latency per DRAM burst +system.physmem.avgRdBW 1.87 # Average DRAM read bandwidth in MiByte/s system.physmem.avgWrBW 0.00 # Average achieved write bandwidth in MiByte/s -system.physmem.avgRdBWSys 1.90 # Average system read bandwidth in MiByte/s +system.physmem.avgRdBWSys 1.87 # Average system read bandwidth in MiByte/s system.physmem.avgWrBWSys 0.00 # Average system write bandwidth in MiByte/s system.physmem.peakBW 12800.00 # Theoretical peak bandwidth in MiByte/s system.physmem.busUtil 0.01 # Data bus utilization in percentage @@ -217,56 +217,56 @@ system.physmem.busUtilRead 0.01 # Da system.physmem.busUtilWrite 0.00 # Data bus utilization in percentage for writes system.physmem.avgRdQLen 1.00 # Average read queue length when enqueuing system.physmem.avgWrQLen 0.00 # Average write queue length when enqueuing -system.physmem.readRowHits 2948 # Number of row buffer hits during reads +system.physmem.readRowHits 2934 # Number of row buffer hits during reads system.physmem.writeRowHits 0 # Number of row buffer hits during writes -system.physmem.readRowHitRate 76.25 # Row buffer hit rate for reads +system.physmem.readRowHitRate 75.85 # Row buffer hit rate for reads 
system.physmem.writeRowHitRate nan # Row buffer hit rate for writes -system.physmem.avgGap 33725503.36 # Average gap between requests -system.physmem.pageHitRate 76.25 # Row buffer hit rate, read and write combined -system.physmem_0.actEnergy 3144960 # Energy for activate commands per rank (pJ) -system.physmem_0.preEnergy 1716000 # Energy for precharge commands per rank (pJ) -system.physmem_0.readEnergy 16192800 # Energy for read commands per rank (pJ) +system.physmem.avgGap 34251746.25 # Average gap between requests +system.physmem.pageHitRate 75.85 # Row buffer hit rate, read and write combined +system.physmem_0.actEnergy 3182760 # Energy for activate commands per rank (pJ) +system.physmem_0.preEnergy 1736625 # Energy for precharge commands per rank (pJ) +system.physmem_0.readEnergy 16161600 # Energy for read commands per rank (pJ) system.physmem_0.writeEnergy 0 # Energy for write commands per rank (pJ) -system.physmem_0.refreshEnergy 8515837200 # Energy for refresh commands per rank (pJ) -system.physmem_0.actBackEnergy 3562127505 # Energy for active background per rank (pJ) -system.physmem_0.preBackEnergy 75103936500 # Energy for precharge background per rank (pJ) -system.physmem_0.totalEnergy 87202954965 # Total energy per rank (pJ) -system.physmem_0.averagePower 668.831686 # Core power per rank (mW) -system.physmem_0.memoryStateTime::IDLE 124939990750 # Time in different power states -system.physmem_0.memoryStateTime::REF 4353700000 # Time in different power states +system.physmem_0.refreshEnergy 8653148400 # Energy for refresh commands per rank (pJ) +system.physmem_0.actBackEnergy 3626588520 # Energy for active background per rank (pJ) +system.physmem_0.preBackEnergy 76308756000 # Energy for precharge background per rank (pJ) +system.physmem_0.totalEnergy 88609573905 # Total energy per rank (pJ) +system.physmem_0.averagePower 668.835850 # Core power per rank (mW) +system.physmem_0.memoryStateTime::IDLE 126944435250 # Time in different power states +system.physmem_0.memoryStateTime::REF 4423900000 # Time in different power states system.physmem_0.memoryStateTime::PRE_PDN 0 # Time in different power states -system.physmem_0.memoryStateTime::ACT 1087339250 # Time in different power states +system.physmem_0.memoryStateTime::ACT 1115186250 # Time in different power states system.physmem_0.memoryStateTime::ACT_PDN 0 # Time in different power states -system.physmem_1.actEnergy 3764880 # Energy for activate commands per rank (pJ) -system.physmem_1.preEnergy 2054250 # Energy for precharge commands per rank (pJ) +system.physmem_1.actEnergy 3825360 # Energy for activate commands per rank (pJ) +system.physmem_1.preEnergy 2087250 # Energy for precharge commands per rank (pJ) system.physmem_1.readEnergy 13790400 # Energy for read commands per rank (pJ) system.physmem_1.writeEnergy 0 # Energy for write commands per rank (pJ) -system.physmem_1.refreshEnergy 8515837200 # Energy for refresh commands per rank (pJ) -system.physmem_1.actBackEnergy 3544157970 # Energy for active background per rank (pJ) -system.physmem_1.preBackEnergy 75119701500 # Energy for precharge background per rank (pJ) -system.physmem_1.totalEnergy 87199306200 # Total energy per rank (pJ) -system.physmem_1.averagePower 668.803682 # Core power per rank (mW) -system.physmem_1.memoryStateTime::IDLE 124966482000 # Time in different power states -system.physmem_1.memoryStateTime::REF 4353700000 # Time in different power states +system.physmem_1.refreshEnergy 8653148400 # Energy for refresh commands per rank (pJ) 
+system.physmem_1.actBackEnergy 3635416395 # Energy for active background per rank (pJ) +system.physmem_1.preBackEnergy 76301020500 # Energy for precharge background per rank (pJ) +system.physmem_1.totalEnergy 88609288305 # Total energy per rank (pJ) +system.physmem_1.averagePower 668.833625 # Core power per rank (mW) +system.physmem_1.memoryStateTime::IDLE 126931702750 # Time in different power states +system.physmem_1.memoryStateTime::REF 4423900000 # Time in different power states system.physmem_1.memoryStateTime::PRE_PDN 0 # Time in different power states -system.physmem_1.memoryStateTime::ACT 1060850750 # Time in different power states +system.physmem_1.memoryStateTime::ACT 1127787750 # Time in different power states system.physmem_1.memoryStateTime::ACT_PDN 0 # Time in different power states -system.pwrStateResidencyTicks::UNDEFINED 130382890500 # Cumulative time (in ticks) in various power states -system.cpu.branchPred.lookups 49622074 # Number of BP lookups -system.cpu.branchPred.condPredicted 39447439 # Number of conditional branches predicted -system.cpu.branchPred.condIncorrect 5514206 # Number of conditional branches incorrect -system.cpu.branchPred.BTBLookups 24092073 # Number of BTB lookups -system.cpu.branchPred.BTBHits 22843202 # Number of BTB hits +system.pwrStateResidencyTicks::UNDEFINED 132485848500 # Cumulative time (in ticks) in various power states +system.cpu.branchPred.lookups 49693791 # Number of BP lookups +system.cpu.branchPred.condPredicted 39499604 # Number of conditional branches predicted +system.cpu.branchPred.condIncorrect 5516746 # Number of conditional branches incorrect +system.cpu.branchPred.BTBLookups 24160971 # Number of BTB lookups +system.cpu.branchPred.BTBHits 22899506 # Number of BTB hits system.cpu.branchPred.BTBCorrect 0 # Number of correct BTB predictions (this stat may not work properly. -system.cpu.branchPred.BTBHitPct 94.816258 # BTB Hit Percentage -system.cpu.branchPred.usedRAS 1888965 # Number of times the RAS was used to get a target. +system.cpu.branchPred.BTBHitPct 94.778914 # BTB Hit Percentage +system.cpu.branchPred.usedRAS 1894448 # Number of times the RAS was used to get a target. system.cpu.branchPred.RASInCorrect 142 # Number of incorrect RAS predictions. -system.cpu.branchPred.indirectLookups 213748 # Number of indirect predictor lookups. -system.cpu.branchPred.indirectHits 207973 # Number of indirect target hits. -system.cpu.branchPred.indirectMisses 5775 # Number of indirect misses. -system.cpu.branchPredindirectMispredicted 40452 # Number of mispredicted indirect branches. +system.cpu.branchPred.indirectLookups 213843 # Number of indirect predictor lookups. +system.cpu.branchPred.indirectHits 208090 # Number of indirect target hits. +system.cpu.branchPred.indirectMisses 5753 # Number of indirect misses. +system.cpu.branchPredindirectMispredicted 40382 # Number of mispredicted indirect branches. 
system.cpu_clk_domain.clock 500 # Clock period in ticks -system.cpu.dstage2_mmu.stage2_tlb.walker.pwrStateResidencyTicks::UNDEFINED 130382890500 # Cumulative time (in ticks) in various power states +system.cpu.dstage2_mmu.stage2_tlb.walker.pwrStateResidencyTicks::UNDEFINED 132485848500 # Cumulative time (in ticks) in various power states system.cpu.dstage2_mmu.stage2_tlb.walker.walks 0 # Table walker walks requested system.cpu.dstage2_mmu.stage2_tlb.walker.walkRequestOrigin_Requested::Data 0 # Table walker requests started/completed, data/inst system.cpu.dstage2_mmu.stage2_tlb.walker.walkRequestOrigin_Requested::Inst 0 # Table walker requests started/completed, data/inst @@ -296,7 +296,7 @@ system.cpu.dstage2_mmu.stage2_tlb.inst_accesses 0 system.cpu.dstage2_mmu.stage2_tlb.hits 0 # DTB hits system.cpu.dstage2_mmu.stage2_tlb.misses 0 # DTB misses system.cpu.dstage2_mmu.stage2_tlb.accesses 0 # DTB accesses -system.cpu.dtb.walker.pwrStateResidencyTicks::UNDEFINED 130382890500 # Cumulative time (in ticks) in various power states +system.cpu.dtb.walker.pwrStateResidencyTicks::UNDEFINED 132485848500 # Cumulative time (in ticks) in various power states system.cpu.dtb.walker.walks 0 # Table walker walks requested system.cpu.dtb.walker.walkRequestOrigin_Requested::Data 0 # Table walker requests started/completed, data/inst system.cpu.dtb.walker.walkRequestOrigin_Requested::Inst 0 # Table walker requests started/completed, data/inst @@ -326,7 +326,7 @@ system.cpu.dtb.inst_accesses 0 # IT system.cpu.dtb.hits 0 # DTB hits system.cpu.dtb.misses 0 # DTB misses system.cpu.dtb.accesses 0 # DTB accesses -system.cpu.istage2_mmu.stage2_tlb.walker.pwrStateResidencyTicks::UNDEFINED 130382890500 # Cumulative time (in ticks) in various power states +system.cpu.istage2_mmu.stage2_tlb.walker.pwrStateResidencyTicks::UNDEFINED 132485848500 # Cumulative time (in ticks) in various power states system.cpu.istage2_mmu.stage2_tlb.walker.walks 0 # Table walker walks requested system.cpu.istage2_mmu.stage2_tlb.walker.walkRequestOrigin_Requested::Data 0 # Table walker requests started/completed, data/inst system.cpu.istage2_mmu.stage2_tlb.walker.walkRequestOrigin_Requested::Inst 0 # Table walker requests started/completed, data/inst @@ -356,7 +356,7 @@ system.cpu.istage2_mmu.stage2_tlb.inst_accesses 0 system.cpu.istage2_mmu.stage2_tlb.hits 0 # DTB hits system.cpu.istage2_mmu.stage2_tlb.misses 0 # DTB misses system.cpu.istage2_mmu.stage2_tlb.accesses 0 # DTB accesses -system.cpu.itb.walker.pwrStateResidencyTicks::UNDEFINED 130382890500 # Cumulative time (in ticks) in various power states +system.cpu.itb.walker.pwrStateResidencyTicks::UNDEFINED 132485848500 # Cumulative time (in ticks) in various power states system.cpu.itb.walker.walks 0 # Table walker walks requested system.cpu.itb.walker.walkRequestOrigin_Requested::Data 0 # Table walker requests started/completed, data/inst system.cpu.itb.walker.walkRequestOrigin_Requested::Inst 0 # Table walker requests started/completed, data/inst @@ -387,16 +387,16 @@ system.cpu.itb.hits 0 # DT system.cpu.itb.misses 0 # DTB misses system.cpu.itb.accesses 0 # DTB accesses system.cpu.workload.num_syscalls 400 # Number of system calls -system.cpu.pwrStateResidencyTicks::ON 130382890500 # Cumulative time (in ticks) in various power states -system.cpu.numCycles 260765781 # number of cpu cycles simulated +system.cpu.pwrStateResidencyTicks::ON 132485848500 # Cumulative time (in ticks) in various power states +system.cpu.numCycles 264971697 # number of cpu cycles simulated 
system.cpu.numWorkItemsStarted 0 # number of work items this cpu started
system.cpu.numWorkItemsCompleted 0 # number of work items this cpu completed
system.cpu.committedInsts 172317810 # Number of instructions committed
system.cpu.committedOps 181650743 # Number of ops (including micro ops) committed
-system.cpu.discardedOps 11583006 # Number of ops (including micro ops) which were discarded before commit
+system.cpu.discardedOps 11524051 # Number of ops (including micro ops) which were discarded before commit
system.cpu.numFetchSuspends 0 # Number of times Execute suspended instruction fetching
-system.cpu.cpi 1.513284 # CPI: cycles per instruction
-system.cpu.ipc 0.660815 # IPC: instructions per cycle
+system.cpu.cpi 1.537692 # CPI: cycles per instruction
+system.cpu.ipc 0.650325 # IPC: instructions per cycle
system.cpu.op_class_0::No_OpClass 0 0.00% 0.00% # Class of committed instruction
system.cpu.op_class_0::IntAlu 138988213 76.51% 76.51% # Class of committed instruction
system.cpu.op_class_0::IntMult 908940 0.50% 77.01% # Class of committed instruction
@@ -432,18 +432,18 @@ system.cpu.op_class_0::MemWrite 12644635 6.96% 100.00% # Cl
system.cpu.op_class_0::IprAccess 0 0.00% 100.00% # Class of committed instruction
system.cpu.op_class_0::InstPrefetch 0 0.00% 100.00% # Class of committed instruction
system.cpu.op_class_0::total 181650743 # Class of committed instruction
-system.cpu.tickCycles 254551967 # Number of cycles that the object actually ticked
-system.cpu.idleCycles 6213814 # Total number of cycles that the object has spent stopped
-system.cpu.dcache.tags.pwrStateResidencyTicks::UNDEFINED 130382890500 # Cumulative time (in ticks) in various power states
+system.cpu.tickCycles 256731546 # Number of cycles that the object actually ticked
+system.cpu.idleCycles 8240151 # Total number of cycles that the object has spent stopped
+system.cpu.dcache.tags.pwrStateResidencyTicks::UNDEFINED 132485848500 # Cumulative time (in ticks) in various power states
system.cpu.dcache.tags.replacements 42 # number of replacements
-system.cpu.dcache.tags.tagsinuse 1378.689350 # Cycle average of tags in use
-system.cpu.dcache.tags.total_refs 40754473 # Total number of references to valid blocks.
+system.cpu.dcache.tags.tagsinuse 1378.678714 # Cycle average of tags in use
+system.cpu.dcache.tags.total_refs 40755400 # Total number of references to valid blocks.
system.cpu.dcache.tags.sampled_refs 1811 # Sample count of references to valid blocks.
-system.cpu.dcache.tags.avg_refs 22503.850359 # Average number of references to valid blocks.
+system.cpu.dcache.tags.avg_refs 22504.362231 # Average number of references to valid blocks.
system.cpu.dcache.tags.warmup_cycle 0 # Cycle when the warmup percentage was hit.
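The CPI/IPC movement above is consistent with cpi being numCycles divided by committedInsts, with ipc as its reciprocal; committedInsts is unchanged in this update, so the shift simply tracks the extra cycles. A small sanity check, with the values copied from this diff; a sketch, not gem5 code:

num_cycles = 264971697        # system.cpu.numCycles (+ line earlier in this file)
committed_insts = 172317810   # system.cpu.committedInsts (unchanged)
print(num_cycles / committed_insts)   # ~1.537692 (cpi)
print(committed_insts / num_cycles)   # ~0.650325 (ipc)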
-system.cpu.dcache.tags.occ_blocks::cpu.data 1378.689350 # Average occupied blocks per requestor -system.cpu.dcache.tags.occ_percent::cpu.data 0.336594 # Average percentage of cache occupancy -system.cpu.dcache.tags.occ_percent::total 0.336594 # Average percentage of cache occupancy +system.cpu.dcache.tags.occ_blocks::cpu.data 1378.678714 # Average occupied blocks per requestor +system.cpu.dcache.tags.occ_percent::cpu.data 0.336591 # Average percentage of cache occupancy +system.cpu.dcache.tags.occ_percent::total 0.336591 # Average percentage of cache occupancy system.cpu.dcache.tags.occ_task_id_blocks::1024 1769 # Occupied blocks per task id system.cpu.dcache.tags.age_task_id_blocks_1024::0 18 # Occupied blocks per task id system.cpu.dcache.tags.age_task_id_blocks_1024::1 38 # Occupied blocks per task id @@ -451,43 +451,43 @@ system.cpu.dcache.tags.age_task_id_blocks_1024::2 83 system.cpu.dcache.tags.age_task_id_blocks_1024::3 271 # Occupied blocks per task id system.cpu.dcache.tags.age_task_id_blocks_1024::4 1359 # Occupied blocks per task id system.cpu.dcache.tags.occ_task_id_percent::1024 0.431885 # Percentage of cache occupancy per task id -system.cpu.dcache.tags.tag_accesses 81515639 # Number of tag accesses -system.cpu.dcache.tags.data_accesses 81515639 # Number of data accesses -system.cpu.dcache.pwrStateResidencyTicks::UNDEFINED 130382890500 # Cumulative time (in ticks) in various power states -system.cpu.dcache.ReadReq_hits::cpu.data 28346557 # number of ReadReq hits -system.cpu.dcache.ReadReq_hits::total 28346557 # number of ReadReq hits -system.cpu.dcache.WriteReq_hits::cpu.data 12362640 # number of WriteReq hits -system.cpu.dcache.WriteReq_hits::total 12362640 # number of WriteReq hits +system.cpu.dcache.tags.tag_accesses 81517417 # Number of tag accesses +system.cpu.dcache.tags.data_accesses 81517417 # Number of data accesses +system.cpu.dcache.pwrStateResidencyTicks::UNDEFINED 132485848500 # Cumulative time (in ticks) in various power states +system.cpu.dcache.ReadReq_hits::cpu.data 28347488 # number of ReadReq hits +system.cpu.dcache.ReadReq_hits::total 28347488 # number of ReadReq hits +system.cpu.dcache.WriteReq_hits::cpu.data 12362636 # number of WriteReq hits +system.cpu.dcache.WriteReq_hits::total 12362636 # number of WriteReq hits system.cpu.dcache.SoftPFReq_hits::cpu.data 462 # number of SoftPFReq hits system.cpu.dcache.SoftPFReq_hits::total 462 # number of SoftPFReq hits system.cpu.dcache.LoadLockedReq_hits::cpu.data 22407 # number of LoadLockedReq hits system.cpu.dcache.LoadLockedReq_hits::total 22407 # number of LoadLockedReq hits system.cpu.dcache.StoreCondReq_hits::cpu.data 22407 # number of StoreCondReq hits system.cpu.dcache.StoreCondReq_hits::total 22407 # number of StoreCondReq hits -system.cpu.dcache.demand_hits::cpu.data 40709197 # number of demand (read+write) hits -system.cpu.dcache.demand_hits::total 40709197 # number of demand (read+write) hits -system.cpu.dcache.overall_hits::cpu.data 40709659 # number of overall hits -system.cpu.dcache.overall_hits::total 40709659 # number of overall hits -system.cpu.dcache.ReadReq_misses::cpu.data 793 # number of ReadReq misses -system.cpu.dcache.ReadReq_misses::total 793 # number of ReadReq misses -system.cpu.dcache.WriteReq_misses::cpu.data 1647 # number of WriteReq misses -system.cpu.dcache.WriteReq_misses::total 1647 # number of WriteReq misses +system.cpu.dcache.demand_hits::cpu.data 40710124 # number of demand (read+write) hits +system.cpu.dcache.demand_hits::total 40710124 # number of demand (read+write) hits 
+system.cpu.dcache.overall_hits::cpu.data 40710586 # number of overall hits +system.cpu.dcache.overall_hits::total 40710586 # number of overall hits +system.cpu.dcache.ReadReq_misses::cpu.data 751 # number of ReadReq misses +system.cpu.dcache.ReadReq_misses::total 751 # number of ReadReq misses +system.cpu.dcache.WriteReq_misses::cpu.data 1651 # number of WriteReq misses +system.cpu.dcache.WriteReq_misses::total 1651 # number of WriteReq misses system.cpu.dcache.SoftPFReq_misses::cpu.data 1 # number of SoftPFReq misses system.cpu.dcache.SoftPFReq_misses::total 1 # number of SoftPFReq misses -system.cpu.dcache.demand_misses::cpu.data 2440 # number of demand (read+write) misses -system.cpu.dcache.demand_misses::total 2440 # number of demand (read+write) misses -system.cpu.dcache.overall_misses::cpu.data 2441 # number of overall misses -system.cpu.dcache.overall_misses::total 2441 # number of overall misses -system.cpu.dcache.ReadReq_miss_latency::cpu.data 59629000 # number of ReadReq miss cycles -system.cpu.dcache.ReadReq_miss_latency::total 59629000 # number of ReadReq miss cycles -system.cpu.dcache.WriteReq_miss_latency::cpu.data 126003000 # number of WriteReq miss cycles -system.cpu.dcache.WriteReq_miss_latency::total 126003000 # number of WriteReq miss cycles -system.cpu.dcache.demand_miss_latency::cpu.data 185632000 # number of demand (read+write) miss cycles -system.cpu.dcache.demand_miss_latency::total 185632000 # number of demand (read+write) miss cycles -system.cpu.dcache.overall_miss_latency::cpu.data 185632000 # number of overall miss cycles -system.cpu.dcache.overall_miss_latency::total 185632000 # number of overall miss cycles -system.cpu.dcache.ReadReq_accesses::cpu.data 28347350 # number of ReadReq accesses(hits+misses) -system.cpu.dcache.ReadReq_accesses::total 28347350 # number of ReadReq accesses(hits+misses) +system.cpu.dcache.demand_misses::cpu.data 2402 # number of demand (read+write) misses +system.cpu.dcache.demand_misses::total 2402 # number of demand (read+write) misses +system.cpu.dcache.overall_misses::cpu.data 2403 # number of overall misses +system.cpu.dcache.overall_misses::total 2403 # number of overall misses +system.cpu.dcache.ReadReq_miss_latency::cpu.data 55315500 # number of ReadReq miss cycles +system.cpu.dcache.ReadReq_miss_latency::total 55315500 # number of ReadReq miss cycles +system.cpu.dcache.WriteReq_miss_latency::cpu.data 127182500 # number of WriteReq miss cycles +system.cpu.dcache.WriteReq_miss_latency::total 127182500 # number of WriteReq miss cycles +system.cpu.dcache.demand_miss_latency::cpu.data 182498000 # number of demand (read+write) miss cycles +system.cpu.dcache.demand_miss_latency::total 182498000 # number of demand (read+write) miss cycles +system.cpu.dcache.overall_miss_latency::cpu.data 182498000 # number of overall miss cycles +system.cpu.dcache.overall_miss_latency::total 182498000 # number of overall miss cycles +system.cpu.dcache.ReadReq_accesses::cpu.data 28348239 # number of ReadReq accesses(hits+misses) +system.cpu.dcache.ReadReq_accesses::total 28348239 # number of ReadReq accesses(hits+misses) system.cpu.dcache.WriteReq_accesses::cpu.data 12364287 # number of WriteReq accesses(hits+misses) system.cpu.dcache.WriteReq_accesses::total 12364287 # number of WriteReq accesses(hits+misses) system.cpu.dcache.SoftPFReq_accesses::cpu.data 463 # number of SoftPFReq accesses(hits+misses) @@ -496,28 +496,28 @@ system.cpu.dcache.LoadLockedReq_accesses::cpu.data 22407 system.cpu.dcache.LoadLockedReq_accesses::total 22407 # number of 
LoadLockedReq accesses(hits+misses) system.cpu.dcache.StoreCondReq_accesses::cpu.data 22407 # number of StoreCondReq accesses(hits+misses) system.cpu.dcache.StoreCondReq_accesses::total 22407 # number of StoreCondReq accesses(hits+misses) -system.cpu.dcache.demand_accesses::cpu.data 40711637 # number of demand (read+write) accesses -system.cpu.dcache.demand_accesses::total 40711637 # number of demand (read+write) accesses -system.cpu.dcache.overall_accesses::cpu.data 40712100 # number of overall (read+write) accesses -system.cpu.dcache.overall_accesses::total 40712100 # number of overall (read+write) accesses -system.cpu.dcache.ReadReq_miss_rate::cpu.data 0.000028 # miss rate for ReadReq accesses -system.cpu.dcache.ReadReq_miss_rate::total 0.000028 # miss rate for ReadReq accesses -system.cpu.dcache.WriteReq_miss_rate::cpu.data 0.000133 # miss rate for WriteReq accesses -system.cpu.dcache.WriteReq_miss_rate::total 0.000133 # miss rate for WriteReq accesses +system.cpu.dcache.demand_accesses::cpu.data 40712526 # number of demand (read+write) accesses +system.cpu.dcache.demand_accesses::total 40712526 # number of demand (read+write) accesses +system.cpu.dcache.overall_accesses::cpu.data 40712989 # number of overall (read+write) accesses +system.cpu.dcache.overall_accesses::total 40712989 # number of overall (read+write) accesses +system.cpu.dcache.ReadReq_miss_rate::cpu.data 0.000026 # miss rate for ReadReq accesses +system.cpu.dcache.ReadReq_miss_rate::total 0.000026 # miss rate for ReadReq accesses +system.cpu.dcache.WriteReq_miss_rate::cpu.data 0.000134 # miss rate for WriteReq accesses +system.cpu.dcache.WriteReq_miss_rate::total 0.000134 # miss rate for WriteReq accesses system.cpu.dcache.SoftPFReq_miss_rate::cpu.data 0.002160 # miss rate for SoftPFReq accesses system.cpu.dcache.SoftPFReq_miss_rate::total 0.002160 # miss rate for SoftPFReq accesses -system.cpu.dcache.demand_miss_rate::cpu.data 0.000060 # miss rate for demand accesses -system.cpu.dcache.demand_miss_rate::total 0.000060 # miss rate for demand accesses -system.cpu.dcache.overall_miss_rate::cpu.data 0.000060 # miss rate for overall accesses -system.cpu.dcache.overall_miss_rate::total 0.000060 # miss rate for overall accesses -system.cpu.dcache.ReadReq_avg_miss_latency::cpu.data 75194.199243 # average ReadReq miss latency -system.cpu.dcache.ReadReq_avg_miss_latency::total 75194.199243 # average ReadReq miss latency -system.cpu.dcache.WriteReq_avg_miss_latency::cpu.data 76504.553734 # average WriteReq miss latency -system.cpu.dcache.WriteReq_avg_miss_latency::total 76504.553734 # average WriteReq miss latency -system.cpu.dcache.demand_avg_miss_latency::cpu.data 76078.688525 # average overall miss latency -system.cpu.dcache.demand_avg_miss_latency::total 76078.688525 # average overall miss latency -system.cpu.dcache.overall_avg_miss_latency::cpu.data 76047.521508 # average overall miss latency -system.cpu.dcache.overall_avg_miss_latency::total 76047.521508 # average overall miss latency +system.cpu.dcache.demand_miss_rate::cpu.data 0.000059 # miss rate for demand accesses +system.cpu.dcache.demand_miss_rate::total 0.000059 # miss rate for demand accesses +system.cpu.dcache.overall_miss_rate::cpu.data 0.000059 # miss rate for overall accesses +system.cpu.dcache.overall_miss_rate::total 0.000059 # miss rate for overall accesses +system.cpu.dcache.ReadReq_avg_miss_latency::cpu.data 73655.792277 # average ReadReq miss latency +system.cpu.dcache.ReadReq_avg_miss_latency::total 73655.792277 # average ReadReq miss latency 
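Similarly, the d-cache miss rates above are derived stats: misses divided by accesses, where accesses is hits plus misses. A hedged check against the updated (+) values in this hunk, with illustrative variable names:

readreq_misses = 751                 # ReadReq_misses::cpu.data
readreq_accesses = 28347488 + 751    # ReadReq_hits + ReadReq_misses = 28348239
demand_misses = 2402                 # demand_misses::cpu.data
demand_accesses = 40712526           # demand_accesses::cpu.data
print(readreq_misses / readreq_accesses)   # ~0.000026
print(demand_misses / demand_accesses)     # ~0.000059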
+system.cpu.dcache.WriteReq_avg_miss_latency::cpu.data 77033.615990 # average WriteReq miss latency +system.cpu.dcache.WriteReq_avg_miss_latency::total 77033.615990 # average WriteReq miss latency +system.cpu.dcache.demand_avg_miss_latency::cpu.data 75977.518734 # average overall miss latency +system.cpu.dcache.demand_avg_miss_latency::total 75977.518734 # average overall miss latency +system.cpu.dcache.overall_avg_miss_latency::cpu.data 75945.900957 # average overall miss latency +system.cpu.dcache.overall_avg_miss_latency::total 75945.900957 # average overall miss latency system.cpu.dcache.blocked_cycles::no_mshrs 0 # number of cycles access was blocked system.cpu.dcache.blocked_cycles::no_targets 0 # number of cycles access was blocked system.cpu.dcache.blocked::no_mshrs 0 # number of cycles access was blocked @@ -526,14 +526,14 @@ system.cpu.dcache.avg_blocked_cycles::no_mshrs nan system.cpu.dcache.avg_blocked_cycles::no_targets nan # average number of cycles each access was blocked system.cpu.dcache.writebacks::writebacks 16 # number of writebacks system.cpu.dcache.writebacks::total 16 # number of writebacks -system.cpu.dcache.ReadReq_mshr_hits::cpu.data 82 # number of ReadReq MSHR hits -system.cpu.dcache.ReadReq_mshr_hits::total 82 # number of ReadReq MSHR hits -system.cpu.dcache.WriteReq_mshr_hits::cpu.data 548 # number of WriteReq MSHR hits -system.cpu.dcache.WriteReq_mshr_hits::total 548 # number of WriteReq MSHR hits -system.cpu.dcache.demand_mshr_hits::cpu.data 630 # number of demand (read+write) MSHR hits -system.cpu.dcache.demand_mshr_hits::total 630 # number of demand (read+write) MSHR hits -system.cpu.dcache.overall_mshr_hits::cpu.data 630 # number of overall MSHR hits -system.cpu.dcache.overall_mshr_hits::total 630 # number of overall MSHR hits +system.cpu.dcache.ReadReq_mshr_hits::cpu.data 40 # number of ReadReq MSHR hits +system.cpu.dcache.ReadReq_mshr_hits::total 40 # number of ReadReq MSHR hits +system.cpu.dcache.WriteReq_mshr_hits::cpu.data 552 # number of WriteReq MSHR hits +system.cpu.dcache.WriteReq_mshr_hits::total 552 # number of WriteReq MSHR hits +system.cpu.dcache.demand_mshr_hits::cpu.data 592 # number of demand (read+write) MSHR hits +system.cpu.dcache.demand_mshr_hits::total 592 # number of demand (read+write) MSHR hits +system.cpu.dcache.overall_mshr_hits::cpu.data 592 # number of overall MSHR hits +system.cpu.dcache.overall_mshr_hits::total 592 # number of overall MSHR hits system.cpu.dcache.ReadReq_mshr_misses::cpu.data 711 # number of ReadReq MSHR misses system.cpu.dcache.ReadReq_mshr_misses::total 711 # number of ReadReq MSHR misses system.cpu.dcache.WriteReq_mshr_misses::cpu.data 1099 # number of WriteReq MSHR misses @@ -544,16 +544,16 @@ system.cpu.dcache.demand_mshr_misses::cpu.data 1810 system.cpu.dcache.demand_mshr_misses::total 1810 # number of demand (read+write) MSHR misses system.cpu.dcache.overall_mshr_misses::cpu.data 1811 # number of overall MSHR misses system.cpu.dcache.overall_mshr_misses::total 1811 # number of overall MSHR misses -system.cpu.dcache.ReadReq_mshr_miss_latency::cpu.data 52555500 # number of ReadReq MSHR miss cycles -system.cpu.dcache.ReadReq_mshr_miss_latency::total 52555500 # number of ReadReq MSHR miss cycles -system.cpu.dcache.WriteReq_mshr_miss_latency::cpu.data 85213000 # number of WriteReq MSHR miss cycles -system.cpu.dcache.WriteReq_mshr_miss_latency::total 85213000 # number of WriteReq MSHR miss cycles +system.cpu.dcache.ReadReq_mshr_miss_latency::cpu.data 52182500 # number of ReadReq MSHR miss cycles 
+system.cpu.dcache.ReadReq_mshr_miss_latency::total 52182500 # number of ReadReq MSHR miss cycles +system.cpu.dcache.WriteReq_mshr_miss_latency::cpu.data 86133500 # number of WriteReq MSHR miss cycles +system.cpu.dcache.WriteReq_mshr_miss_latency::total 86133500 # number of WriteReq MSHR miss cycles system.cpu.dcache.SoftPFReq_mshr_miss_latency::cpu.data 70000 # number of SoftPFReq MSHR miss cycles system.cpu.dcache.SoftPFReq_mshr_miss_latency::total 70000 # number of SoftPFReq MSHR miss cycles -system.cpu.dcache.demand_mshr_miss_latency::cpu.data 137768500 # number of demand (read+write) MSHR miss cycles -system.cpu.dcache.demand_mshr_miss_latency::total 137768500 # number of demand (read+write) MSHR miss cycles -system.cpu.dcache.overall_mshr_miss_latency::cpu.data 137838500 # number of overall MSHR miss cycles -system.cpu.dcache.overall_mshr_miss_latency::total 137838500 # number of overall MSHR miss cycles +system.cpu.dcache.demand_mshr_miss_latency::cpu.data 138316000 # number of demand (read+write) MSHR miss cycles +system.cpu.dcache.demand_mshr_miss_latency::total 138316000 # number of demand (read+write) MSHR miss cycles +system.cpu.dcache.overall_mshr_miss_latency::cpu.data 138386000 # number of overall MSHR miss cycles +system.cpu.dcache.overall_mshr_miss_latency::total 138386000 # number of overall MSHR miss cycles system.cpu.dcache.ReadReq_mshr_miss_rate::cpu.data 0.000025 # mshr miss rate for ReadReq accesses system.cpu.dcache.ReadReq_mshr_miss_rate::total 0.000025 # mshr miss rate for ReadReq accesses system.cpu.dcache.WriteReq_mshr_miss_rate::cpu.data 0.000089 # mshr miss rate for WriteReq accesses @@ -564,334 +564,336 @@ system.cpu.dcache.demand_mshr_miss_rate::cpu.data 0.000044 system.cpu.dcache.demand_mshr_miss_rate::total 0.000044 # mshr miss rate for demand accesses system.cpu.dcache.overall_mshr_miss_rate::cpu.data 0.000044 # mshr miss rate for overall accesses system.cpu.dcache.overall_mshr_miss_rate::total 0.000044 # mshr miss rate for overall accesses -system.cpu.dcache.ReadReq_avg_mshr_miss_latency::cpu.data 73917.721519 # average ReadReq mshr miss latency -system.cpu.dcache.ReadReq_avg_mshr_miss_latency::total 73917.721519 # average ReadReq mshr miss latency -system.cpu.dcache.WriteReq_avg_mshr_miss_latency::cpu.data 77536.851683 # average WriteReq mshr miss latency -system.cpu.dcache.WriteReq_avg_mshr_miss_latency::total 77536.851683 # average WriteReq mshr miss latency +system.cpu.dcache.ReadReq_avg_mshr_miss_latency::cpu.data 73393.108298 # average ReadReq mshr miss latency +system.cpu.dcache.ReadReq_avg_mshr_miss_latency::total 73393.108298 # average ReadReq mshr miss latency +system.cpu.dcache.WriteReq_avg_mshr_miss_latency::cpu.data 78374.431301 # average WriteReq mshr miss latency +system.cpu.dcache.WriteReq_avg_mshr_miss_latency::total 78374.431301 # average WriteReq mshr miss latency system.cpu.dcache.SoftPFReq_avg_mshr_miss_latency::cpu.data 70000 # average SoftPFReq mshr miss latency system.cpu.dcache.SoftPFReq_avg_mshr_miss_latency::total 70000 # average SoftPFReq mshr miss latency -system.cpu.dcache.demand_avg_mshr_miss_latency::cpu.data 76115.193370 # average overall mshr miss latency -system.cpu.dcache.demand_avg_mshr_miss_latency::total 76115.193370 # average overall mshr miss latency -system.cpu.dcache.overall_avg_mshr_miss_latency::cpu.data 76111.816676 # average overall mshr miss latency -system.cpu.dcache.overall_avg_mshr_miss_latency::total 76111.816676 # average overall mshr miss latency 
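The average MSHR miss latencies follow the same pattern: total miss cycles divided by the MSHR miss count, which is why the averages move even though the miss counts are almost unchanged. For the overall d-cache numbers above (a check under that assumption, not gem5 code):

overall_mshr_latency = 138386000   # overall_mshr_miss_latency::total, in ticks
overall_mshr_misses = 1811         # overall_mshr_misses::total (unchanged)
print(overall_mshr_latency / overall_mshr_misses)   # ~76414.14, matching the new average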
-system.cpu.icache.tags.pwrStateResidencyTicks::UNDEFINED 130382890500 # Cumulative time (in ticks) in various power states -system.cpu.icache.tags.replacements 2881 # number of replacements -system.cpu.icache.tags.tagsinuse 1423.942746 # Cycle average of tags in use -system.cpu.icache.tags.total_refs 70779397 # Total number of references to valid blocks. -system.cpu.icache.tags.sampled_refs 4677 # Sample count of references to valid blocks. -system.cpu.icache.tags.avg_refs 15133.503742 # Average number of references to valid blocks. +system.cpu.dcache.demand_avg_mshr_miss_latency::cpu.data 76417.679558 # average overall mshr miss latency +system.cpu.dcache.demand_avg_mshr_miss_latency::total 76417.679558 # average overall mshr miss latency +system.cpu.dcache.overall_avg_mshr_miss_latency::cpu.data 76414.135837 # average overall mshr miss latency +system.cpu.dcache.overall_avg_mshr_miss_latency::total 76414.135837 # average overall mshr miss latency +system.cpu.icache.tags.pwrStateResidencyTicks::UNDEFINED 132485848500 # Cumulative time (in ticks) in various power states +system.cpu.icache.tags.replacements 2864 # number of replacements +system.cpu.icache.tags.tagsinuse 1424.966015 # Cycle average of tags in use +system.cpu.icache.tags.total_refs 70941364 # Total number of references to valid blocks. +system.cpu.icache.tags.sampled_refs 4663 # Sample count of references to valid blocks. +system.cpu.icache.tags.avg_refs 15213.674459 # Average number of references to valid blocks. system.cpu.icache.tags.warmup_cycle 0 # Cycle when the warmup percentage was hit. -system.cpu.icache.tags.occ_blocks::cpu.inst 1423.942746 # Average occupied blocks per requestor -system.cpu.icache.tags.occ_percent::cpu.inst 0.695285 # Average percentage of cache occupancy -system.cpu.icache.tags.occ_percent::total 0.695285 # Average percentage of cache occupancy -system.cpu.icache.tags.occ_task_id_blocks::1024 1796 # Occupied blocks per task id +system.cpu.icache.tags.occ_blocks::cpu.inst 1424.966015 # Average occupied blocks per requestor +system.cpu.icache.tags.occ_percent::cpu.inst 0.695784 # Average percentage of cache occupancy +system.cpu.icache.tags.occ_percent::total 0.695784 # Average percentage of cache occupancy +system.cpu.icache.tags.occ_task_id_blocks::1024 1799 # Occupied blocks per task id system.cpu.icache.tags.age_task_id_blocks_1024::0 51 # Occupied blocks per task id system.cpu.icache.tags.age_task_id_blocks_1024::1 59 # Occupied blocks per task id -system.cpu.icache.tags.age_task_id_blocks_1024::2 496 # Occupied blocks per task id -system.cpu.icache.tags.age_task_id_blocks_1024::3 122 # Occupied blocks per task id -system.cpu.icache.tags.age_task_id_blocks_1024::4 1068 # Occupied blocks per task id -system.cpu.icache.tags.occ_task_id_percent::1024 0.876953 # Percentage of cache occupancy per task id -system.cpu.icache.tags.tag_accesses 141572827 # Number of tag accesses -system.cpu.icache.tags.data_accesses 141572827 # Number of data accesses -system.cpu.icache.pwrStateResidencyTicks::UNDEFINED 130382890500 # Cumulative time (in ticks) in various power states -system.cpu.icache.ReadReq_hits::cpu.inst 70779397 # number of ReadReq hits -system.cpu.icache.ReadReq_hits::total 70779397 # number of ReadReq hits -system.cpu.icache.demand_hits::cpu.inst 70779397 # number of demand (read+write) hits -system.cpu.icache.demand_hits::total 70779397 # number of demand (read+write) hits -system.cpu.icache.overall_hits::cpu.inst 70779397 # number of overall hits -system.cpu.icache.overall_hits::total 
70779397 # number of overall hits -system.cpu.icache.ReadReq_misses::cpu.inst 4678 # number of ReadReq misses -system.cpu.icache.ReadReq_misses::total 4678 # number of ReadReq misses -system.cpu.icache.demand_misses::cpu.inst 4678 # number of demand (read+write) misses -system.cpu.icache.demand_misses::total 4678 # number of demand (read+write) misses -system.cpu.icache.overall_misses::cpu.inst 4678 # number of overall misses -system.cpu.icache.overall_misses::total 4678 # number of overall misses -system.cpu.icache.ReadReq_miss_latency::cpu.inst 198432500 # number of ReadReq miss cycles -system.cpu.icache.ReadReq_miss_latency::total 198432500 # number of ReadReq miss cycles -system.cpu.icache.demand_miss_latency::cpu.inst 198432500 # number of demand (read+write) miss cycles -system.cpu.icache.demand_miss_latency::total 198432500 # number of demand (read+write) miss cycles -system.cpu.icache.overall_miss_latency::cpu.inst 198432500 # number of overall miss cycles -system.cpu.icache.overall_miss_latency::total 198432500 # number of overall miss cycles -system.cpu.icache.ReadReq_accesses::cpu.inst 70784075 # number of ReadReq accesses(hits+misses) -system.cpu.icache.ReadReq_accesses::total 70784075 # number of ReadReq accesses(hits+misses) -system.cpu.icache.demand_accesses::cpu.inst 70784075 # number of demand (read+write) accesses -system.cpu.icache.demand_accesses::total 70784075 # number of demand (read+write) accesses -system.cpu.icache.overall_accesses::cpu.inst 70784075 # number of overall (read+write) accesses -system.cpu.icache.overall_accesses::total 70784075 # number of overall (read+write) accesses +system.cpu.icache.tags.age_task_id_blocks_1024::2 490 # Occupied blocks per task id +system.cpu.icache.tags.age_task_id_blocks_1024::3 130 # Occupied blocks per task id +system.cpu.icache.tags.age_task_id_blocks_1024::4 1069 # Occupied blocks per task id +system.cpu.icache.tags.occ_task_id_percent::1024 0.878418 # Percentage of cache occupancy per task id +system.cpu.icache.tags.tag_accesses 141896719 # Number of tag accesses +system.cpu.icache.tags.data_accesses 141896719 # Number of data accesses +system.cpu.icache.pwrStateResidencyTicks::UNDEFINED 132485848500 # Cumulative time (in ticks) in various power states +system.cpu.icache.ReadReq_hits::cpu.inst 70941364 # number of ReadReq hits +system.cpu.icache.ReadReq_hits::total 70941364 # number of ReadReq hits +system.cpu.icache.demand_hits::cpu.inst 70941364 # number of demand (read+write) hits +system.cpu.icache.demand_hits::total 70941364 # number of demand (read+write) hits +system.cpu.icache.overall_hits::cpu.inst 70941364 # number of overall hits +system.cpu.icache.overall_hits::total 70941364 # number of overall hits +system.cpu.icache.ReadReq_misses::cpu.inst 4664 # number of ReadReq misses +system.cpu.icache.ReadReq_misses::total 4664 # number of ReadReq misses +system.cpu.icache.demand_misses::cpu.inst 4664 # number of demand (read+write) misses +system.cpu.icache.demand_misses::total 4664 # number of demand (read+write) misses +system.cpu.icache.overall_misses::cpu.inst 4664 # number of overall misses +system.cpu.icache.overall_misses::total 4664 # number of overall misses +system.cpu.icache.ReadReq_miss_latency::cpu.inst 200959500 # number of ReadReq miss cycles +system.cpu.icache.ReadReq_miss_latency::total 200959500 # number of ReadReq miss cycles +system.cpu.icache.demand_miss_latency::cpu.inst 200959500 # number of demand (read+write) miss cycles +system.cpu.icache.demand_miss_latency::total 200959500 # number of 
demand (read+write) miss cycles +system.cpu.icache.overall_miss_latency::cpu.inst 200959500 # number of overall miss cycles +system.cpu.icache.overall_miss_latency::total 200959500 # number of overall miss cycles +system.cpu.icache.ReadReq_accesses::cpu.inst 70946028 # number of ReadReq accesses(hits+misses) +system.cpu.icache.ReadReq_accesses::total 70946028 # number of ReadReq accesses(hits+misses) +system.cpu.icache.demand_accesses::cpu.inst 70946028 # number of demand (read+write) accesses +system.cpu.icache.demand_accesses::total 70946028 # number of demand (read+write) accesses +system.cpu.icache.overall_accesses::cpu.inst 70946028 # number of overall (read+write) accesses +system.cpu.icache.overall_accesses::total 70946028 # number of overall (read+write) accesses system.cpu.icache.ReadReq_miss_rate::cpu.inst 0.000066 # miss rate for ReadReq accesses system.cpu.icache.ReadReq_miss_rate::total 0.000066 # miss rate for ReadReq accesses system.cpu.icache.demand_miss_rate::cpu.inst 0.000066 # miss rate for demand accesses system.cpu.icache.demand_miss_rate::total 0.000066 # miss rate for demand accesses system.cpu.icache.overall_miss_rate::cpu.inst 0.000066 # miss rate for overall accesses system.cpu.icache.overall_miss_rate::total 0.000066 # miss rate for overall accesses -system.cpu.icache.ReadReq_avg_miss_latency::cpu.inst 42418.234288 # average ReadReq miss latency -system.cpu.icache.ReadReq_avg_miss_latency::total 42418.234288 # average ReadReq miss latency -system.cpu.icache.demand_avg_miss_latency::cpu.inst 42418.234288 # average overall miss latency -system.cpu.icache.demand_avg_miss_latency::total 42418.234288 # average overall miss latency -system.cpu.icache.overall_avg_miss_latency::cpu.inst 42418.234288 # average overall miss latency -system.cpu.icache.overall_avg_miss_latency::total 42418.234288 # average overall miss latency +system.cpu.icache.ReadReq_avg_miss_latency::cpu.inst 43087.371355 # average ReadReq miss latency +system.cpu.icache.ReadReq_avg_miss_latency::total 43087.371355 # average ReadReq miss latency +system.cpu.icache.demand_avg_miss_latency::cpu.inst 43087.371355 # average overall miss latency +system.cpu.icache.demand_avg_miss_latency::total 43087.371355 # average overall miss latency +system.cpu.icache.overall_avg_miss_latency::cpu.inst 43087.371355 # average overall miss latency +system.cpu.icache.overall_avg_miss_latency::total 43087.371355 # average overall miss latency system.cpu.icache.blocked_cycles::no_mshrs 0 # number of cycles access was blocked system.cpu.icache.blocked_cycles::no_targets 0 # number of cycles access was blocked system.cpu.icache.blocked::no_mshrs 0 # number of cycles access was blocked system.cpu.icache.blocked::no_targets 0 # number of cycles access was blocked system.cpu.icache.avg_blocked_cycles::no_mshrs nan # average number of cycles each access was blocked system.cpu.icache.avg_blocked_cycles::no_targets nan # average number of cycles each access was blocked -system.cpu.icache.writebacks::writebacks 2881 # number of writebacks -system.cpu.icache.writebacks::total 2881 # number of writebacks -system.cpu.icache.ReadReq_mshr_misses::cpu.inst 4678 # number of ReadReq MSHR misses -system.cpu.icache.ReadReq_mshr_misses::total 4678 # number of ReadReq MSHR misses -system.cpu.icache.demand_mshr_misses::cpu.inst 4678 # number of demand (read+write) MSHR misses -system.cpu.icache.demand_mshr_misses::total 4678 # number of demand (read+write) MSHR misses -system.cpu.icache.overall_mshr_misses::cpu.inst 4678 # number of overall MSHR 
misses -system.cpu.icache.overall_mshr_misses::total 4678 # number of overall MSHR misses -system.cpu.icache.ReadReq_mshr_miss_latency::cpu.inst 193755500 # number of ReadReq MSHR miss cycles -system.cpu.icache.ReadReq_mshr_miss_latency::total 193755500 # number of ReadReq MSHR miss cycles -system.cpu.icache.demand_mshr_miss_latency::cpu.inst 193755500 # number of demand (read+write) MSHR miss cycles -system.cpu.icache.demand_mshr_miss_latency::total 193755500 # number of demand (read+write) MSHR miss cycles -system.cpu.icache.overall_mshr_miss_latency::cpu.inst 193755500 # number of overall MSHR miss cycles -system.cpu.icache.overall_mshr_miss_latency::total 193755500 # number of overall MSHR miss cycles +system.cpu.icache.writebacks::writebacks 2864 # number of writebacks +system.cpu.icache.writebacks::total 2864 # number of writebacks +system.cpu.icache.ReadReq_mshr_misses::cpu.inst 4664 # number of ReadReq MSHR misses +system.cpu.icache.ReadReq_mshr_misses::total 4664 # number of ReadReq MSHR misses +system.cpu.icache.demand_mshr_misses::cpu.inst 4664 # number of demand (read+write) MSHR misses +system.cpu.icache.demand_mshr_misses::total 4664 # number of demand (read+write) MSHR misses +system.cpu.icache.overall_mshr_misses::cpu.inst 4664 # number of overall MSHR misses +system.cpu.icache.overall_mshr_misses::total 4664 # number of overall MSHR misses +system.cpu.icache.ReadReq_mshr_miss_latency::cpu.inst 196296500 # number of ReadReq MSHR miss cycles +system.cpu.icache.ReadReq_mshr_miss_latency::total 196296500 # number of ReadReq MSHR miss cycles +system.cpu.icache.demand_mshr_miss_latency::cpu.inst 196296500 # number of demand (read+write) MSHR miss cycles +system.cpu.icache.demand_mshr_miss_latency::total 196296500 # number of demand (read+write) MSHR miss cycles +system.cpu.icache.overall_mshr_miss_latency::cpu.inst 196296500 # number of overall MSHR miss cycles +system.cpu.icache.overall_mshr_miss_latency::total 196296500 # number of overall MSHR miss cycles system.cpu.icache.ReadReq_mshr_miss_rate::cpu.inst 0.000066 # mshr miss rate for ReadReq accesses system.cpu.icache.ReadReq_mshr_miss_rate::total 0.000066 # mshr miss rate for ReadReq accesses system.cpu.icache.demand_mshr_miss_rate::cpu.inst 0.000066 # mshr miss rate for demand accesses system.cpu.icache.demand_mshr_miss_rate::total 0.000066 # mshr miss rate for demand accesses system.cpu.icache.overall_mshr_miss_rate::cpu.inst 0.000066 # mshr miss rate for overall accesses system.cpu.icache.overall_mshr_miss_rate::total 0.000066 # mshr miss rate for overall accesses -system.cpu.icache.ReadReq_avg_mshr_miss_latency::cpu.inst 41418.448055 # average ReadReq mshr miss latency -system.cpu.icache.ReadReq_avg_mshr_miss_latency::total 41418.448055 # average ReadReq mshr miss latency -system.cpu.icache.demand_avg_mshr_miss_latency::cpu.inst 41418.448055 # average overall mshr miss latency -system.cpu.icache.demand_avg_mshr_miss_latency::total 41418.448055 # average overall mshr miss latency -system.cpu.icache.overall_avg_mshr_miss_latency::cpu.inst 41418.448055 # average overall mshr miss latency -system.cpu.icache.overall_avg_mshr_miss_latency::total 41418.448055 # average overall mshr miss latency -system.cpu.l2cache.tags.pwrStateResidencyTicks::UNDEFINED 130382890500 # Cumulative time (in ticks) in various power states +system.cpu.icache.ReadReq_avg_mshr_miss_latency::cpu.inst 42087.585763 # average ReadReq mshr miss latency +system.cpu.icache.ReadReq_avg_mshr_miss_latency::total 42087.585763 # average ReadReq mshr miss latency 
+system.cpu.icache.demand_avg_mshr_miss_latency::cpu.inst 42087.585763 # average overall mshr miss latency +system.cpu.icache.demand_avg_mshr_miss_latency::total 42087.585763 # average overall mshr miss latency +system.cpu.icache.overall_avg_mshr_miss_latency::cpu.inst 42087.585763 # average overall mshr miss latency +system.cpu.icache.overall_avg_mshr_miss_latency::total 42087.585763 # average overall mshr miss latency +system.cpu.l2cache.tags.pwrStateResidencyTicks::UNDEFINED 132485848500 # Cumulative time (in ticks) in various power states system.cpu.l2cache.tags.replacements 0 # number of replacements -system.cpu.l2cache.tags.tagsinuse 1999.548128 # Cycle average of tags in use -system.cpu.l2cache.tags.total_refs 5178 # Total number of references to valid blocks. -system.cpu.l2cache.tags.sampled_refs 2783 # Sample count of references to valid blocks. -system.cpu.l2cache.tags.avg_refs 1.860582 # Average number of references to valid blocks. +system.cpu.l2cache.tags.tagsinuse 2000.553914 # Cycle average of tags in use +system.cpu.l2cache.tags.total_refs 5137 # Total number of references to valid blocks. +system.cpu.l2cache.tags.sampled_refs 2785 # Sample count of references to valid blocks. +system.cpu.l2cache.tags.avg_refs 1.844524 # Average number of references to valid blocks. system.cpu.l2cache.tags.warmup_cycle 0 # Cycle when the warmup percentage was hit. -system.cpu.l2cache.tags.occ_blocks::writebacks 3.029345 # Average occupied blocks per requestor -system.cpu.l2cache.tags.occ_blocks::cpu.inst 1506.706963 # Average occupied blocks per requestor -system.cpu.l2cache.tags.occ_blocks::cpu.data 489.811820 # Average occupied blocks per requestor +system.cpu.l2cache.tags.occ_blocks::writebacks 3.029612 # Average occupied blocks per requestor +system.cpu.l2cache.tags.occ_blocks::cpu.inst 1507.714154 # Average occupied blocks per requestor +system.cpu.l2cache.tags.occ_blocks::cpu.data 489.810148 # Average occupied blocks per requestor system.cpu.l2cache.tags.occ_percent::writebacks 0.000092 # Average percentage of cache occupancy -system.cpu.l2cache.tags.occ_percent::cpu.inst 0.045981 # Average percentage of cache occupancy +system.cpu.l2cache.tags.occ_percent::cpu.inst 0.046012 # Average percentage of cache occupancy system.cpu.l2cache.tags.occ_percent::cpu.data 0.014948 # Average percentage of cache occupancy -system.cpu.l2cache.tags.occ_percent::total 0.061021 # Average percentage of cache occupancy -system.cpu.l2cache.tags.occ_task_id_blocks::1024 2783 # Occupied blocks per task id +system.cpu.l2cache.tags.occ_percent::total 0.061052 # Average percentage of cache occupancy +system.cpu.l2cache.tags.occ_task_id_blocks::1024 2785 # Occupied blocks per task id system.cpu.l2cache.tags.age_task_id_blocks_1024::0 37 # Occupied blocks per task id system.cpu.l2cache.tags.age_task_id_blocks_1024::1 68 # Occupied blocks per task id -system.cpu.l2cache.tags.age_task_id_blocks_1024::2 526 # Occupied blocks per task id -system.cpu.l2cache.tags.age_task_id_blocks_1024::3 149 # Occupied blocks per task id -system.cpu.l2cache.tags.age_task_id_blocks_1024::4 2003 # Occupied blocks per task id -system.cpu.l2cache.tags.occ_task_id_percent::1024 0.084930 # Percentage of cache occupancy per task id -system.cpu.l2cache.tags.tag_accesses 76554 # Number of tag accesses -system.cpu.l2cache.tags.data_accesses 76554 # Number of data accesses -system.cpu.l2cache.pwrStateResidencyTicks::UNDEFINED 130382890500 # Cumulative time (in ticks) in various power states +system.cpu.l2cache.tags.age_task_id_blocks_1024::2 
520 # Occupied blocks per task id +system.cpu.l2cache.tags.age_task_id_blocks_1024::3 156 # Occupied blocks per task id +system.cpu.l2cache.tags.age_task_id_blocks_1024::4 2004 # Occupied blocks per task id +system.cpu.l2cache.tags.occ_task_id_percent::1024 0.084991 # Percentage of cache occupancy per task id +system.cpu.l2cache.tags.tag_accesses 76244 # Number of tag accesses +system.cpu.l2cache.tags.data_accesses 76244 # Number of data accesses +system.cpu.l2cache.pwrStateResidencyTicks::UNDEFINED 132485848500 # Cumulative time (in ticks) in various power states system.cpu.l2cache.WritebackDirty_hits::writebacks 16 # number of WritebackDirty hits system.cpu.l2cache.WritebackDirty_hits::total 16 # number of WritebackDirty hits -system.cpu.l2cache.WritebackClean_hits::writebacks 2559 # number of WritebackClean hits -system.cpu.l2cache.WritebackClean_hits::total 2559 # number of WritebackClean hits +system.cpu.l2cache.WritebackClean_hits::writebacks 2534 # number of WritebackClean hits +system.cpu.l2cache.WritebackClean_hits::total 2534 # number of WritebackClean hits system.cpu.l2cache.ReadExReq_hits::cpu.data 8 # number of ReadExReq hits system.cpu.l2cache.ReadExReq_hits::total 8 # number of ReadExReq hits -system.cpu.l2cache.ReadCleanReq_hits::cpu.inst 2517 # number of ReadCleanReq hits -system.cpu.l2cache.ReadCleanReq_hits::total 2517 # number of ReadCleanReq hits -system.cpu.l2cache.ReadSharedReq_hits::cpu.data 81 # number of ReadSharedReq hits -system.cpu.l2cache.ReadSharedReq_hits::total 81 # number of ReadSharedReq hits -system.cpu.l2cache.demand_hits::cpu.inst 2517 # number of demand (read+write) hits -system.cpu.l2cache.demand_hits::cpu.data 89 # number of demand (read+write) hits -system.cpu.l2cache.demand_hits::total 2606 # number of demand (read+write) hits -system.cpu.l2cache.overall_hits::cpu.inst 2517 # number of overall hits -system.cpu.l2cache.overall_hits::cpu.data 89 # number of overall hits -system.cpu.l2cache.overall_hits::total 2606 # number of overall hits +system.cpu.l2cache.ReadCleanReq_hits::cpu.inst 2502 # number of ReadCleanReq hits +system.cpu.l2cache.ReadCleanReq_hits::total 2502 # number of ReadCleanReq hits +system.cpu.l2cache.ReadSharedReq_hits::cpu.data 80 # number of ReadSharedReq hits +system.cpu.l2cache.ReadSharedReq_hits::total 80 # number of ReadSharedReq hits +system.cpu.l2cache.demand_hits::cpu.inst 2502 # number of demand (read+write) hits +system.cpu.l2cache.demand_hits::cpu.data 88 # number of demand (read+write) hits +system.cpu.l2cache.demand_hits::total 2590 # number of demand (read+write) hits +system.cpu.l2cache.overall_hits::cpu.inst 2502 # number of overall hits +system.cpu.l2cache.overall_hits::cpu.data 88 # number of overall hits +system.cpu.l2cache.overall_hits::total 2590 # number of overall hits system.cpu.l2cache.ReadExReq_misses::cpu.data 1091 # number of ReadExReq misses system.cpu.l2cache.ReadExReq_misses::total 1091 # number of ReadExReq misses -system.cpu.l2cache.ReadCleanReq_misses::cpu.inst 2161 # number of ReadCleanReq misses -system.cpu.l2cache.ReadCleanReq_misses::total 2161 # number of ReadCleanReq misses -system.cpu.l2cache.ReadSharedReq_misses::cpu.data 631 # number of ReadSharedReq misses -system.cpu.l2cache.ReadSharedReq_misses::total 631 # number of ReadSharedReq misses -system.cpu.l2cache.demand_misses::cpu.inst 2161 # number of demand (read+write) misses -system.cpu.l2cache.demand_misses::cpu.data 1722 # number of demand (read+write) misses -system.cpu.l2cache.demand_misses::total 3883 # number of demand 
(read+write) misses -system.cpu.l2cache.overall_misses::cpu.inst 2161 # number of overall misses -system.cpu.l2cache.overall_misses::cpu.data 1722 # number of overall misses -system.cpu.l2cache.overall_misses::total 3883 # number of overall misses -system.cpu.l2cache.ReadExReq_miss_latency::cpu.data 83479000 # number of ReadExReq miss cycles -system.cpu.l2cache.ReadExReq_miss_latency::total 83479000 # number of ReadExReq miss cycles -system.cpu.l2cache.ReadCleanReq_miss_latency::cpu.inst 159937500 # number of ReadCleanReq miss cycles -system.cpu.l2cache.ReadCleanReq_miss_latency::total 159937500 # number of ReadCleanReq miss cycles -system.cpu.l2cache.ReadSharedReq_miss_latency::cpu.data 50622000 # number of ReadSharedReq miss cycles -system.cpu.l2cache.ReadSharedReq_miss_latency::total 50622000 # number of ReadSharedReq miss cycles -system.cpu.l2cache.demand_miss_latency::cpu.inst 159937500 # number of demand (read+write) miss cycles -system.cpu.l2cache.demand_miss_latency::cpu.data 134101000 # number of demand (read+write) miss cycles -system.cpu.l2cache.demand_miss_latency::total 294038500 # number of demand (read+write) miss cycles -system.cpu.l2cache.overall_miss_latency::cpu.inst 159937500 # number of overall miss cycles -system.cpu.l2cache.overall_miss_latency::cpu.data 134101000 # number of overall miss cycles -system.cpu.l2cache.overall_miss_latency::total 294038500 # number of overall miss cycles +system.cpu.l2cache.ReadCleanReq_misses::cpu.inst 2162 # number of ReadCleanReq misses +system.cpu.l2cache.ReadCleanReq_misses::total 2162 # number of ReadCleanReq misses +system.cpu.l2cache.ReadSharedReq_misses::cpu.data 632 # number of ReadSharedReq misses +system.cpu.l2cache.ReadSharedReq_misses::total 632 # number of ReadSharedReq misses +system.cpu.l2cache.demand_misses::cpu.inst 2162 # number of demand (read+write) misses +system.cpu.l2cache.demand_misses::cpu.data 1723 # number of demand (read+write) misses +system.cpu.l2cache.demand_misses::total 3885 # number of demand (read+write) misses +system.cpu.l2cache.overall_misses::cpu.inst 2162 # number of overall misses +system.cpu.l2cache.overall_misses::cpu.data 1723 # number of overall misses +system.cpu.l2cache.overall_misses::total 3885 # number of overall misses +system.cpu.l2cache.ReadExReq_miss_latency::cpu.data 84399500 # number of ReadExReq miss cycles +system.cpu.l2cache.ReadExReq_miss_latency::total 84399500 # number of ReadExReq miss cycles +system.cpu.l2cache.ReadCleanReq_miss_latency::cpu.inst 162646500 # number of ReadCleanReq miss cycles +system.cpu.l2cache.ReadCleanReq_miss_latency::total 162646500 # number of ReadCleanReq miss cycles +system.cpu.l2cache.ReadSharedReq_miss_latency::cpu.data 50260000 # number of ReadSharedReq miss cycles +system.cpu.l2cache.ReadSharedReq_miss_latency::total 50260000 # number of ReadSharedReq miss cycles +system.cpu.l2cache.demand_miss_latency::cpu.inst 162646500 # number of demand (read+write) miss cycles +system.cpu.l2cache.demand_miss_latency::cpu.data 134659500 # number of demand (read+write) miss cycles +system.cpu.l2cache.demand_miss_latency::total 297306000 # number of demand (read+write) miss cycles +system.cpu.l2cache.overall_miss_latency::cpu.inst 162646500 # number of overall miss cycles +system.cpu.l2cache.overall_miss_latency::cpu.data 134659500 # number of overall miss cycles +system.cpu.l2cache.overall_miss_latency::total 297306000 # number of overall miss cycles system.cpu.l2cache.WritebackDirty_accesses::writebacks 16 # number of WritebackDirty accesses(hits+misses) 
system.cpu.l2cache.WritebackDirty_accesses::total 16 # number of WritebackDirty accesses(hits+misses) -system.cpu.l2cache.WritebackClean_accesses::writebacks 2559 # number of WritebackClean accesses(hits+misses) -system.cpu.l2cache.WritebackClean_accesses::total 2559 # number of WritebackClean accesses(hits+misses) +system.cpu.l2cache.WritebackClean_accesses::writebacks 2534 # number of WritebackClean accesses(hits+misses) +system.cpu.l2cache.WritebackClean_accesses::total 2534 # number of WritebackClean accesses(hits+misses) system.cpu.l2cache.ReadExReq_accesses::cpu.data 1099 # number of ReadExReq accesses(hits+misses) system.cpu.l2cache.ReadExReq_accesses::total 1099 # number of ReadExReq accesses(hits+misses) -system.cpu.l2cache.ReadCleanReq_accesses::cpu.inst 4678 # number of ReadCleanReq accesses(hits+misses) -system.cpu.l2cache.ReadCleanReq_accesses::total 4678 # number of ReadCleanReq accesses(hits+misses) +system.cpu.l2cache.ReadCleanReq_accesses::cpu.inst 4664 # number of ReadCleanReq accesses(hits+misses) +system.cpu.l2cache.ReadCleanReq_accesses::total 4664 # number of ReadCleanReq accesses(hits+misses) system.cpu.l2cache.ReadSharedReq_accesses::cpu.data 712 # number of ReadSharedReq accesses(hits+misses) system.cpu.l2cache.ReadSharedReq_accesses::total 712 # number of ReadSharedReq accesses(hits+misses) -system.cpu.l2cache.demand_accesses::cpu.inst 4678 # number of demand (read+write) accesses +system.cpu.l2cache.demand_accesses::cpu.inst 4664 # number of demand (read+write) accesses system.cpu.l2cache.demand_accesses::cpu.data 1811 # number of demand (read+write) accesses -system.cpu.l2cache.demand_accesses::total 6489 # number of demand (read+write) accesses -system.cpu.l2cache.overall_accesses::cpu.inst 4678 # number of overall (read+write) accesses +system.cpu.l2cache.demand_accesses::total 6475 # number of demand (read+write) accesses +system.cpu.l2cache.overall_accesses::cpu.inst 4664 # number of overall (read+write) accesses system.cpu.l2cache.overall_accesses::cpu.data 1811 # number of overall (read+write) accesses -system.cpu.l2cache.overall_accesses::total 6489 # number of overall (read+write) accesses +system.cpu.l2cache.overall_accesses::total 6475 # number of overall (read+write) accesses system.cpu.l2cache.ReadExReq_miss_rate::cpu.data 0.992721 # miss rate for ReadExReq accesses system.cpu.l2cache.ReadExReq_miss_rate::total 0.992721 # miss rate for ReadExReq accesses -system.cpu.l2cache.ReadCleanReq_miss_rate::cpu.inst 0.461950 # miss rate for ReadCleanReq accesses -system.cpu.l2cache.ReadCleanReq_miss_rate::total 0.461950 # miss rate for ReadCleanReq accesses -system.cpu.l2cache.ReadSharedReq_miss_rate::cpu.data 0.886236 # miss rate for ReadSharedReq accesses -system.cpu.l2cache.ReadSharedReq_miss_rate::total 0.886236 # miss rate for ReadSharedReq accesses -system.cpu.l2cache.demand_miss_rate::cpu.inst 0.461950 # miss rate for demand accesses -system.cpu.l2cache.demand_miss_rate::cpu.data 0.950856 # miss rate for demand accesses -system.cpu.l2cache.demand_miss_rate::total 0.598397 # miss rate for demand accesses -system.cpu.l2cache.overall_miss_rate::cpu.inst 0.461950 # miss rate for overall accesses -system.cpu.l2cache.overall_miss_rate::cpu.data 0.950856 # miss rate for overall accesses -system.cpu.l2cache.overall_miss_rate::total 0.598397 # miss rate for overall accesses -system.cpu.l2cache.ReadExReq_avg_miss_latency::cpu.data 76516.040330 # average ReadExReq miss latency -system.cpu.l2cache.ReadExReq_avg_miss_latency::total 76516.040330 # average ReadExReq 
miss latency -system.cpu.l2cache.ReadCleanReq_avg_miss_latency::cpu.inst 74010.874595 # average ReadCleanReq miss latency -system.cpu.l2cache.ReadCleanReq_avg_miss_latency::total 74010.874595 # average ReadCleanReq miss latency -system.cpu.l2cache.ReadSharedReq_avg_miss_latency::cpu.data 80225.039620 # average ReadSharedReq miss latency -system.cpu.l2cache.ReadSharedReq_avg_miss_latency::total 80225.039620 # average ReadSharedReq miss latency -system.cpu.l2cache.demand_avg_miss_latency::cpu.inst 74010.874595 # average overall miss latency -system.cpu.l2cache.demand_avg_miss_latency::cpu.data 77875.145180 # average overall miss latency -system.cpu.l2cache.demand_avg_miss_latency::total 75724.568633 # average overall miss latency -system.cpu.l2cache.overall_avg_miss_latency::cpu.inst 74010.874595 # average overall miss latency -system.cpu.l2cache.overall_avg_miss_latency::cpu.data 77875.145180 # average overall miss latency -system.cpu.l2cache.overall_avg_miss_latency::total 75724.568633 # average overall miss latency +system.cpu.l2cache.ReadCleanReq_miss_rate::cpu.inst 0.463551 # miss rate for ReadCleanReq accesses +system.cpu.l2cache.ReadCleanReq_miss_rate::total 0.463551 # miss rate for ReadCleanReq accesses +system.cpu.l2cache.ReadSharedReq_miss_rate::cpu.data 0.887640 # miss rate for ReadSharedReq accesses +system.cpu.l2cache.ReadSharedReq_miss_rate::total 0.887640 # miss rate for ReadSharedReq accesses +system.cpu.l2cache.demand_miss_rate::cpu.inst 0.463551 # miss rate for demand accesses +system.cpu.l2cache.demand_miss_rate::cpu.data 0.951408 # miss rate for demand accesses +system.cpu.l2cache.demand_miss_rate::total 0.600000 # miss rate for demand accesses +system.cpu.l2cache.overall_miss_rate::cpu.inst 0.463551 # miss rate for overall accesses +system.cpu.l2cache.overall_miss_rate::cpu.data 0.951408 # miss rate for overall accesses +system.cpu.l2cache.overall_miss_rate::total 0.600000 # miss rate for overall accesses +system.cpu.l2cache.ReadExReq_avg_miss_latency::cpu.data 77359.761687 # average ReadExReq miss latency +system.cpu.l2cache.ReadExReq_avg_miss_latency::total 77359.761687 # average ReadExReq miss latency +system.cpu.l2cache.ReadCleanReq_avg_miss_latency::cpu.inst 75229.648474 # average ReadCleanReq miss latency +system.cpu.l2cache.ReadCleanReq_avg_miss_latency::total 75229.648474 # average ReadCleanReq miss latency +system.cpu.l2cache.ReadSharedReq_avg_miss_latency::cpu.data 79525.316456 # average ReadSharedReq miss latency +system.cpu.l2cache.ReadSharedReq_avg_miss_latency::total 79525.316456 # average ReadSharedReq miss latency +system.cpu.l2cache.demand_avg_miss_latency::cpu.inst 75229.648474 # average overall miss latency +system.cpu.l2cache.demand_avg_miss_latency::cpu.data 78154.091701 # average overall miss latency +system.cpu.l2cache.demand_avg_miss_latency::total 76526.640927 # average overall miss latency +system.cpu.l2cache.overall_avg_miss_latency::cpu.inst 75229.648474 # average overall miss latency +system.cpu.l2cache.overall_avg_miss_latency::cpu.data 78154.091701 # average overall miss latency +system.cpu.l2cache.overall_avg_miss_latency::total 76526.640927 # average overall miss latency system.cpu.l2cache.blocked_cycles::no_mshrs 0 # number of cycles access was blocked system.cpu.l2cache.blocked_cycles::no_targets 0 # number of cycles access was blocked system.cpu.l2cache.blocked::no_mshrs 0 # number of cycles access was blocked system.cpu.l2cache.blocked::no_targets 0 # number of cycles access was blocked system.cpu.l2cache.avg_blocked_cycles::no_mshrs 
nan # average number of cycles each access was blocked system.cpu.l2cache.avg_blocked_cycles::no_targets nan # average number of cycles each access was blocked -system.cpu.l2cache.ReadCleanReq_mshr_hits::cpu.inst 2 # number of ReadCleanReq MSHR hits -system.cpu.l2cache.ReadCleanReq_mshr_hits::total 2 # number of ReadCleanReq MSHR hits -system.cpu.l2cache.ReadSharedReq_mshr_hits::cpu.data 14 # number of ReadSharedReq MSHR hits -system.cpu.l2cache.ReadSharedReq_mshr_hits::total 14 # number of ReadSharedReq MSHR hits -system.cpu.l2cache.demand_mshr_hits::cpu.inst 2 # number of demand (read+write) MSHR hits -system.cpu.l2cache.demand_mshr_hits::cpu.data 14 # number of demand (read+write) MSHR hits +system.cpu.l2cache.ReadCleanReq_mshr_hits::cpu.inst 1 # number of ReadCleanReq MSHR hits +system.cpu.l2cache.ReadCleanReq_mshr_hits::total 1 # number of ReadCleanReq MSHR hits +system.cpu.l2cache.ReadSharedReq_mshr_hits::cpu.data 15 # number of ReadSharedReq MSHR hits +system.cpu.l2cache.ReadSharedReq_mshr_hits::total 15 # number of ReadSharedReq MSHR hits +system.cpu.l2cache.demand_mshr_hits::cpu.inst 1 # number of demand (read+write) MSHR hits +system.cpu.l2cache.demand_mshr_hits::cpu.data 15 # number of demand (read+write) MSHR hits system.cpu.l2cache.demand_mshr_hits::total 16 # number of demand (read+write) MSHR hits -system.cpu.l2cache.overall_mshr_hits::cpu.inst 2 # number of overall MSHR hits -system.cpu.l2cache.overall_mshr_hits::cpu.data 14 # number of overall MSHR hits +system.cpu.l2cache.overall_mshr_hits::cpu.inst 1 # number of overall MSHR hits +system.cpu.l2cache.overall_mshr_hits::cpu.data 15 # number of overall MSHR hits system.cpu.l2cache.overall_mshr_hits::total 16 # number of overall MSHR hits system.cpu.l2cache.ReadExReq_mshr_misses::cpu.data 1091 # number of ReadExReq MSHR misses system.cpu.l2cache.ReadExReq_mshr_misses::total 1091 # number of ReadExReq MSHR misses -system.cpu.l2cache.ReadCleanReq_mshr_misses::cpu.inst 2159 # number of ReadCleanReq MSHR misses -system.cpu.l2cache.ReadCleanReq_mshr_misses::total 2159 # number of ReadCleanReq MSHR misses +system.cpu.l2cache.ReadCleanReq_mshr_misses::cpu.inst 2161 # number of ReadCleanReq MSHR misses +system.cpu.l2cache.ReadCleanReq_mshr_misses::total 2161 # number of ReadCleanReq MSHR misses system.cpu.l2cache.ReadSharedReq_mshr_misses::cpu.data 617 # number of ReadSharedReq MSHR misses system.cpu.l2cache.ReadSharedReq_mshr_misses::total 617 # number of ReadSharedReq MSHR misses -system.cpu.l2cache.demand_mshr_misses::cpu.inst 2159 # number of demand (read+write) MSHR misses +system.cpu.l2cache.demand_mshr_misses::cpu.inst 2161 # number of demand (read+write) MSHR misses system.cpu.l2cache.demand_mshr_misses::cpu.data 1708 # number of demand (read+write) MSHR misses -system.cpu.l2cache.demand_mshr_misses::total 3867 # number of demand (read+write) MSHR misses -system.cpu.l2cache.overall_mshr_misses::cpu.inst 2159 # number of overall MSHR misses +system.cpu.l2cache.demand_mshr_misses::total 3869 # number of demand (read+write) MSHR misses +system.cpu.l2cache.overall_mshr_misses::cpu.inst 2161 # number of overall MSHR misses system.cpu.l2cache.overall_mshr_misses::cpu.data 1708 # number of overall MSHR misses -system.cpu.l2cache.overall_mshr_misses::total 3867 # number of overall MSHR misses -system.cpu.l2cache.ReadExReq_mshr_miss_latency::cpu.data 72569000 # number of ReadExReq MSHR miss cycles -system.cpu.l2cache.ReadExReq_mshr_miss_latency::total 72569000 # number of ReadExReq MSHR miss cycles 
-system.cpu.l2cache.ReadCleanReq_mshr_miss_latency::cpu.inst 138134000 # number of ReadCleanReq MSHR miss cycles -system.cpu.l2cache.ReadCleanReq_mshr_miss_latency::total 138134000 # number of ReadCleanReq MSHR miss cycles -system.cpu.l2cache.ReadSharedReq_mshr_miss_latency::cpu.data 43490000 # number of ReadSharedReq MSHR miss cycles -system.cpu.l2cache.ReadSharedReq_mshr_miss_latency::total 43490000 # number of ReadSharedReq MSHR miss cycles -system.cpu.l2cache.demand_mshr_miss_latency::cpu.inst 138134000 # number of demand (read+write) MSHR miss cycles -system.cpu.l2cache.demand_mshr_miss_latency::cpu.data 116059000 # number of demand (read+write) MSHR miss cycles -system.cpu.l2cache.demand_mshr_miss_latency::total 254193000 # number of demand (read+write) MSHR miss cycles -system.cpu.l2cache.overall_mshr_miss_latency::cpu.inst 138134000 # number of overall MSHR miss cycles -system.cpu.l2cache.overall_mshr_miss_latency::cpu.data 116059000 # number of overall MSHR miss cycles -system.cpu.l2cache.overall_mshr_miss_latency::total 254193000 # number of overall MSHR miss cycles +system.cpu.l2cache.overall_mshr_misses::total 3869 # number of overall MSHR misses +system.cpu.l2cache.ReadExReq_mshr_miss_latency::cpu.data 73489500 # number of ReadExReq MSHR miss cycles +system.cpu.l2cache.ReadExReq_mshr_miss_latency::total 73489500 # number of ReadExReq MSHR miss cycles +system.cpu.l2cache.ReadCleanReq_mshr_miss_latency::cpu.inst 140980000 # number of ReadCleanReq MSHR miss cycles +system.cpu.l2cache.ReadCleanReq_mshr_miss_latency::total 140980000 # number of ReadCleanReq MSHR miss cycles +system.cpu.l2cache.ReadSharedReq_mshr_miss_latency::cpu.data 43051500 # number of ReadSharedReq MSHR miss cycles +system.cpu.l2cache.ReadSharedReq_mshr_miss_latency::total 43051500 # number of ReadSharedReq MSHR miss cycles +system.cpu.l2cache.demand_mshr_miss_latency::cpu.inst 140980000 # number of demand (read+write) MSHR miss cycles +system.cpu.l2cache.demand_mshr_miss_latency::cpu.data 116541000 # number of demand (read+write) MSHR miss cycles +system.cpu.l2cache.demand_mshr_miss_latency::total 257521000 # number of demand (read+write) MSHR miss cycles +system.cpu.l2cache.overall_mshr_miss_latency::cpu.inst 140980000 # number of overall MSHR miss cycles +system.cpu.l2cache.overall_mshr_miss_latency::cpu.data 116541000 # number of overall MSHR miss cycles +system.cpu.l2cache.overall_mshr_miss_latency::total 257521000 # number of overall MSHR miss cycles system.cpu.l2cache.ReadExReq_mshr_miss_rate::cpu.data 0.992721 # mshr miss rate for ReadExReq accesses system.cpu.l2cache.ReadExReq_mshr_miss_rate::total 0.992721 # mshr miss rate for ReadExReq accesses -system.cpu.l2cache.ReadCleanReq_mshr_miss_rate::cpu.inst 0.461522 # mshr miss rate for ReadCleanReq accesses -system.cpu.l2cache.ReadCleanReq_mshr_miss_rate::total 0.461522 # mshr miss rate for ReadCleanReq accesses +system.cpu.l2cache.ReadCleanReq_mshr_miss_rate::cpu.inst 0.463336 # mshr miss rate for ReadCleanReq accesses +system.cpu.l2cache.ReadCleanReq_mshr_miss_rate::total 0.463336 # mshr miss rate for ReadCleanReq accesses system.cpu.l2cache.ReadSharedReq_mshr_miss_rate::cpu.data 0.866573 # mshr miss rate for ReadSharedReq accesses system.cpu.l2cache.ReadSharedReq_mshr_miss_rate::total 0.866573 # mshr miss rate for ReadSharedReq accesses -system.cpu.l2cache.demand_mshr_miss_rate::cpu.inst 0.461522 # mshr miss rate for demand accesses +system.cpu.l2cache.demand_mshr_miss_rate::cpu.inst 0.463336 # mshr miss rate for demand accesses 
system.cpu.l2cache.demand_mshr_miss_rate::cpu.data 0.943125 # mshr miss rate for demand accesses -system.cpu.l2cache.demand_mshr_miss_rate::total 0.595932 # mshr miss rate for demand accesses -system.cpu.l2cache.overall_mshr_miss_rate::cpu.inst 0.461522 # mshr miss rate for overall accesses +system.cpu.l2cache.demand_mshr_miss_rate::total 0.597529 # mshr miss rate for demand accesses +system.cpu.l2cache.overall_mshr_miss_rate::cpu.inst 0.463336 # mshr miss rate for overall accesses system.cpu.l2cache.overall_mshr_miss_rate::cpu.data 0.943125 # mshr miss rate for overall accesses -system.cpu.l2cache.overall_mshr_miss_rate::total 0.595932 # mshr miss rate for overall accesses -system.cpu.l2cache.ReadExReq_avg_mshr_miss_latency::cpu.data 66516.040330 # average ReadExReq mshr miss latency -system.cpu.l2cache.ReadExReq_avg_mshr_miss_latency::total 66516.040330 # average ReadExReq mshr miss latency -system.cpu.l2cache.ReadCleanReq_avg_mshr_miss_latency::cpu.inst 63980.546549 # average ReadCleanReq mshr miss latency -system.cpu.l2cache.ReadCleanReq_avg_mshr_miss_latency::total 63980.546549 # average ReadCleanReq mshr miss latency -system.cpu.l2cache.ReadSharedReq_avg_mshr_miss_latency::cpu.data 70486.223663 # average ReadSharedReq mshr miss latency -system.cpu.l2cache.ReadSharedReq_avg_mshr_miss_latency::total 70486.223663 # average ReadSharedReq mshr miss latency -system.cpu.l2cache.demand_avg_mshr_miss_latency::cpu.inst 63980.546549 # average overall mshr miss latency -system.cpu.l2cache.demand_avg_mshr_miss_latency::cpu.data 67950.234192 # average overall mshr miss latency -system.cpu.l2cache.demand_avg_mshr_miss_latency::total 65733.902250 # average overall mshr miss latency -system.cpu.l2cache.overall_avg_mshr_miss_latency::cpu.inst 63980.546549 # average overall mshr miss latency -system.cpu.l2cache.overall_avg_mshr_miss_latency::cpu.data 67950.234192 # average overall mshr miss latency -system.cpu.l2cache.overall_avg_mshr_miss_latency::total 65733.902250 # average overall mshr miss latency -system.cpu.toL2Bus.snoop_filter.tot_requests 9412 # Total number of requests made to the snoop filter. -system.cpu.toL2Bus.snoop_filter.hit_single_requests 3057 # Number of requests hitting in the snoop filter with a single holder of the requested data. -system.cpu.toL2Bus.snoop_filter.hit_multi_requests 328 # Number of requests hitting in the snoop filter with multiple (>1) holders of the requested data. 
+system.cpu.l2cache.overall_mshr_miss_rate::total 0.597529 # mshr miss rate for overall accesses +system.cpu.l2cache.ReadExReq_avg_mshr_miss_latency::cpu.data 67359.761687 # average ReadExReq mshr miss latency +system.cpu.l2cache.ReadExReq_avg_mshr_miss_latency::total 67359.761687 # average ReadExReq mshr miss latency +system.cpu.l2cache.ReadCleanReq_avg_mshr_miss_latency::cpu.inst 65238.315595 # average ReadCleanReq mshr miss latency +system.cpu.l2cache.ReadCleanReq_avg_mshr_miss_latency::total 65238.315595 # average ReadCleanReq mshr miss latency +system.cpu.l2cache.ReadSharedReq_avg_mshr_miss_latency::cpu.data 69775.526742 # average ReadSharedReq mshr miss latency +system.cpu.l2cache.ReadSharedReq_avg_mshr_miss_latency::total 69775.526742 # average ReadSharedReq mshr miss latency +system.cpu.l2cache.demand_avg_mshr_miss_latency::cpu.inst 65238.315595 # average overall mshr miss latency +system.cpu.l2cache.demand_avg_mshr_miss_latency::cpu.data 68232.435597 # average overall mshr miss latency +system.cpu.l2cache.demand_avg_mshr_miss_latency::total 66560.093047 # average overall mshr miss latency +system.cpu.l2cache.overall_avg_mshr_miss_latency::cpu.inst 65238.315595 # average overall mshr miss latency +system.cpu.l2cache.overall_avg_mshr_miss_latency::cpu.data 68232.435597 # average overall mshr miss latency +system.cpu.l2cache.overall_avg_mshr_miss_latency::total 66560.093047 # average overall mshr miss latency +system.cpu.toL2Bus.snoop_filter.tot_requests 9381 # Total number of requests made to the snoop filter. +system.cpu.toL2Bus.snoop_filter.hit_single_requests 3042 # Number of requests hitting in the snoop filter with a single holder of the requested data. +system.cpu.toL2Bus.snoop_filter.hit_multi_requests 336 # Number of requests hitting in the snoop filter with multiple (>1) holders of the requested data. system.cpu.toL2Bus.snoop_filter.tot_snoops 0 # Total number of snoops made to the snoop filter. system.cpu.toL2Bus.snoop_filter.hit_single_snoops 0 # Number of snoops hitting in the snoop filter with a single holder of the requested data. system.cpu.toL2Bus.snoop_filter.hit_multi_snoops 0 # Number of snoops hitting in the snoop filter with multiple (>1) holders of the requested data. 
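The averaged MSHR figures in this stats.txt hunk are derived quantities. A minimal cross-check sketch, assuming the usual gem5 formula definitions (average latency = total MSHR miss latency / MSHR misses, rate = MSHR misses / accesses) and taking the per-request access counts from the toL2Bus trans_dist lines further down in the same hunk, reproduces the updated reference values:

# Cross-check of the derived L2 MSHR statistics against the raw counters
# quoted in this hunk. The formulas are assumed to be the standard gem5
# definitions: avg = total latency / misses, rate = misses / accesses.
# Access counts (1099/4664/712) are taken from the toL2Bus trans_dist lines below.
raw = {
    # request type: (mshr_miss_latency [ticks], mshr_misses, accesses)
    "ReadExReq":     (73_489_500,  1091, 1099),
    "ReadCleanReq":  (140_980_000, 2161, 4664),
    "ReadSharedReq": (43_051_500,   617,  712),
}

for name, (latency, misses, accesses) in raw.items():
    print(f"{name}: avg_mshr_miss_latency={latency / misses:.6f} "
          f"mshr_miss_rate={misses / accesses:.6f}")

# Demand totals are the sums over the demand request types.
total_lat = sum(lat for lat, _, _ in raw.values())
total_mis = sum(mis for _, mis, _ in raw.values())
total_acc = sum(acc for _, _, acc in raw.values())
print(f"demand: avg={total_lat / total_mis:.6f} rate={total_mis / total_acc:.6f}")
# -> demand: avg=66560.093047 rate=0.597529, matching the reference values above.

The demand totals fall out of the same sums, which is a quick way to sanity-check a regenerated stats.txt against these references.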
-system.cpu.toL2Bus.pwrStateResidencyTicks::UNDEFINED 130382890500 # Cumulative time (in ticks) in various power states -system.cpu.toL2Bus.trans_dist::ReadResp 5389 # Transaction distribution +system.cpu.toL2Bus.pwrStateResidencyTicks::UNDEFINED 132485848500 # Cumulative time (in ticks) in various power states +system.cpu.toL2Bus.trans_dist::ReadResp 5375 # Transaction distribution system.cpu.toL2Bus.trans_dist::WritebackDirty 16 # Transaction distribution -system.cpu.toL2Bus.trans_dist::WritebackClean 2881 # Transaction distribution +system.cpu.toL2Bus.trans_dist::WritebackClean 2864 # Transaction distribution system.cpu.toL2Bus.trans_dist::CleanEvict 26 # Transaction distribution system.cpu.toL2Bus.trans_dist::ReadExReq 1099 # Transaction distribution system.cpu.toL2Bus.trans_dist::ReadExResp 1099 # Transaction distribution -system.cpu.toL2Bus.trans_dist::ReadCleanReq 4678 # Transaction distribution +system.cpu.toL2Bus.trans_dist::ReadCleanReq 4664 # Transaction distribution system.cpu.toL2Bus.trans_dist::ReadSharedReq 712 # Transaction distribution -system.cpu.toL2Bus.pkt_count_system.cpu.icache.mem_side::system.cpu.l2cache.cpu_side 12236 # Packet count per connected master and slave (bytes) +system.cpu.toL2Bus.pkt_count_system.cpu.icache.mem_side::system.cpu.l2cache.cpu_side 12191 # Packet count per connected master and slave (bytes) system.cpu.toL2Bus.pkt_count_system.cpu.dcache.mem_side::system.cpu.l2cache.cpu_side 3664 # Packet count per connected master and slave (bytes) -system.cpu.toL2Bus.pkt_count::total 15900 # Packet count per connected master and slave (bytes) -system.cpu.toL2Bus.pkt_size_system.cpu.icache.mem_side::system.cpu.l2cache.cpu_side 483712 # Cumulative packet size per connected master and slave (bytes) +system.cpu.toL2Bus.pkt_count::total 15855 # Packet count per connected master and slave (bytes) +system.cpu.toL2Bus.pkt_size_system.cpu.icache.mem_side::system.cpu.l2cache.cpu_side 481728 # Cumulative packet size per connected master and slave (bytes) system.cpu.toL2Bus.pkt_size_system.cpu.dcache.mem_side::system.cpu.l2cache.cpu_side 116928 # Cumulative packet size per connected master and slave (bytes) -system.cpu.toL2Bus.pkt_size::total 600640 # Cumulative packet size per connected master and slave (bytes) +system.cpu.toL2Bus.pkt_size::total 598656 # Cumulative packet size per connected master and slave (bytes) system.cpu.toL2Bus.snoops 0 # Total snoops (count) -system.cpu.toL2Bus.snoop_fanout::samples 6489 # Request fanout histogram -system.cpu.toL2Bus.snoop_fanout::mean 0.071197 # Request fanout histogram -system.cpu.toL2Bus.snoop_fanout::stdev 0.257174 # Request fanout histogram +system.cpu.toL2Bus.snoopTraffic 0 # Total snoop traffic (bytes) +system.cpu.toL2Bus.snoop_fanout::samples 6475 # Request fanout histogram +system.cpu.toL2Bus.snoop_fanout::mean 0.072896 # Request fanout histogram +system.cpu.toL2Bus.snoop_fanout::stdev 0.259985 # Request fanout histogram system.cpu.toL2Bus.snoop_fanout::underflows 0 0.00% 0.00% # Request fanout histogram -system.cpu.toL2Bus.snoop_fanout::0 6027 92.88% 92.88% # Request fanout histogram -system.cpu.toL2Bus.snoop_fanout::1 462 7.12% 100.00% # Request fanout histogram +system.cpu.toL2Bus.snoop_fanout::0 6003 92.71% 92.71% # Request fanout histogram +system.cpu.toL2Bus.snoop_fanout::1 472 7.29% 100.00% # Request fanout histogram system.cpu.toL2Bus.snoop_fanout::2 0 0.00% 100.00% # Request fanout histogram system.cpu.toL2Bus.snoop_fanout::overflows 0 0.00% 100.00% # Request fanout histogram 
system.cpu.toL2Bus.snoop_fanout::min_value 0 # Request fanout histogram system.cpu.toL2Bus.snoop_fanout::max_value 1 # Request fanout histogram -system.cpu.toL2Bus.snoop_fanout::total 6489 # Request fanout histogram -system.cpu.toL2Bus.reqLayer0.occupancy 7603000 # Layer occupancy (ticks) +system.cpu.toL2Bus.snoop_fanout::total 6475 # Request fanout histogram +system.cpu.toL2Bus.reqLayer0.occupancy 7570500 # Layer occupancy (ticks) system.cpu.toL2Bus.reqLayer0.utilization 0.0 # Layer utilization (%) -system.cpu.toL2Bus.respLayer0.occupancy 7016498 # Layer occupancy (ticks) +system.cpu.toL2Bus.respLayer0.occupancy 6994999 # Layer occupancy (ticks) system.cpu.toL2Bus.respLayer0.utilization 0.0 # Layer utilization (%) -system.cpu.toL2Bus.respLayer1.occupancy 2723486 # Layer occupancy (ticks) +system.cpu.toL2Bus.respLayer1.occupancy 2723985 # Layer occupancy (ticks) system.cpu.toL2Bus.respLayer1.utilization 0.0 # Layer utilization (%) -system.membus.pwrStateResidencyTicks::UNDEFINED 130382890500 # Cumulative time (in ticks) in various power states -system.membus.trans_dist::ReadResp 2775 # Transaction distribution +system.membus.pwrStateResidencyTicks::UNDEFINED 132485848500 # Cumulative time (in ticks) in various power states +system.membus.trans_dist::ReadResp 2777 # Transaction distribution system.membus.trans_dist::ReadExReq 1091 # Transaction distribution system.membus.trans_dist::ReadExResp 1091 # Transaction distribution -system.membus.trans_dist::ReadSharedReq 2775 # Transaction distribution -system.membus.pkt_count_system.cpu.l2cache.mem_side::system.physmem.port 7732 # Packet count per connected master and slave (bytes) -system.membus.pkt_count::total 7732 # Packet count per connected master and slave (bytes) -system.membus.pkt_size_system.cpu.l2cache.mem_side::system.physmem.port 247424 # Cumulative packet size per connected master and slave (bytes) -system.membus.pkt_size::total 247424 # Cumulative packet size per connected master and slave (bytes) +system.membus.trans_dist::ReadSharedReq 2777 # Transaction distribution +system.membus.pkt_count_system.cpu.l2cache.mem_side::system.physmem.port 7736 # Packet count per connected master and slave (bytes) +system.membus.pkt_count::total 7736 # Packet count per connected master and slave (bytes) +system.membus.pkt_size_system.cpu.l2cache.mem_side::system.physmem.port 247552 # Cumulative packet size per connected master and slave (bytes) +system.membus.pkt_size::total 247552 # Cumulative packet size per connected master and slave (bytes) system.membus.snoops 0 # Total snoops (count) -system.membus.snoop_fanout::samples 3866 # Request fanout histogram +system.membus.snoopTraffic 0 # Total snoop traffic (bytes) +system.membus.snoop_fanout::samples 3868 # Request fanout histogram system.membus.snoop_fanout::mean 0 # Request fanout histogram system.membus.snoop_fanout::stdev 0 # Request fanout histogram system.membus.snoop_fanout::underflows 0 0.00% 0.00% # Request fanout histogram -system.membus.snoop_fanout::0 3866 100.00% 100.00% # Request fanout histogram +system.membus.snoop_fanout::0 3868 100.00% 100.00% # Request fanout histogram system.membus.snoop_fanout::1 0 0.00% 100.00% # Request fanout histogram system.membus.snoop_fanout::overflows 0 0.00% 100.00% # Request fanout histogram system.membus.snoop_fanout::min_value 0 # Request fanout histogram system.membus.snoop_fanout::max_value 0 # Request fanout histogram -system.membus.snoop_fanout::total 3866 # Request fanout histogram -system.membus.reqLayer0.occupancy 4516500 # Layer 
occupancy (ticks) +system.membus.snoop_fanout::total 3868 # Request fanout histogram +system.membus.reqLayer0.occupancy 4518000 # Layer occupancy (ticks) system.membus.reqLayer0.utilization 0.0 # Layer utilization (%) -system.membus.respLayer1.occupancy 20548250 # Layer occupancy (ticks) +system.membus.respLayer1.occupancy 20557500 # Layer occupancy (ticks) system.membus.respLayer1.utilization 0.0 # Layer utilization (%) ---------- End Simulation Statistics ---------- diff --git a/tests/long/se/70.twolf/ref/arm/linux/o3-timing/config.ini b/tests/long/se/70.twolf/ref/arm/linux/o3-timing/config.ini index 71957ae5a..174895907 100644 --- a/tests/long/se/70.twolf/ref/arm/linux/o3-timing/config.ini +++ b/tests/long/se/70.twolf/ref/arm/linux/o3-timing/config.ini @@ -14,6 +14,7 @@ children=clk_domain cpu cpu_clk_domain dvfs_handler membus physmem voltage_domai boot_osflags=a cache_line_size=64 clk_domain=system.clk_domain +default_p_state=UNDEFINED eventq_index=0 exit_on_work_items=false init_param=0 @@ -27,6 +28,10 @@ memories=system.physmem mmap_using_noreserve=false multi_thread=false num_work_ids=16 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null readfile= symbolfile= thermal_components= @@ -72,6 +77,7 @@ cpu_id=0 decodeToFetchDelay=1 decodeToRenameDelay=2 decodeWidth=3 +default_p_state=UNDEFINED dispatchWidth=6 do_checkpoint_insts=true do_quiesce=true @@ -110,6 +116,10 @@ numPhysIntRegs=128 numROBEntries=40 numRobs=1 numThreads=1 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null profile=0 progress_interval=0 renameToDecodeDelay=1 @@ -166,12 +176,17 @@ addr_ranges=0:18446744073709551615 assoc=2 clk_domain=system.cpu_clk_domain clusivity=mostly_incl +default_p_state=UNDEFINED demand_mshr_reserve=1 eventq_index=0 hit_latency=2 is_read_only=false max_miss_count=0 mshrs=6 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null prefetch_on_access=false prefetcher=Null response_latency=2 @@ -190,8 +205,13 @@ type=LRU assoc=2 block_size=64 clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 hit_latency=2 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sequential_access=false size=32768 @@ -214,9 +234,14 @@ walker=system.cpu.dstage2_mmu.stage2_tlb.walker [system.cpu.dstage2_mmu.stage2_tlb.walker] type=ArmTableWalker clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 is_stage2=true num_squash_per_cycle=2 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sys=system [system.cpu.dtb] @@ -230,9 +255,14 @@ walker=system.cpu.dtb.walker [system.cpu.dtb.walker] type=ArmTableWalker clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 is_stage2=false num_squash_per_cycle=2 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sys=system port=system.cpu.toL2Bus.slave[3] @@ -508,12 +538,17 @@ addr_ranges=0:18446744073709551615 assoc=2 clk_domain=system.cpu_clk_domain clusivity=mostly_incl +default_p_state=UNDEFINED demand_mshr_reserve=1 eventq_index=0 hit_latency=1 is_read_only=true max_miss_count=0 mshrs=2 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null prefetch_on_access=false prefetcher=Null response_latency=1 @@ -532,8 +567,13 @@ type=LRU assoc=2 
block_size=64 clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 hit_latency=1 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sequential_access=false size=32768 @@ -591,9 +631,14 @@ walker=system.cpu.istage2_mmu.stage2_tlb.walker [system.cpu.istage2_mmu.stage2_tlb.walker] type=ArmTableWalker clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 is_stage2=true num_squash_per_cycle=2 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sys=system [system.cpu.itb] @@ -607,9 +652,14 @@ walker=system.cpu.itb.walker [system.cpu.itb.walker] type=ArmTableWalker clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 is_stage2=false num_squash_per_cycle=2 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sys=system port=system.cpu.toL2Bus.slave[2] @@ -620,12 +670,17 @@ addr_ranges=0:18446744073709551615 assoc=16 clk_domain=system.cpu_clk_domain clusivity=mostly_excl +default_p_state=UNDEFINED demand_mshr_reserve=1 eventq_index=0 hit_latency=12 is_read_only=false max_miss_count=0 mshrs=16 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null prefetch_on_access=true prefetcher=system.cpu.l2cache.prefetcher response_latency=12 @@ -643,6 +698,7 @@ mem_side=system.membus.slave[1] type=StridePrefetcher cache_snoop=false clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED degree=8 eventq_index=0 latency=1 @@ -653,6 +709,10 @@ on_inst=true on_miss=false on_read=true on_write=true +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null queue_filter=true queue_size=32 queue_squash=true @@ -669,8 +729,13 @@ type=RandomRepl assoc=16 block_size=64 clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 hit_latency=12 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sequential_access=false size=1048576 @@ -678,10 +743,15 @@ size=1048576 type=CoherentXBar children=snoop_filter clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 forward_latency=0 frontend_latency=1 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 point_of_coherency=false +power_model=Null response_latency=1 snoop_filter=system.cpu.toL2Bus.snoop_filter snoop_response_latency=1 @@ -712,7 +782,7 @@ env= errout=cerr euid=100 eventq_index=0 -executable=/dist/m5/cpu2000/binaries/arm/linux/twolf +executable=/arm/projectscratch/randd/systems/dist/cpu2000/binaries/arm/linux/twolf gid=100 input=cin kvmInSE=false @@ -744,10 +814,15 @@ transition_latency=100000000 [system.membus] type=CoherentXBar clk_domain=system.clk_domain +default_p_state=UNDEFINED eventq_index=0 forward_latency=4 frontend_latency=3 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 point_of_coherency=true +power_model=Null response_latency=2 snoop_filter=Null snoop_response_latency=4 @@ -791,6 +866,7 @@ burst_length=8 channels=1 clk_domain=system.clk_domain conf_table_reported=true +default_p_state=UNDEFINED device_bus_width=8 device_rowbuffer_size=1024 device_size=536870912 @@ -802,7 +878,11 @@ max_accesses_per_row=16 mem_sched_policy=frfcfs min_writes_per_switch=16 null=false +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 
page_policy=open_adaptive +power_model=Null range=0:134217727 ranks_per_channel=2 read_buffer_size=32 diff --git a/tests/long/se/70.twolf/ref/arm/linux/o3-timing/simerr b/tests/long/se/70.twolf/ref/arm/linux/o3-timing/simerr index 341b479f7..bbcd9d751 100755 --- a/tests/long/se/70.twolf/ref/arm/linux/o3-timing/simerr +++ b/tests/long/se/70.twolf/ref/arm/linux/o3-timing/simerr @@ -1,2 +1,3 @@ warn: DRAM device capacity (8192 Mbytes) does not match the address range assigned (128 Mbytes) warn: Sockets disabled, not accepting gdb connections +warn: ClockedObject: More than one power state change request encountered within the same simulation tick diff --git a/tests/long/se/70.twolf/ref/arm/linux/o3-timing/simout b/tests/long/se/70.twolf/ref/arm/linux/o3-timing/simout index 00456d1c3..998b0d088 100755 --- a/tests/long/se/70.twolf/ref/arm/linux/o3-timing/simout +++ b/tests/long/se/70.twolf/ref/arm/linux/o3-timing/simout @@ -3,11 +3,13 @@ Redirecting stderr to build/ARM/tests/opt/long/se/70.twolf/arm/linux/o3-timing/s gem5 Simulator System. http://gem5.org gem5 is copyrighted software; use the --copyright option for details. -gem5 compiled Mar 15 2016 19:53:43 -gem5 started Mar 15 2016 20:14:36 -gem5 executing on dinar2c11, pid 10702 -command line: build/ARM/gem5.opt -d build/ARM/tests/opt/long/se/70.twolf/arm/linux/o3-timing -re /home/stever/gem5-public/tests/run.py build/ARM/tests/opt/long/se/70.twolf/arm/linux/o3-timing +gem5 compiled Jul 21 2016 14:37:41 +gem5 started Jul 21 2016 14:50:24 +gem5 executing on e108600-lin, pid 23312 +command line: /work/curdun01/gem5-external.hg/build/ARM/gem5.opt -d build/ARM/tests/opt/long/se/70.twolf/arm/linux/o3-timing -re /work/curdun01/gem5-external.hg/tests/testing/../run.py long/se/70.twolf/arm/linux/o3-timing +Couldn't unlink build/ARM/tests/opt/long/se/70.twolf/arm/linux/o3-timing/smred.sav +Couldn't unlink build/ARM/tests/opt/long/se/70.twolf/arm/linux/o3-timing/smred.sv2 Global frequency set at 1000000000000 ticks per second info: Entering event queue @ 0. Starting simulation... 
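The config.ini hunks in this commit add the same power-state parameters (default_p_state, p_state_clk_gate_bins/min/max, power_model) to every clocked-object section of the reference configs. For illustration only, and assuming the generated config.ini stays plain INI syntax (gem5 itself does not consume the file this way), the new keys can be listed with Python's configparser:

# Sketch: list the power-state parameters added to each clocked-object section
# of an updated reference config.ini. Assumes the file parses as plain INI.
import configparser

cfg = configparser.ConfigParser(interpolation=None, strict=False)
cfg.read("tests/long/se/70.twolf/ref/arm/linux/o3-timing/config.ini")

power_keys = ("default_p_state", "power_model", "p_state_clk_gate_bins",
              "p_state_clk_gate_max", "p_state_clk_gate_min")

for section in cfg.sections():
    if cfg.has_option(section, "default_p_state"):
        values = {k: cfg.get(section, k, fallback=None) for k in power_keys}
        print(section, values)
# e.g. system.cpu.dcache -> UNDEFINED / Null / 20 / 1000000000000 / 1000,
# matching the values added throughout these hunks.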
diff --git a/tests/long/se/70.twolf/ref/arm/linux/o3-timing/stats.txt b/tests/long/se/70.twolf/ref/arm/linux/o3-timing/stats.txt index f718004ff..a5c8823ea 100644 --- a/tests/long/se/70.twolf/ref/arm/linux/o3-timing/stats.txt +++ b/tests/long/se/70.twolf/ref/arm/linux/o3-timing/stats.txt @@ -4,11 +4,11 @@ sim_seconds 0.084938 # Nu sim_ticks 84937723500 # Number of ticks simulated final_tick 84937723500 # Number of ticks from beginning of simulation (restored from checkpoints and never reset) sim_freq 1000000000000 # Frequency of simulated ticks -host_inst_rate 205612 # Simulator instruction rate (inst/s) -host_op_rate 216749 # Simulator op (including micro ops) rate (op/s) -host_tick_rate 101357587 # Simulator tick rate (ticks/s) -host_mem_usage 315376 # Number of bytes of host memory used -host_seconds 838.00 # Real time elapsed on the host +host_inst_rate 112842 # Simulator instruction rate (inst/s) +host_op_rate 118955 # Simulator op (including micro ops) rate (op/s) +host_tick_rate 55626314 # Simulator tick rate (ticks/s) +host_mem_usage 268228 # Number of bytes of host memory used +host_seconds 1526.93 # Real time elapsed on the host sim_insts 172303022 # Number of instructions simulated sim_ops 181635954 # Number of ops (including micro ops) simulated system.voltage_domain.voltage 1 # Voltage in Volts @@ -1143,6 +1143,7 @@ system.cpu.toL2Bus.pkt_size_system.cpu.icache.mem_side::system.cpu.l2cache.cpu_s system.cpu.toL2Bus.pkt_size_system.cpu.dcache.mem_side::system.cpu.l2cache.cpu_side 9323136 # Cumulative packet size per connected master and slave (bytes) system.cpu.toL2Bus.pkt_size::total 16219648 # Cumulative packet size per connected master and slave (bytes) system.cpu.toL2Bus.snoops 13357 # Total snoops (count) +system.cpu.toL2Bus.snoopTraffic 0 # Total snoop traffic (bytes) system.cpu.toL2Bus.snoop_fanout::samples 140586 # Request fanout histogram system.cpu.toL2Bus.snoop_fanout::mean 0.219979 # Request fanout histogram system.cpu.toL2Bus.snoop_fanout::stdev 0.541213 # Request fanout histogram @@ -1170,6 +1171,7 @@ system.membus.pkt_count::total 24701 # Pa system.membus.pkt_size_system.cpu.l2cache.mem_side::system.physmem.port 790400 # Cumulative packet size per connected master and slave (bytes) system.membus.pkt_size::total 790400 # Cumulative packet size per connected master and slave (bytes) system.membus.snoops 0 # Total snoops (count) +system.membus.snoopTraffic 0 # Total snoop traffic (bytes) system.membus.snoop_fanout::samples 12351 # Request fanout histogram system.membus.snoop_fanout::mean 0 # Request fanout histogram system.membus.snoop_fanout::stdev 0 # Request fanout histogram diff --git a/tests/long/se/70.twolf/ref/x86/linux/o3-timing/config.ini b/tests/long/se/70.twolf/ref/x86/linux/o3-timing/config.ini index c90c082f2..4ca9409ac 100644 --- a/tests/long/se/70.twolf/ref/x86/linux/o3-timing/config.ini +++ b/tests/long/se/70.twolf/ref/x86/linux/o3-timing/config.ini @@ -14,6 +14,7 @@ children=clk_domain cpu cpu_clk_domain dvfs_handler membus physmem voltage_domai boot_osflags=a cache_line_size=64 clk_domain=system.clk_domain +default_p_state=UNDEFINED eventq_index=0 exit_on_work_items=false init_param=0 @@ -27,8 +28,14 @@ memories=system.physmem mmap_using_noreserve=false multi_thread=false num_work_ids=16 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null readfile= symbolfile= +thermal_components= +thermal_model=Null work_begin_ckpt_count=0 work_begin_cpu_id_exit=-1 work_begin_exit_count=0 @@ -70,6 +77,7 @@ 
cpu_id=0 decodeToFetchDelay=1 decodeToRenameDelay=1 decodeWidth=8 +default_p_state=UNDEFINED dispatchWidth=8 do_checkpoint_insts=true do_quiesce=true @@ -106,6 +114,10 @@ numPhysIntRegs=256 numROBEntries=192 numRobs=1 numThreads=1 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null profile=0 progress_interval=0 renameToDecodeDelay=1 @@ -151,11 +163,18 @@ choicePredictorSize=8192 eventq_index=0 globalCtrBits=2 globalPredictorSize=8192 +indirectHashGHR=true +indirectHashTargets=true +indirectPathLength=3 +indirectSets=256 +indirectTagSize=16 +indirectWays=2 instShiftAmt=2 localCtrBits=2 localHistoryTableSize=2048 localPredictorSize=2048 numThreads=1 +useIndirect=true [system.cpu.dcache] type=Cache @@ -164,12 +183,17 @@ addr_ranges=0:18446744073709551615 assoc=2 clk_domain=system.cpu_clk_domain clusivity=mostly_incl +default_p_state=UNDEFINED demand_mshr_reserve=1 eventq_index=0 hit_latency=2 is_read_only=false max_miss_count=0 mshrs=4 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null prefetch_on_access=false prefetcher=Null response_latency=2 @@ -188,8 +212,13 @@ type=LRU assoc=2 block_size=64 clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 hit_latency=2 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sequential_access=false size=262144 @@ -203,8 +232,13 @@ walker=system.cpu.dtb.walker [system.cpu.dtb.walker] type=X86PagetableWalker clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 num_squash_per_cycle=4 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null system=system port=system.cpu.toL2Bus.slave[3] @@ -522,12 +556,17 @@ addr_ranges=0:18446744073709551615 assoc=2 clk_domain=system.cpu_clk_domain clusivity=mostly_incl +default_p_state=UNDEFINED demand_mshr_reserve=1 eventq_index=0 hit_latency=2 is_read_only=true max_miss_count=0 mshrs=4 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null prefetch_on_access=false prefetcher=Null response_latency=2 @@ -546,18 +585,28 @@ type=LRU assoc=2 block_size=64 clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 hit_latency=2 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sequential_access=false size=131072 [system.cpu.interrupts] type=X86LocalApic clk_domain=system.cpu.apic_clk_domain +default_p_state=UNDEFINED eventq_index=0 int_latency=1000 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 pio_addr=2305843009213693952 pio_latency=100000 +power_model=Null system=system int_master=system.membus.slave[2] int_slave=system.membus.master[2] @@ -577,8 +626,13 @@ walker=system.cpu.itb.walker [system.cpu.itb.walker] type=X86PagetableWalker clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 num_squash_per_cycle=4 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null system=system port=system.cpu.toL2Bus.slave[2] @@ -589,12 +643,17 @@ addr_ranges=0:18446744073709551615 assoc=8 clk_domain=system.cpu_clk_domain clusivity=mostly_incl +default_p_state=UNDEFINED demand_mshr_reserve=1 eventq_index=0 hit_latency=20 is_read_only=false max_miss_count=0 mshrs=20 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 
+power_model=Null prefetch_on_access=false prefetcher=Null response_latency=20 @@ -613,8 +672,13 @@ type=LRU assoc=8 block_size=64 clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 hit_latency=20 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 +power_model=Null sequential_access=false size=2097152 @@ -622,10 +686,15 @@ size=2097152 type=CoherentXBar children=snoop_filter clk_domain=system.cpu_clk_domain +default_p_state=UNDEFINED eventq_index=0 forward_latency=0 frontend_latency=1 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 point_of_coherency=false +power_model=Null response_latency=1 snoop_filter=system.cpu.toL2Bus.snoop_filter snoop_response_latency=1 @@ -656,7 +725,7 @@ env= errout=cerr euid=100 eventq_index=0 -executable=/dist/m5/cpu2000/binaries/x86/linux/twolf +executable=/arm/projectscratch/randd/systems/dist/cpu2000/binaries/x86/linux/twolf gid=100 input=cin kvmInSE=false @@ -688,10 +757,15 @@ transition_latency=100000000 [system.membus] type=CoherentXBar clk_domain=system.clk_domain +default_p_state=UNDEFINED eventq_index=0 forward_latency=4 frontend_latency=3 +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 point_of_coherency=true +power_model=Null response_latency=2 snoop_filter=Null snoop_response_latency=4 @@ -735,6 +809,7 @@ burst_length=8 channels=1 clk_domain=system.clk_domain conf_table_reported=true +default_p_state=UNDEFINED device_bus_width=8 device_rowbuffer_size=1024 device_size=536870912 @@ -746,7 +821,11 @@ max_accesses_per_row=16 mem_sched_policy=frfcfs min_writes_per_switch=16 null=false +p_state_clk_gate_bins=20 +p_state_clk_gate_max=1000000000000 +p_state_clk_gate_min=1000 page_policy=open_adaptive +power_model=Null range=0:134217727 ranks_per_channel=2 read_buffer_size=32 diff --git a/tests/long/se/70.twolf/ref/x86/linux/o3-timing/simerr b/tests/long/se/70.twolf/ref/x86/linux/o3-timing/simerr index f9e2ef3b2..bbcd9d751 100755 --- a/tests/long/se/70.twolf/ref/x86/linux/o3-timing/simerr +++ b/tests/long/se/70.twolf/ref/x86/linux/o3-timing/simerr @@ -1 +1,3 @@ warn: DRAM device capacity (8192 Mbytes) does not match the address range assigned (128 Mbytes) +warn: Sockets disabled, not accepting gdb connections +warn: ClockedObject: More than one power state change request encountered within the same simulation tick diff --git a/tests/long/se/70.twolf/ref/x86/linux/o3-timing/simout b/tests/long/se/70.twolf/ref/x86/linux/o3-timing/simout index 965d23114..6416a69a9 100755 --- a/tests/long/se/70.twolf/ref/x86/linux/o3-timing/simout +++ b/tests/long/se/70.twolf/ref/x86/linux/o3-timing/simout @@ -3,11 +3,13 @@ Redirecting stderr to build/X86/tests/opt/long/se/70.twolf/x86/linux/o3-timing/s gem5 Simulator System. http://gem5.org gem5 is copyrighted software; use the --copyright option for details. 
-gem5 compiled Mar 16 2016 22:57:26 -gem5 started Mar 16 2016 22:58:08 -gem5 executing on dinar2c11, pid 24733 -command line: build/X86/gem5.opt -d build/X86/tests/opt/long/se/70.twolf/x86/linux/o3-timing -re /home/stever/gem5-public/tests/run.py build/X86/tests/opt/long/se/70.twolf/x86/linux/o3-timing +gem5 compiled Jul 21 2016 14:35:23 +gem5 started Jul 21 2016 14:36:17 +gem5 executing on e108600-lin, pid 18548 +command line: /work/curdun01/gem5-external.hg/build/X86/gem5.opt -d build/X86/tests/opt/long/se/70.twolf/x86/linux/o3-timing -re /work/curdun01/gem5-external.hg/tests/testing/../run.py long/se/70.twolf/x86/linux/o3-timing +Couldn't unlink build/X86/tests/opt/long/se/70.twolf/x86/linux/o3-timing/smred.sav +Couldn't unlink build/X86/tests/opt/long/se/70.twolf/x86/linux/o3-timing/smred.sv2 Global frequency set at 1000000000000 ticks per second info: Entering event queue @ 0. Starting simulation... @@ -17,7 +19,9792 @@ Authors: Carl Sechen, Bill Swartz Yale University info: Increasing stack size by one page. info: Increasing stack size by one page. - 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 +info: Increasing stack size by one page. + 1 2 3 4 5 6 7 8 9 10 11 info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. 
[... "+info: Increasing stack size by one page." repeated many more times in the added simout output ...]
+info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. 
+info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. 
+info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. 
+info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. 
+info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. 
+info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. 
+info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. 
+info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. 
+info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. 
+info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. 
+info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. 
+info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. 
+info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. 
+info: Increasing stack size by one page.
(the added reference simulation output repeats the line "info: Increasing stack size by one page." many more times; the duplicates are omitted here)
+info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. 
+info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. 
+info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. 
+info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. 
+info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. 
+info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. 
+info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. 
+info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. 
+info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. 
+info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. 
+info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. 
+info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. 
+info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. 
+info: Increasing stack size by one page.
+[... "info: Increasing stack size by one page." repeated many times in the updated reference output; duplicate lines elided ...]
+info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. 
+info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. 
+info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. 
+info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. 
+info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. 
+info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. 
+info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. 
+info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. 
+info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. 
+info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. 
+info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. 
+info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. 
+info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. 
+info: Increasing stack size by one page.
+info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. 
+info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. 
+info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. 
+info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. 
+info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. 
+info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. +info: Increasing stack size by one page. + 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 @@ -25,5 +9812,4 @@ info: Increasing stack size by one page. 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 -122 123 124 info: Increasing stack size by one page. 
-Exiting @ tick 79140979500 because target called exit()
+122 123 124 Exiting @ tick 103324153500 because target called exit()
diff --git a/tests/long/se/70.twolf/ref/x86/linux/o3-timing/stats.txt b/tests/long/se/70.twolf/ref/x86/linux/o3-timing/stats.txt
index 145f8838d..d24e062d1 100644
--- a/tests/long/se/70.twolf/ref/x86/linux/o3-timing/stats.txt
+++ b/tests/long/se/70.twolf/ref/x86/linux/o3-timing/stats.txt
@@ -4,11 +4,11 @@ sim_seconds 0.103324 # Nu
sim_ticks 103324153500 # Number of ticks simulated
final_tick 103324153500 # Number of ticks from beginning of simulation (restored from checkpoints and never reset)
sim_freq 1000000000000 # Frequency of simulated ticks
-host_inst_rate 98344 # Simulator instruction rate (inst/s)
-host_op_rate 164833 # Simulator op (including micro ops) rate (op/s)
-host_tick_rate 76937982 # Simulator tick rate (ticks/s)
-host_mem_usage 350904 # Number of bytes of host memory used
-host_seconds 1342.95 # Real time elapsed on the host
+host_inst_rate 51505 # Simulator instruction rate (inst/s)
+host_op_rate 86327 # Simulator op (including micro ops) rate (op/s)
+host_tick_rate 40294413 # Simulator tick rate (ticks/s)
+host_mem_usage 304184 # Number of bytes of host memory used
+host_seconds 2564.23 # Real time elapsed on the host
sim_insts 132071192 # Number of instructions simulated
sim_ops 221363384 # Number of ops (including micro ops) simulated
system.voltage_domain.voltage 1 # Voltage in Volts
@@ -974,6 +974,7 @@ system.cpu.toL2Bus.pkt_size_system.cpu.icache.mem_side::system.cpu.l2cache.cpu_s
system.cpu.toL2Bus.pkt_size_system.cpu.dcache.mem_side::system.cpu.l2cache.cpu_side 136384 # Cumulative packet size per connected master and slave (bytes)
system.cpu.toL2Bus.pkt_size::total 1096896 # Cumulative packet size per connected master and slave (bytes)
system.cpu.toL2Bus.snoops 507 # Total snoops (count)
+system.cpu.toL2Bus.snoopTraffic 32448 # Total snoop traffic (bytes)
system.cpu.toL2Bus.snoop_fanout::samples 11619 # Request fanout histogram
system.cpu.toL2Bus.snoop_fanout::mean 0.094328 # Request fanout histogram
system.cpu.toL2Bus.snoop_fanout::stdev 0.292297 # Request fanout histogram
@@ -1004,6 +1005,7 @@ system.membus.pkt_size_system.cpu.l2cache.mem_side::system.physmem.port 36
system.membus.pkt_size_system.cpu.l2cache.mem_side::total 361984 # Cumulative packet size per connected master and slave (bytes)
system.membus.pkt_size::total 361984 # Cumulative packet size per connected master and slave (bytes)
system.membus.snoops 0 # Total snoops (count)
+system.membus.snoopTraffic 0 # Total snoop traffic (bytes)
system.membus.snoop_fanout::samples 6156 # Request fanout histogram
system.membus.snoop_fanout::mean 0 # Request fanout histogram
system.membus.snoop_fanout::stdev 0 # Request fanout histogram
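Note on the host_* changes above: they are derived rates (sim_insts, sim_ops, and sim_ticks divided by host_seconds), so they all shift together when the same workload simply takes longer to simulate on the host; the simulated results (sim_insts, sim_ops, sim_ticks) are unchanged. The following is a minimal, hypothetical Python sketch (not part of gem5; parse_stats and check_host_rates are illustrative names) that re-derives these rates from a flat stats.txt like the one shown and flags any that disagree with the dumped values:

    # Sanity-check derived host_* rates in a gem5 stats.txt dump.
    # Relations assumed, based on the stat descriptions above:
    #   host_inst_rate ~= sim_insts / host_seconds
    #   host_op_rate   ~= sim_ops   / host_seconds
    #   host_tick_rate ~= sim_ticks / host_seconds
    #   sim_seconds     = sim_ticks / sim_freq

    def parse_stats(path):
        """Parse a flat gem5 stats.txt into {name: float}, ignoring '#' comments."""
        stats = {}
        with open(path) as f:
            for line in f:
                parts = line.split('#', 1)[0].split()
                if len(parts) >= 2:
                    try:
                        stats[parts[0]] = float(parts[1])
                    except ValueError:
                        pass  # skip non-numeric entries
        return stats

    def check_host_rates(stats, tol=0.01):
        """Print any derived rate that differs from the dumped one by more than tol."""
        derived = {
            'host_inst_rate': stats['sim_insts'] / stats['host_seconds'],
            'host_op_rate':   stats['sim_ops']   / stats['host_seconds'],
            'host_tick_rate': stats['sim_ticks'] / stats['host_seconds'],
            'sim_seconds':    stats['sim_ticks'] / stats['sim_freq'],
        }
        for name, value in derived.items():
            dumped = stats[name]
            if abs(value - dumped) > tol * max(abs(dumped), 1.0):
                print('%s: dumped %g vs derived %g' % (name, dumped, value))

    if __name__ == '__main__':
        import sys
        check_host_rates(parse_stats(sys.argv[1]))

With the updated values in this hunk, for example, 132071192 / 2564.23 is approximately 51505 inst/s, matching the new host_inst_rate, so the check passes within the 1% tolerance.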