From 8dcca68234bb2881af1380c09ac8fe9ff7075a15 Mon Sep 17 00:00:00 2001
From: Nathan Binkert
Date: Thu, 5 Oct 2006 21:14:43 -0700
Subject: remove traces of binning

--HG--
extra : convert_revision : b33cc67cfde04c9af6f50cbef538104e1298bedc
---
 src/arch/sparc/system.cc | 12 +-----------
 1 file changed, 1 insertion(+), 11 deletions(-)

diff --git a/src/arch/sparc/system.cc b/src/arch/sparc/system.cc
index 63cbbe057..ef6443d17 100644
--- a/src/arch/sparc/system.cc
+++ b/src/arch/sparc/system.cc
@@ -152,10 +152,6 @@ BEGIN_DECLARE_SIM_OBJECT_PARAMS(SparcSystem)
     Param<string> readfile;
     Param<unsigned int> init_param;

-    Param<bool> bin;
-    VectorParam<string> binned_fns;
-    Param<bool> bin_int;
-
 END_DECLARE_SIM_OBJECT_PARAMS(SparcSystem)

 BEGIN_INIT_SIM_OBJECT_PARAMS(SparcSystem)
@@ -173,10 +169,7 @@ BEGIN_INIT_SIM_OBJECT_PARAMS(SparcSystem)
     INIT_PARAM_DFLT(readfile, "file to read startup script from", ""),
     INIT_PARAM_DFLT(init_param, "numerical value to pass into simulator", 0),
     INIT_PARAM_DFLT(system_type, "Type of system we are emulating", 34),
-    INIT_PARAM_DFLT(system_rev, "Revision of system we are emulating", 1<<10),
-    INIT_PARAM_DFLT(bin, "is this system to be binned", false),
-    INIT_PARAM(binned_fns, "functions to be broken down and binned"),
-    INIT_PARAM_DFLT(bin_int, "is interrupt code binned seperately?", true)
+    INIT_PARAM_DFLT(system_rev, "Revision of system we are emulating", 1<<10)

 END_INIT_SIM_OBJECT_PARAMS(SparcSystem)

@@ -196,9 +189,6 @@ CREATE_SIM_OBJECT(SparcSystem)
     p->readfile = readfile;
     p->system_type = system_type;
     p->system_rev = system_rev;
-    p->bin = bin;
-    p->binned_fns = binned_fns;
-    p->bin_int = bin_int;

     return new SparcSystem(p);
 }
--
cgit v1.2.3

From 230fb85a8a9990e1c2511f34a07ba4250becff82 Mon Sep 17 00:00:00 2001
From: Lisa Hsu
Date: Fri, 6 Oct 2006 00:39:21 -0400
Subject: update full system references for newest disk image from linux-dist.
--HG-- extra : convert_revision : c1232dafff0d92d8041af1b9de1dc8c55ee50f40 --- .../linux/tsunami-simple-atomic-dual/config.ini | 4 + .../linux/tsunami-simple-atomic-dual/config.out | 7 + .../console.system.sim_console | 10 +- .../linux/tsunami-simple-atomic-dual/m5stats.txt | 409 ++++++++++----------- .../alpha/linux/tsunami-simple-atomic-dual/stdout | 6 +- .../alpha/linux/tsunami-simple-atomic/config.ini | 3 + .../alpha/linux/tsunami-simple-atomic/config.out | 6 + .../console.system.sim_console | 10 +- .../alpha/linux/tsunami-simple-atomic/m5stats.txt | 219 ++++++----- .../ref/alpha/linux/tsunami-simple-atomic/stdout | 6 +- .../linux/tsunami-simple-timing-dual/config.ini | 4 + .../linux/tsunami-simple-timing-dual/config.out | 7 + .../console.system.sim_console | 10 +- .../linux/tsunami-simple-timing-dual/m5stats.txt | 407 ++++++++++---------- .../alpha/linux/tsunami-simple-timing-dual/stdout | 6 +- .../alpha/linux/tsunami-simple-timing/config.ini | 3 + .../alpha/linux/tsunami-simple-timing/config.out | 6 + .../console.system.sim_console | 10 +- .../alpha/linux/tsunami-simple-timing/m5stats.txt | 217 ++++++----- .../ref/alpha/linux/tsunami-simple-timing/stdout | 6 +- 20 files changed, 680 insertions(+), 676 deletions(-) diff --git a/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-atomic-dual/config.ini b/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-atomic-dual/config.ini index c2bcb99aa..3d719c501 100644 --- a/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-atomic-dual/config.ini +++ b/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-atomic-dual/config.ini @@ -58,6 +58,7 @@ mem_mode=atomic pal=/dist/m5/system/binaries/ts_osfpal physmem=system.physmem readfile=tests/halt.sh +symbolfile= system_rev=1024 system_type=34 @@ -86,6 +87,7 @@ max_loads_all_threads=0 max_loads_any_thread=0 mem=system.physmem profile=0 +progress_interval=0 simulate_stalls=false system=system width=1 @@ -116,6 +118,7 @@ max_loads_all_threads=0 max_loads_any_thread=0 mem=system.physmem profile=0 +progress_interval=0 simulate_stalls=false system=system width=1 @@ -584,6 +587,7 @@ pio=system.iobus.port[24] [trace] bufsize=0 +cycle=0 dump_on_exit=false file=cout flags= diff --git a/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-atomic-dual/config.out b/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-atomic-dual/config.out index 737ee6611..b8290213e 100644 --- a/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-atomic-dual/config.out +++ b/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-atomic-dual/config.out @@ -21,6 +21,7 @@ console=/dist/m5/system/binaries/console pal=/dist/m5/system/binaries/ts_osfpal boot_osflags=root=/dev/hda1 console=ttyS0 readfile=tests/halt.sh +symbolfile= init_param=0 system_type=34 system_rev=1024 @@ -86,6 +87,7 @@ max_insts_any_thread=0 max_insts_all_threads=0 max_loads_any_thread=0 max_loads_all_threads=0 +progress_interval=0 mem=system.physmem system=system itb=system.cpu0.itb @@ -113,6 +115,7 @@ max_insts_any_thread=0 max_insts_all_threads=0 max_loads_any_thread=0 max_loads_all_threads=0 +progress_interval=0 mem=system.physmem system=system itb=system.cpu1.itb @@ -492,6 +495,7 @@ bus_id=0 [trace] flags= start=0 +cycle=0 bufsize=0 file=cout dump_on_exit=false @@ -535,6 +539,9 @@ trace_system=client [debug] break_cycles= +[statsreset] +reset_cycle=0 + [pseudo_inst] quiesce=true statistics=true diff --git a/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-atomic-dual/console.system.sim_console 
b/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-atomic-dual/console.system.sim_console index c3c7b2676..4a397ddbf 100644 --- a/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-atomic-dual/console.system.sim_console +++ b/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-atomic-dual/console.system.sim_console @@ -74,7 +74,7 @@ SlaveCmd: restart FFFFFC0000310020 FFFFFC0000310020 vptb FFFFFFFE00000000 my_rpb hdb: M5 IDE Disk, ATA DISK drive ide0 at 0x8410-0x8417,0x8422 on irq 31 hda: max request size: 128KiB - hda: 163296 sectors (83 MB), CHS=162/16/63, UDMA(33) + hda: 511056 sectors (261 MB), CHS=507/16/63, UDMA(33) hda: hda1 hdb: max request size: 128KiB hdb: 4177920 sectors (2139 MB), CHS=4144/16/63, UDMA(33) @@ -102,10 +102,6 @@ SlaveCmd: restart FFFFFC0000310020 FFFFFC0000310020 vptb FFFFFFFE00000000 my_rpb All bugs added by David S. Miller VFS: Mounted root (ext2 filesystem) readonly. Freeing unused kernel memory: 480k freed - init started: BusyBox v1.00-rc2 (2004.11.18-16:22+0000) multi-call binary - -PTXdist-0.7.0 (2004-11-18T11:23:40-0500) - + init started: BusyBox v1.1.0 (2006.08.17-02:54+0000) multi-call binary mounting filesystems... -EXT2-fs warning: checktime reached, running e2fsck is recommended - loading script... +loading script... diff --git a/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-atomic-dual/m5stats.txt b/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-atomic-dual/m5stats.txt index c7715aeac..376929ebb 100644 --- a/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-atomic-dual/m5stats.txt +++ b/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-atomic-dual/m5stats.txt @@ -1,239 +1,232 @@ ---------- Begin Simulation Statistics ---------- -host_inst_rate 1382023 # Simulator instruction rate (inst/s) -host_mem_usage 194588 # Number of bytes of host memory used -host_seconds 45.78 # Real time elapsed on the host -host_tick_rate 77681401 # Simulator tick rate (ticks/s) +host_inst_rate 1361363 # Simulator instruction rate (inst/s) +host_mem_usage 194440 # Number of bytes of host memory used +host_seconds 45.04 # Real time elapsed on the host +host_tick_rate 78691874 # Simulator tick rate (ticks/s) sim_freq 2000000000 # Frequency of simulated ticks -sim_insts 63264995 # Number of instructions simulated -sim_seconds 1.778030 # Number of seconds simulated -sim_ticks 3556060806 # Number of ticks simulated -system.cpu0.dtb.accesses 1831687 # DTB accesses -system.cpu0.dtb.acv 360 # DTB access violations -system.cpu0.dtb.hits 12876975 # DTB hits -system.cpu0.dtb.misses 11050 # DTB misses -system.cpu0.dtb.read_accesses 495437 # DTB read accesses -system.cpu0.dtb.read_acv 219 # DTB read access violations -system.cpu0.dtb.read_hits 7121424 # DTB read hits -system.cpu0.dtb.read_misses 9036 # DTB read misses -system.cpu0.dtb.write_accesses 1336250 # DTB write accesses -system.cpu0.dtb.write_acv 141 # DTB write access violations -system.cpu0.dtb.write_hits 5755551 # DTB write hits -system.cpu0.dtb.write_misses 2014 # DTB write misses -system.cpu0.idle_fraction 0.984569 # Percentage of idle cycles -system.cpu0.itb.accesses 2328068 # ITB accesses -system.cpu0.itb.acv 216 # ITB acv -system.cpu0.itb.hits 2323500 # ITB hits -system.cpu0.itb.misses 4568 # ITB misses -system.cpu0.kern.callpal 179206 # number of callpals executed +sim_insts 61314617 # Number of instructions simulated +sim_seconds 1.772124 # Number of seconds simulated +sim_ticks 3544247159 # Number of ticks simulated +system.cpu0.dtb.accesses 1850344 # DTB accesses 
+system.cpu0.dtb.acv 301 # DTB access violations +system.cpu0.dtb.hits 12691711 # DTB hits +system.cpu0.dtb.misses 8349 # DTB misses +system.cpu0.dtb.read_accesses 509385 # DTB read accesses +system.cpu0.dtb.read_acv 184 # DTB read access violations +system.cpu0.dtb.read_hits 7018751 # DTB read hits +system.cpu0.dtb.read_misses 6579 # DTB read misses +system.cpu0.dtb.write_accesses 1340959 # DTB write accesses +system.cpu0.dtb.write_acv 117 # DTB write access violations +system.cpu0.dtb.write_hits 5672960 # DTB write hits +system.cpu0.dtb.write_misses 1770 # DTB write misses +system.cpu0.idle_fraction 0.984893 # Percentage of idle cycles +system.cpu0.itb.accesses 1981604 # ITB accesses +system.cpu0.itb.acv 161 # ITB acv +system.cpu0.itb.hits 1978255 # ITB hits +system.cpu0.itb.misses 3349 # ITB misses +system.cpu0.kern.callpal 176688 # number of callpals executed system.cpu0.kern.callpal_cserve 1 0.00% 0.00% # number of callpals executed -system.cpu0.kern.callpal_wripir 91 0.05% 0.05% # number of callpals executed -system.cpu0.kern.callpal_wrmces 1 0.00% 0.05% # number of callpals executed -system.cpu0.kern.callpal_wrfen 1 0.00% 0.05% # number of callpals executed -system.cpu0.kern.callpal_wrvptptr 1 0.00% 0.05% # number of callpals executed -system.cpu0.kern.callpal_swpctx 1375 0.77% 0.82% # number of callpals executed -system.cpu0.kern.callpal_tbi 20 0.01% 0.83% # number of callpals executed -system.cpu0.kern.callpal_wrent 7 0.00% 0.84% # number of callpals executed -system.cpu0.kern.callpal_swpipl 168681 94.13% 94.96% # number of callpals executed -system.cpu0.kern.callpal_rdps 4713 2.63% 97.59% # number of callpals executed -system.cpu0.kern.callpal_wrkgp 1 0.00% 97.59% # number of callpals executed -system.cpu0.kern.callpal_wrusp 4 0.00% 97.59% # number of callpals executed -system.cpu0.kern.callpal_rdusp 11 0.01% 97.60% # number of callpals executed -system.cpu0.kern.callpal_whami 2 0.00% 97.60% # number of callpals executed -system.cpu0.kern.callpal_rti 3639 2.03% 99.63% # number of callpals executed -system.cpu0.kern.callpal_callsys 461 0.26% 99.89% # number of callpals executed -system.cpu0.kern.callpal_imb 197 0.11% 100.00% # number of callpals executed +system.cpu0.kern.callpal_wripir 97 0.05% 0.06% # number of callpals executed +system.cpu0.kern.callpal_wrmces 1 0.00% 0.06% # number of callpals executed +system.cpu0.kern.callpal_wrfen 1 0.00% 0.06% # number of callpals executed +system.cpu0.kern.callpal_wrvptptr 1 0.00% 0.06% # number of callpals executed +system.cpu0.kern.callpal_swpctx 1117 0.63% 0.69% # number of callpals executed +system.cpu0.kern.callpal_tbi 44 0.02% 0.71% # number of callpals executed +system.cpu0.kern.callpal_wrent 7 0.00% 0.72% # number of callpals executed +system.cpu0.kern.callpal_swpipl 166811 94.41% 95.13% # number of callpals executed +system.cpu0.kern.callpal_rdps 4911 2.78% 97.91% # number of callpals executed +system.cpu0.kern.callpal_wrkgp 1 0.00% 97.91% # number of callpals executed +system.cpu0.kern.callpal_wrusp 3 0.00% 97.91% # number of callpals executed +system.cpu0.kern.callpal_rdusp 9 0.01% 97.91% # number of callpals executed +system.cpu0.kern.callpal_whami 2 0.00% 97.92% # number of callpals executed +system.cpu0.kern.callpal_rti 3236 1.83% 99.75% # number of callpals executed +system.cpu0.kern.callpal_callsys 325 0.18% 99.93% # number of callpals executed +system.cpu0.kern.callpal_imb 121 0.07% 100.00% # number of callpals executed system.cpu0.kern.inst.arm 0 # number of arm instructions executed -system.cpu0.kern.inst.hwrei 197512 # 
number of hwrei instructions executed +system.cpu0.kern.inst.hwrei 190918 # number of hwrei instructions executed system.cpu0.kern.inst.ivlb 0 # number of ivlb instructions executed system.cpu0.kern.inst.ivle 0 # number of ivle instructions executed -system.cpu0.kern.inst.quiesce 1917 # number of quiesce instructions executed -system.cpu0.kern.ipl_count 174431 # number of times we switched to this ipl -system.cpu0.kern.ipl_count_0 73383 42.07% 42.07% # number of times we switched to this ipl -system.cpu0.kern.ipl_count_21 286 0.16% 42.23% # number of times we switched to this ipl -system.cpu0.kern.ipl_count_22 5540 3.18% 45.41% # number of times we switched to this ipl -system.cpu0.kern.ipl_count_30 8 0.00% 45.41% # number of times we switched to this ipl -system.cpu0.kern.ipl_count_31 95214 54.59% 100.00% # number of times we switched to this ipl -system.cpu0.kern.ipl_good 156222 # number of times we switched to this ipl from a different ipl -system.cpu0.kern.ipl_good_0 73336 46.94% 46.94% # number of times we switched to this ipl from a different ipl -system.cpu0.kern.ipl_good_21 286 0.18% 47.13% # number of times we switched to this ipl from a different ipl -system.cpu0.kern.ipl_good_22 5540 3.55% 50.67% # number of times we switched to this ipl from a different ipl -system.cpu0.kern.ipl_good_30 8 0.01% 50.68% # number of times we switched to this ipl from a different ipl -system.cpu0.kern.ipl_good_31 77052 49.32% 100.00% # number of times we switched to this ipl from a different ipl -system.cpu0.kern.ipl_ticks 3555570558 # number of cycles we spent at this ipl -system.cpu0.kern.ipl_ticks_0 3533670973 99.38% 99.38% # number of cycles we spent at this ipl -system.cpu0.kern.ipl_ticks_21 45785 0.00% 99.39% # number of cycles we spent at this ipl -system.cpu0.kern.ipl_ticks_22 1008642 0.03% 99.41% # number of cycles we spent at this ipl -system.cpu0.kern.ipl_ticks_30 1988 0.00% 99.41% # number of cycles we spent at this ipl -system.cpu0.kern.ipl_ticks_31 20843170 0.59% 100.00% # number of cycles we spent at this ipl -system.cpu0.kern.ipl_used 0.895609 # fraction of swpipl calls that actually changed the ipl -system.cpu0.kern.ipl_used_0 0.999360 # fraction of swpipl calls that actually changed the ipl +system.cpu0.kern.inst.quiesce 1922 # number of quiesce instructions executed +system.cpu0.kern.ipl_count 172116 # number of times we switched to this ipl +system.cpu0.kern.ipl_count_0 72060 41.87% 41.87% # number of times we switched to this ipl +system.cpu0.kern.ipl_count_21 251 0.15% 42.01% # number of times we switched to this ipl +system.cpu0.kern.ipl_count_22 5518 3.21% 45.22% # number of times we switched to this ipl +system.cpu0.kern.ipl_count_30 7 0.00% 45.22% # number of times we switched to this ipl +system.cpu0.kern.ipl_count_31 94280 54.78% 100.00% # number of times we switched to this ipl +system.cpu0.kern.ipl_good 153515 # number of times we switched to this ipl from a different ipl +system.cpu0.kern.ipl_good_0 72019 46.91% 46.91% # number of times we switched to this ipl from a different ipl +system.cpu0.kern.ipl_good_21 251 0.16% 47.08% # number of times we switched to this ipl from a different ipl +system.cpu0.kern.ipl_good_22 5518 3.59% 50.67% # number of times we switched to this ipl from a different ipl +system.cpu0.kern.ipl_good_30 7 0.00% 50.68% # number of times we switched to this ipl from a different ipl +system.cpu0.kern.ipl_good_31 75720 49.32% 100.00% # number of times we switched to this ipl from a different ipl +system.cpu0.kern.ipl_ticks 3543835079 # number of 
cycles we spent at this ipl +system.cpu0.kern.ipl_ticks_0 3521923327 99.38% 99.38% # number of cycles we spent at this ipl +system.cpu0.kern.ipl_ticks_21 39982 0.00% 99.38% # number of cycles we spent at this ipl +system.cpu0.kern.ipl_ticks_22 1005040 0.03% 99.41% # number of cycles we spent at this ipl +system.cpu0.kern.ipl_ticks_30 1756 0.00% 99.41% # number of cycles we spent at this ipl +system.cpu0.kern.ipl_ticks_31 20864974 0.59% 100.00% # number of cycles we spent at this ipl +system.cpu0.kern.ipl_used 0.891928 # fraction of swpipl calls that actually changed the ipl +system.cpu0.kern.ipl_used_0 0.999431 # fraction of swpipl calls that actually changed the ipl system.cpu0.kern.ipl_used_21 1 # fraction of swpipl calls that actually changed the ipl system.cpu0.kern.ipl_used_22 1 # fraction of swpipl calls that actually changed the ipl system.cpu0.kern.ipl_used_30 1 # fraction of swpipl calls that actually changed the ipl -system.cpu0.kern.ipl_used_31 0.809251 # fraction of swpipl calls that actually changed the ipl -system.cpu0.kern.mode_good_kernel 1633 -system.cpu0.kern.mode_good_user 1486 -system.cpu0.kern.mode_good_idle 147 -system.cpu0.kern.mode_switch_kernel 2898 # number of protection mode switches -system.cpu0.kern.mode_switch_user 1486 # number of protection mode switches -system.cpu0.kern.mode_switch_idle 2090 # number of protection mode switches -system.cpu0.kern.mode_switch_good 0.504479 # fraction of useful protection mode switches -system.cpu0.kern.mode_switch_good_kernel 0.563492 # fraction of useful protection mode switches +system.cpu0.kern.ipl_used_31 0.803140 # fraction of swpipl calls that actually changed the ipl +system.cpu0.kern.mode_good_kernel 1277 +system.cpu0.kern.mode_good_user 1129 +system.cpu0.kern.mode_good_idle 148 +system.cpu0.kern.mode_switch_kernel 2253 # number of protection mode switches +system.cpu0.kern.mode_switch_user 1129 # number of protection mode switches +system.cpu0.kern.mode_switch_idle 2074 # number of protection mode switches +system.cpu0.kern.mode_switch_good 0.468109 # fraction of useful protection mode switches +system.cpu0.kern.mode_switch_good_kernel 0.566800 # fraction of useful protection mode switches system.cpu0.kern.mode_switch_good_user 1 # fraction of useful protection mode switches -system.cpu0.kern.mode_switch_good_idle 0.070335 # fraction of useful protection mode switches -system.cpu0.kern.mode_ticks_kernel 29671488 0.83% 0.83% # number of ticks spent at the given mode -system.cpu0.kern.mode_ticks_user 2605758 0.07% 0.91% # number of ticks spent at the given mode -system.cpu0.kern.mode_ticks_idle 3523245106 99.09% 100.00% # number of ticks spent at the given mode -system.cpu0.kern.swap_context 1376 # number of times the context was actually changed -system.cpu0.kern.syscall 312 # number of syscalls executed -system.cpu0.kern.syscall_fork 9 2.88% 2.88% # number of syscalls executed -system.cpu0.kern.syscall_read 20 6.41% 9.29% # number of syscalls executed -system.cpu0.kern.syscall_write 6 1.92% 11.22% # number of syscalls executed -system.cpu0.kern.syscall_close 36 11.54% 22.76% # number of syscalls executed -system.cpu0.kern.syscall_chdir 1 0.32% 23.08% # number of syscalls executed -system.cpu0.kern.syscall_chmod 1 0.32% 23.40% # number of syscalls executed -system.cpu0.kern.syscall_obreak 26 8.33% 31.73% # number of syscalls executed -system.cpu0.kern.syscall_lseek 9 2.88% 34.62% # number of syscalls executed -system.cpu0.kern.syscall_getpid 8 2.56% 37.18% # number of syscalls executed 
-system.cpu0.kern.syscall_setuid 2 0.64% 37.82% # number of syscalls executed -system.cpu0.kern.syscall_getuid 4 1.28% 39.10% # number of syscalls executed -system.cpu0.kern.syscall_access 4 1.28% 40.38% # number of syscalls executed -system.cpu0.kern.syscall_dup 4 1.28% 41.67% # number of syscalls executed -system.cpu0.kern.syscall_open 40 12.82% 54.49% # number of syscalls executed -system.cpu0.kern.syscall_getgid 4 1.28% 55.77% # number of syscalls executed -system.cpu0.kern.syscall_sigprocmask 12 3.85% 59.62% # number of syscalls executed -system.cpu0.kern.syscall_ioctl 13 4.17% 63.78% # number of syscalls executed -system.cpu0.kern.syscall_readlink 1 0.32% 64.10% # number of syscalls executed -system.cpu0.kern.syscall_execve 7 2.24% 66.35% # number of syscalls executed -system.cpu0.kern.syscall_pre_F64_stat 22 7.05% 73.40% # number of syscalls executed -system.cpu0.kern.syscall_pre_F64_lstat 1 0.32% 73.72% # number of syscalls executed -system.cpu0.kern.syscall_mmap 28 8.97% 82.69% # number of syscalls executed -system.cpu0.kern.syscall_munmap 4 1.28% 83.97% # number of syscalls executed -system.cpu0.kern.syscall_mprotect 7 2.24% 86.22% # number of syscalls executed -system.cpu0.kern.syscall_gethostname 1 0.32% 86.54% # number of syscalls executed -system.cpu0.kern.syscall_dup2 3 0.96% 87.50% # number of syscalls executed -system.cpu0.kern.syscall_pre_F64_fstat 15 4.81% 92.31% # number of syscalls executed -system.cpu0.kern.syscall_fcntl 11 3.53% 95.83% # number of syscalls executed -system.cpu0.kern.syscall_socket 3 0.96% 96.79% # number of syscalls executed -system.cpu0.kern.syscall_connect 3 0.96% 97.76% # number of syscalls executed -system.cpu0.kern.syscall_setgid 2 0.64% 98.40% # number of syscalls executed -system.cpu0.kern.syscall_getrlimit 2 0.64% 99.04% # number of syscalls executed -system.cpu0.kern.syscall_setsid 3 0.96% 100.00% # number of syscalls executed -system.cpu0.not_idle_fraction 0.015431 # Percentage of non-idle cycles -system.cpu0.numCycles 54873632 # number of cpu cycles simulated -system.cpu0.num_insts 54868848 # Number of instructions executed -system.cpu0.num_refs 12918621 # Number of memory references -system.cpu1.dtb.accesses 524398 # DTB accesses -system.cpu1.dtb.acv 60 # DTB access violations -system.cpu1.dtb.hits 2058922 # DTB hits -system.cpu1.dtb.misses 5263 # DTB misses -system.cpu1.dtb.read_accesses 337746 # DTB read accesses -system.cpu1.dtb.read_acv 23 # DTB read access violations -system.cpu1.dtb.read_hits 1301369 # DTB read hits -system.cpu1.dtb.read_misses 4766 # DTB read misses -system.cpu1.dtb.write_accesses 186652 # DTB write accesses -system.cpu1.dtb.write_acv 37 # DTB write access violations -system.cpu1.dtb.write_hits 757553 # DTB write hits -system.cpu1.dtb.write_misses 497 # DTB write misses -system.cpu1.idle_fraction 0.997638 # Percentage of idle cycles -system.cpu1.itb.accesses 1711917 # ITB accesses +system.cpu0.kern.mode_switch_good_idle 0.071360 # fraction of useful protection mode switches +system.cpu0.kern.mode_ticks_kernel 28710240 0.81% 0.81% # number of ticks spent at the given mode +system.cpu0.kern.mode_ticks_user 2184201 0.06% 0.87% # number of ticks spent at the given mode +system.cpu0.kern.mode_ticks_idle 3512891779 99.13% 100.00% # number of ticks spent at the given mode +system.cpu0.kern.swap_context 1118 # number of times the context was actually changed +system.cpu0.kern.syscall 192 # number of syscalls executed +system.cpu0.kern.syscall_fork 7 3.65% 3.65% # number of syscalls executed +system.cpu0.kern.syscall_read 
13 6.77% 10.42% # number of syscalls executed +system.cpu0.kern.syscall_write 4 2.08% 12.50% # number of syscalls executed +system.cpu0.kern.syscall_close 28 14.58% 27.08% # number of syscalls executed +system.cpu0.kern.syscall_chdir 1 0.52% 27.60% # number of syscalls executed +system.cpu0.kern.syscall_obreak 7 3.65% 31.25% # number of syscalls executed +system.cpu0.kern.syscall_lseek 6 3.12% 34.37% # number of syscalls executed +system.cpu0.kern.syscall_getpid 4 2.08% 36.46% # number of syscalls executed +system.cpu0.kern.syscall_setuid 1 0.52% 36.98% # number of syscalls executed +system.cpu0.kern.syscall_getuid 3 1.56% 38.54% # number of syscalls executed +system.cpu0.kern.syscall_access 7 3.65% 42.19% # number of syscalls executed +system.cpu0.kern.syscall_dup 2 1.04% 43.23% # number of syscalls executed +system.cpu0.kern.syscall_open 34 17.71% 60.94% # number of syscalls executed +system.cpu0.kern.syscall_getgid 3 1.56% 62.50% # number of syscalls executed +system.cpu0.kern.syscall_sigprocmask 8 4.17% 66.67% # number of syscalls executed +system.cpu0.kern.syscall_ioctl 9 4.69% 71.35% # number of syscalls executed +system.cpu0.kern.syscall_readlink 1 0.52% 71.87% # number of syscalls executed +system.cpu0.kern.syscall_execve 5 2.60% 74.48% # number of syscalls executed +system.cpu0.kern.syscall_mmap 22 11.46% 85.94% # number of syscalls executed +system.cpu0.kern.syscall_munmap 2 1.04% 86.98% # number of syscalls executed +system.cpu0.kern.syscall_mprotect 6 3.12% 90.10% # number of syscalls executed +system.cpu0.kern.syscall_gethostname 1 0.52% 90.62% # number of syscalls executed +system.cpu0.kern.syscall_dup2 2 1.04% 91.67% # number of syscalls executed +system.cpu0.kern.syscall_fcntl 8 4.17% 95.83% # number of syscalls executed +system.cpu0.kern.syscall_socket 2 1.04% 96.87% # number of syscalls executed +system.cpu0.kern.syscall_connect 2 1.04% 97.92% # number of syscalls executed +system.cpu0.kern.syscall_setgid 1 0.52% 98.44% # number of syscalls executed +system.cpu0.kern.syscall_getrlimit 1 0.52% 98.96% # number of syscalls executed +system.cpu0.kern.syscall_setsid 2 1.04% 100.00% # number of syscalls executed +system.cpu0.not_idle_fraction 0.015107 # Percentage of non-idle cycles +system.cpu0.numCycles 53543489 # number of cpu cycles simulated +system.cpu0.num_insts 53539979 # Number of instructions executed +system.cpu0.num_refs 12727196 # Number of memory references +system.cpu1.dtb.accesses 460215 # DTB accesses +system.cpu1.dtb.acv 72 # DTB access violations +system.cpu1.dtb.hits 2012555 # DTB hits +system.cpu1.dtb.misses 4236 # DTB misses +system.cpu1.dtb.read_accesses 319867 # DTB read accesses +system.cpu1.dtb.read_acv 26 # DTB read access violations +system.cpu1.dtb.read_hits 1276251 # DTB read hits +system.cpu1.dtb.read_misses 3800 # DTB read misses +system.cpu1.dtb.write_accesses 140348 # DTB write accesses +system.cpu1.dtb.write_acv 46 # DTB write access violations +system.cpu1.dtb.write_hits 736304 # DTB write hits +system.cpu1.dtb.write_misses 436 # DTB write misses +system.cpu1.idle_fraction 0.997806 # Percentage of idle cycles +system.cpu1.itb.accesses 1302484 # ITB accesses system.cpu1.itb.acv 23 # ITB acv -system.cpu1.itb.hits 1709682 # ITB hits -system.cpu1.itb.misses 2235 # ITB misses -system.cpu1.kern.callpal 25990 # number of callpals executed +system.cpu1.itb.hits 1300768 # ITB hits +system.cpu1.itb.misses 1716 # ITB misses +system.cpu1.kern.callpal 27118 # number of callpals executed system.cpu1.kern.callpal_cserve 1 0.00% 0.00% # number of callpals 
executed -system.cpu1.kern.callpal_wripir 8 0.03% 0.03% # number of callpals executed -system.cpu1.kern.callpal_wrmces 1 0.00% 0.04% # number of callpals executed +system.cpu1.kern.callpal_wripir 7 0.03% 0.03% # number of callpals executed +system.cpu1.kern.callpal_wrmces 1 0.00% 0.03% # number of callpals executed system.cpu1.kern.callpal_wrfen 1 0.00% 0.04% # number of callpals executed -system.cpu1.kern.callpal_swpctx 554 2.13% 2.17% # number of callpals executed -system.cpu1.kern.callpal_tbi 7 0.03% 2.20% # number of callpals executed -system.cpu1.kern.callpal_wrent 7 0.03% 2.23% # number of callpals executed -system.cpu1.kern.callpal_swpipl 22366 86.06% 88.28% # number of callpals executed -system.cpu1.kern.callpal_rdps 98 0.38% 88.66% # number of callpals executed -system.cpu1.kern.callpal_wrkgp 1 0.00% 88.66% # number of callpals executed -system.cpu1.kern.callpal_wrusp 4 0.02% 88.68% # number of callpals executed -system.cpu1.kern.callpal_rdusp 1 0.00% 88.68% # number of callpals executed -system.cpu1.kern.callpal_whami 3 0.01% 88.70% # number of callpals executed -system.cpu1.kern.callpal_rti 2613 10.05% 98.75% # number of callpals executed -system.cpu1.kern.callpal_callsys 208 0.80% 99.55% # number of callpals executed -system.cpu1.kern.callpal_imb 116 0.45% 100.00% # number of callpals executed +system.cpu1.kern.callpal_swpctx 515 1.90% 1.94% # number of callpals executed +system.cpu1.kern.callpal_tbi 10 0.04% 1.97% # number of callpals executed +system.cpu1.kern.callpal_wrent 7 0.03% 2.00% # number of callpals executed +system.cpu1.kern.callpal_swpipl 23496 86.64% 88.64% # number of callpals executed +system.cpu1.kern.callpal_rdps 251 0.93% 89.57% # number of callpals executed +system.cpu1.kern.callpal_wrkgp 1 0.00% 89.57% # number of callpals executed +system.cpu1.kern.callpal_wrusp 4 0.01% 89.59% # number of callpals executed +system.cpu1.kern.callpal_rdusp 1 0.00% 89.59% # number of callpals executed +system.cpu1.kern.callpal_whami 3 0.01% 89.60% # number of callpals executed +system.cpu1.kern.callpal_rti 2552 9.41% 99.01% # number of callpals executed +system.cpu1.kern.callpal_callsys 208 0.77% 99.78% # number of callpals executed +system.cpu1.kern.callpal_imb 59 0.22% 100.00% # number of callpals executed system.cpu1.kern.callpal_rdunique 1 0.00% 100.00% # number of callpals executed system.cpu1.kern.inst.arm 0 # number of arm instructions executed -system.cpu1.kern.inst.hwrei 35475 # number of hwrei instructions executed +system.cpu1.kern.inst.hwrei 35069 # number of hwrei instructions executed system.cpu1.kern.inst.ivlb 0 # number of ivlb instructions executed system.cpu1.kern.inst.ivle 0 # number of ivle instructions executed -system.cpu1.kern.inst.quiesce 1946 # number of quiesce instructions executed -system.cpu1.kern.ipl_count 26882 # number of times we switched to this ipl -system.cpu1.kern.ipl_count_0 9636 35.85% 35.85% # number of times we switched to this ipl -system.cpu1.kern.ipl_count_22 5504 20.47% 56.32% # number of times we switched to this ipl -system.cpu1.kern.ipl_count_30 91 0.34% 56.66% # number of times we switched to this ipl -system.cpu1.kern.ipl_count_31 11651 43.34% 100.00% # number of times we switched to this ipl -system.cpu1.kern.ipl_good 26602 # number of times we switched to this ipl from a different ipl -system.cpu1.kern.ipl_good_0 9607 36.11% 36.11% # number of times we switched to this ipl from a different ipl -system.cpu1.kern.ipl_good_22 5504 20.69% 56.80% # number of times we switched to this ipl from a different ipl 
-system.cpu1.kern.ipl_good_30 91 0.34% 57.15% # number of times we switched to this ipl from a different ipl -system.cpu1.kern.ipl_good_31 11400 42.85% 100.00% # number of times we switched to this ipl from a different ipl -system.cpu1.kern.ipl_ticks 3556060349 # number of cycles we spent at this ipl -system.cpu1.kern.ipl_ticks_0 3533823708 99.37% 99.37% # number of cycles we spent at this ipl -system.cpu1.kern.ipl_ticks_22 1040434 0.03% 99.40% # number of cycles we spent at this ipl -system.cpu1.kern.ipl_ticks_30 23860 0.00% 99.40% # number of cycles we spent at this ipl -system.cpu1.kern.ipl_ticks_31 21172347 0.60% 100.00% # number of cycles we spent at this ipl -system.cpu1.kern.ipl_used 0.989584 # fraction of swpipl calls that actually changed the ipl -system.cpu1.kern.ipl_used_0 0.996990 # fraction of swpipl calls that actually changed the ipl +system.cpu1.kern.inst.quiesce 1947 # number of quiesce instructions executed +system.cpu1.kern.ipl_count 27951 # number of times we switched to this ipl +system.cpu1.kern.ipl_count_0 10084 36.08% 36.08% # number of times we switched to this ipl +system.cpu1.kern.ipl_count_22 5485 19.62% 55.70% # number of times we switched to this ipl +system.cpu1.kern.ipl_count_30 97 0.35% 56.05% # number of times we switched to this ipl +system.cpu1.kern.ipl_count_31 12285 43.95% 100.00% # number of times we switched to this ipl +system.cpu1.kern.ipl_good 27484 # number of times we switched to this ipl from a different ipl +system.cpu1.kern.ipl_good_0 10061 36.61% 36.61% # number of times we switched to this ipl from a different ipl +system.cpu1.kern.ipl_good_22 5485 19.96% 56.56% # number of times we switched to this ipl from a different ipl +system.cpu1.kern.ipl_good_30 97 0.35% 56.92% # number of times we switched to this ipl from a different ipl +system.cpu1.kern.ipl_good_31 11841 43.08% 100.00% # number of times we switched to this ipl from a different ipl +system.cpu1.kern.ipl_ticks 3544246744 # number of cycles we spent at this ipl +system.cpu1.kern.ipl_ticks_0 3521927913 99.37% 99.37% # number of cycles we spent at this ipl +system.cpu1.kern.ipl_ticks_22 1037048 0.03% 99.40% # number of cycles we spent at this ipl +system.cpu1.kern.ipl_ticks_30 25211 0.00% 99.40% # number of cycles we spent at this ipl +system.cpu1.kern.ipl_ticks_31 21256572 0.60% 100.00% # number of cycles we spent at this ipl +system.cpu1.kern.ipl_used 0.983292 # fraction of swpipl calls that actually changed the ipl +system.cpu1.kern.ipl_used_0 0.997719 # fraction of swpipl calls that actually changed the ipl system.cpu1.kern.ipl_used_22 1 # fraction of swpipl calls that actually changed the ipl system.cpu1.kern.ipl_used_30 1 # fraction of swpipl calls that actually changed the ipl -system.cpu1.kern.ipl_used_31 0.978457 # fraction of swpipl calls that actually changed the ipl -system.cpu1.kern.mode_good_kernel 691 -system.cpu1.kern.mode_good_user 692 +system.cpu1.kern.ipl_used_31 0.963858 # fraction of swpipl calls that actually changed the ipl +system.cpu1.kern.mode_good_kernel 636 +system.cpu1.kern.mode_good_user 637 system.cpu1.kern.mode_good_idle 0 -system.cpu1.kern.mode_switch_kernel 3163 # number of protection mode switches -system.cpu1.kern.mode_switch_user 692 # number of protection mode switches +system.cpu1.kern.mode_switch_kernel 3063 # number of protection mode switches +system.cpu1.kern.mode_switch_user 637 # number of protection mode switches system.cpu1.kern.mode_switch_idle 0 # number of protection mode switches -system.cpu1.kern.mode_switch_good 0.358755 # fraction 
of useful protection mode switches -system.cpu1.kern.mode_switch_good_kernel 0.218463 # fraction of useful protection mode switches +system.cpu1.kern.mode_switch_good 0.344054 # fraction of useful protection mode switches +system.cpu1.kern.mode_switch_good_kernel 0.207640 # fraction of useful protection mode switches system.cpu1.kern.mode_switch_good_user 1 # fraction of useful protection mode switches system.cpu1.kern.mode_switch_good_idle # fraction of useful protection mode switches -system.cpu1.kern.mode_ticks_kernel 3554209770 99.95% 99.95% # number of ticks spent at the given mode -system.cpu1.kern.mode_ticks_user 1850577 0.05% 100.00% # number of ticks spent at the given mode +system.cpu1.kern.mode_ticks_kernel 3542834137 99.96% 99.96% # number of ticks spent at the given mode +system.cpu1.kern.mode_ticks_user 1412605 0.04% 100.00% # number of ticks spent at the given mode system.cpu1.kern.mode_ticks_idle 0 0.00% 100.00% # number of ticks spent at the given mode -system.cpu1.kern.swap_context 555 # number of times the context was actually changed -system.cpu1.kern.syscall 163 # number of syscalls executed -system.cpu1.kern.syscall_fork 1 0.61% 0.61% # number of syscalls executed -system.cpu1.kern.syscall_read 13 7.98% 8.59% # number of syscalls executed -system.cpu1.kern.syscall_write 1 0.61% 9.20% # number of syscalls executed -system.cpu1.kern.syscall_close 13 7.98% 17.18% # number of syscalls executed -system.cpu1.kern.syscall_obreak 18 11.04% 28.22% # number of syscalls executed -system.cpu1.kern.syscall_lseek 4 2.45% 30.67% # number of syscalls executed -system.cpu1.kern.syscall_getpid 2 1.23% 31.90% # number of syscalls executed -system.cpu1.kern.syscall_setuid 2 1.23% 33.13% # number of syscalls executed -system.cpu1.kern.syscall_getuid 4 2.45% 35.58% # number of syscalls executed -system.cpu1.kern.syscall_open 28 17.18% 52.76% # number of syscalls executed -system.cpu1.kern.syscall_getgid 4 2.45% 55.21% # number of syscalls executed -system.cpu1.kern.syscall_sigprocmask 2 1.23% 56.44% # number of syscalls executed -system.cpu1.kern.syscall_ioctl 3 1.84% 58.28% # number of syscalls executed -system.cpu1.kern.syscall_readlink 1 0.61% 58.90% # number of syscalls executed -system.cpu1.kern.syscall_execve 1 0.61% 59.51% # number of syscalls executed -system.cpu1.kern.syscall_pre_F64_stat 9 5.52% 65.03% # number of syscalls executed -system.cpu1.kern.syscall_mmap 27 16.56% 81.60% # number of syscalls executed -system.cpu1.kern.syscall_munmap 2 1.23% 82.82% # number of syscalls executed -system.cpu1.kern.syscall_mprotect 7 4.29% 87.12% # number of syscalls executed -system.cpu1.kern.syscall_gethostname 1 0.61% 87.73% # number of syscalls executed -system.cpu1.kern.syscall_dup2 1 0.61% 88.34% # number of syscalls executed -system.cpu1.kern.syscall_pre_F64_fstat 13 7.98% 96.32% # number of syscalls executed -system.cpu1.kern.syscall_fcntl 3 1.84% 98.16% # number of syscalls executed -system.cpu1.kern.syscall_setgid 2 1.23% 99.39% # number of syscalls executed -system.cpu1.kern.syscall_getrlimit 1 0.61% 100.00% # number of syscalls executed -system.cpu1.not_idle_fraction 0.002362 # Percentage of non-idle cycles -system.cpu1.numCycles 8398405 # number of cpu cycles simulated -system.cpu1.num_insts 8396147 # Number of instructions executed -system.cpu1.num_refs 2073144 # Number of memory references +system.cpu1.kern.swap_context 516 # number of times the context was actually changed +system.cpu1.kern.syscall 137 # number of syscalls executed +system.cpu1.kern.syscall_fork 1 0.73% 0.73% 
# number of syscalls executed +system.cpu1.kern.syscall_read 17 12.41% 13.14% # number of syscalls executed +system.cpu1.kern.syscall_close 15 10.95% 24.09% # number of syscalls executed +system.cpu1.kern.syscall_chmod 1 0.73% 24.82% # number of syscalls executed +system.cpu1.kern.syscall_obreak 8 5.84% 30.66% # number of syscalls executed +system.cpu1.kern.syscall_lseek 4 2.92% 33.58% # number of syscalls executed +system.cpu1.kern.syscall_getpid 2 1.46% 35.04% # number of syscalls executed +system.cpu1.kern.syscall_setuid 3 2.19% 37.23% # number of syscalls executed +system.cpu1.kern.syscall_getuid 3 2.19% 39.42% # number of syscalls executed +system.cpu1.kern.syscall_access 4 2.92% 42.34% # number of syscalls executed +system.cpu1.kern.syscall_open 21 15.33% 57.66% # number of syscalls executed +system.cpu1.kern.syscall_getgid 3 2.19% 59.85% # number of syscalls executed +system.cpu1.kern.syscall_sigprocmask 2 1.46% 61.31% # number of syscalls executed +system.cpu1.kern.syscall_ioctl 1 0.73% 62.04% # number of syscalls executed +system.cpu1.kern.syscall_execve 2 1.46% 63.50% # number of syscalls executed +system.cpu1.kern.syscall_mmap 32 23.36% 86.86% # number of syscalls executed +system.cpu1.kern.syscall_munmap 1 0.73% 87.59% # number of syscalls executed +system.cpu1.kern.syscall_mprotect 10 7.30% 94.89% # number of syscalls executed +system.cpu1.kern.syscall_dup2 1 0.73% 95.62% # number of syscalls executed +system.cpu1.kern.syscall_fcntl 2 1.46% 97.08% # number of syscalls executed +system.cpu1.kern.syscall_setgid 3 2.19% 99.27% # number of syscalls executed +system.cpu1.kern.syscall_getrlimit 1 0.73% 100.00% # number of syscalls executed +system.cpu1.not_idle_fraction 0.002194 # Percentage of non-idle cycles +system.cpu1.numCycles 7776377 # number of cpu cycles simulated +system.cpu1.num_insts 7774638 # Number of instructions executed +system.cpu1.num_refs 2025195 # Number of memory references system.disk0.dma_read_bytes 1024 # Number of bytes transfered via DMA reads (not PRD). system.disk0.dma_read_full_pages 0 # Number of full page size DMA reads (not PRD). system.disk0.dma_read_txs 1 # Number of DMA read transactions (not PRD). -system.disk0.dma_write_bytes 2521088 # Number of bytes transfered via DMA writes. -system.disk0.dma_write_full_pages 285 # Number of full page size DMA writes. -system.disk0.dma_write_txs 375 # Number of DMA write transactions. +system.disk0.dma_write_bytes 2735104 # Number of bytes transfered via DMA writes. +system.disk0.dma_write_full_pages 306 # Number of full page size DMA writes. +system.disk0.dma_write_txs 412 # Number of DMA write transactions. system.disk2.dma_read_bytes 0 # Number of bytes transfered via DMA reads (not PRD). system.disk2.dma_read_full_pages 0 # Number of full page size DMA reads (not PRD). system.disk2.dma_read_txs 0 # Number of DMA read transactions (not PRD). 
diff --git a/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-atomic-dual/stdout b/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-atomic-dual/stdout index c8330eef2..039088577 100644 --- a/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-atomic-dual/stdout +++ b/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-atomic-dual/stdout @@ -5,8 +5,8 @@ The Regents of The University of Michigan All Rights Reserved -M5 compiled Sep 5 2006 15:32:34 -M5 started Tue Sep 5 15:43:12 2006 +M5 compiled Oct 5 2006 22:13:02 +M5 started Fri Oct 6 00:24:12 2006 M5 executing on zizzer.eecs.umich.edu command line: build/ALPHA_FS/m5.opt -d build/ALPHA_FS/tests/opt/quick/10.linux-boot/alpha/linux/tsunami-simple-atomic-dual tests/run.py quick/10.linux-boot/alpha/linux/tsunami-simple-atomic-dual -Exiting @ tick 3556060806 because m5_exit instruction encountered +Exiting @ tick 3544247159 because m5_exit instruction encountered diff --git a/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-atomic/config.ini b/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-atomic/config.ini index c017495f6..e30428078 100644 --- a/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-atomic/config.ini +++ b/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-atomic/config.ini @@ -58,6 +58,7 @@ mem_mode=atomic pal=/dist/m5/system/binaries/ts_osfpal physmem=system.physmem readfile=tests/halt.sh +symbolfile= system_rev=1024 system_type=34 @@ -86,6 +87,7 @@ max_loads_all_threads=0 max_loads_any_thread=0 mem=system.physmem profile=0 +progress_interval=0 simulate_stalls=false system=system width=1 @@ -554,6 +556,7 @@ pio=system.iobus.port[24] [trace] bufsize=0 +cycle=0 dump_on_exit=false file=cout flags= diff --git a/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-atomic/config.out b/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-atomic/config.out index 018308862..ea63dce8b 100644 --- a/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-atomic/config.out +++ b/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-atomic/config.out @@ -21,6 +21,7 @@ console=/dist/m5/system/binaries/console pal=/dist/m5/system/binaries/ts_osfpal boot_osflags=root=/dev/hda1 console=ttyS0 readfile=tests/halt.sh +symbolfile= init_param=0 system_type=34 system_rev=1024 @@ -86,6 +87,7 @@ max_insts_any_thread=0 max_insts_all_threads=0 max_loads_any_thread=0 max_loads_all_threads=0 +progress_interval=0 mem=system.physmem system=system itb=system.cpu.itb @@ -465,6 +467,7 @@ bus_id=0 [trace] flags= start=0 +cycle=0 bufsize=0 file=cout dump_on_exit=false @@ -508,6 +511,9 @@ trace_system=client [debug] break_cycles= +[statsreset] +reset_cycle=0 + [pseudo_inst] quiesce=true statistics=true diff --git a/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-atomic/console.system.sim_console b/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-atomic/console.system.sim_console index ea7a20777..d6e3955cc 100644 --- a/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-atomic/console.system.sim_console +++ b/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-atomic/console.system.sim_console @@ -69,7 +69,7 @@ M5 console: m5AlphaAccess @ 0xFFFFFD0200000000 hdb: M5 IDE Disk, ATA DISK drive ide0 at 0x8410-0x8417,0x8422 on irq 31 hda: max request size: 128KiB - hda: 163296 sectors (83 MB), CHS=162/16/63, UDMA(33) + hda: 511056 sectors (261 MB), CHS=507/16/63, UDMA(33) hda: hda1 hdb: max request size: 128KiB hdb: 4177920 sectors (2139 MB), CHS=4144/16/63, UDMA(33) @@ -97,10 
+97,6 @@ M5 console: m5AlphaAccess @ 0xFFFFFD0200000000 All bugs added by David S. Miller VFS: Mounted root (ext2 filesystem) readonly. Freeing unused kernel memory: 480k freed - init started: BusyBox v1.00-rc2 (2004.11.18-16:22+0000) multi-call binary - -PTXdist-0.7.0 (2004-11-18T11:23:40-0500) - + init started: BusyBox v1.1.0 (2006.08.17-02:54+0000) multi-call binary mounting filesystems... -EXT2-fs warning: checktime reached, running e2fsck is recommended - loading script... +loading script... diff --git a/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-atomic/m5stats.txt b/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-atomic/m5stats.txt index 3a7dc1cd4..5c403c0a9 100644 --- a/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-atomic/m5stats.txt +++ b/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-atomic/m5stats.txt @@ -1,130 +1,127 @@ ---------- Begin Simulation Statistics ---------- -host_inst_rate 1346129 # Simulator instruction rate (inst/s) -host_mem_usage 194392 # Number of bytes of host memory used -host_seconds 44.52 # Real time elapsed on the host -host_tick_rate 78470813 # Simulator tick rate (ticks/s) +host_inst_rate 1121378 # Simulator instruction rate (inst/s) +host_mem_usage 194272 # Number of bytes of host memory used +host_seconds 51.72 # Real time elapsed on the host +host_tick_rate 67313414 # Simulator tick rate (ticks/s) sim_freq 2000000000 # Frequency of simulated ticks -sim_insts 59929520 # Number of instructions simulated -sim_seconds 1.746773 # Number of seconds simulated -sim_ticks 3493545624 # Number of ticks simulated -system.cpu.dtb.accesses 2354955 # DTB accesses -system.cpu.dtb.acv 413 # DTB access violations -system.cpu.dtb.hits 13929995 # DTB hits -system.cpu.dtb.misses 16187 # DTB misses -system.cpu.dtb.read_accesses 832415 # DTB read accesses -system.cpu.dtb.read_acv 242 # DTB read access violations -system.cpu.dtb.read_hits 7718636 # DTB read hits -system.cpu.dtb.read_misses 13695 # DTB read misses -system.cpu.dtb.write_accesses 1522540 # DTB write accesses -system.cpu.dtb.write_acv 171 # DTB write access violations -system.cpu.dtb.write_hits 6211359 # DTB write hits -system.cpu.dtb.write_misses 2492 # DTB write misses -system.cpu.idle_fraction 0.982844 # Percentage of idle cycles -system.cpu.itb.accesses 4037380 # ITB accesses -system.cpu.itb.acv 239 # ITB acv -system.cpu.itb.hits 4030656 # ITB hits -system.cpu.itb.misses 6724 # ITB misses -system.cpu.kern.callpal 184022 # number of callpals executed +sim_insts 58001813 # Number of instructions simulated +sim_seconds 1.740863 # Number of seconds simulated +sim_ticks 3481726167 # Number of ticks simulated +system.cpu.dtb.accesses 2309470 # DTB accesses +system.cpu.dtb.acv 367 # DTB access violations +system.cpu.dtb.hits 13711941 # DTB hits +system.cpu.dtb.misses 12493 # DTB misses +system.cpu.dtb.read_accesses 828530 # DTB read accesses +system.cpu.dtb.read_acv 210 # DTB read access violations +system.cpu.dtb.read_hits 7597829 # DTB read hits +system.cpu.dtb.read_misses 10298 # DTB read misses +system.cpu.dtb.write_accesses 1480940 # DTB write accesses +system.cpu.dtb.write_acv 157 # DTB write access violations +system.cpu.dtb.write_hits 6114112 # DTB write hits +system.cpu.dtb.write_misses 2195 # DTB write misses +system.cpu.idle_fraction 0.983340 # Percentage of idle cycles +system.cpu.itb.accesses 3281346 # ITB accesses +system.cpu.itb.acv 184 # ITB acv +system.cpu.itb.hits 3276356 # ITB hits +system.cpu.itb.misses 4990 # ITB misses +system.cpu.kern.callpal 182718 
# number of callpals executed system.cpu.kern.callpal_cserve 1 0.00% 0.00% # number of callpals executed system.cpu.kern.callpal_wrmces 1 0.00% 0.00% # number of callpals executed system.cpu.kern.callpal_wrfen 1 0.00% 0.00% # number of callpals executed system.cpu.kern.callpal_wrvptptr 1 0.00% 0.00% # number of callpals executed -system.cpu.kern.callpal_swpctx 1864 1.01% 1.02% # number of callpals executed -system.cpu.kern.callpal_tbi 28 0.02% 1.03% # number of callpals executed -system.cpu.kern.callpal_wrent 7 0.00% 1.03% # number of callpals executed -system.cpu.kern.callpal_swpipl 172016 93.48% 94.51% # number of callpals executed -system.cpu.kern.callpal_rdps 4808 2.61% 97.12% # number of callpals executed -system.cpu.kern.callpal_wrkgp 1 0.00% 97.12% # number of callpals executed -system.cpu.kern.callpal_wrusp 8 0.00% 97.13% # number of callpals executed -system.cpu.kern.callpal_rdusp 12 0.01% 97.13% # number of callpals executed -system.cpu.kern.callpal_whami 2 0.00% 97.14% # number of callpals executed -system.cpu.kern.callpal_rti 4291 2.33% 99.47% # number of callpals executed -system.cpu.kern.callpal_callsys 667 0.36% 99.83% # number of callpals executed -system.cpu.kern.callpal_imb 314 0.17% 100.00% # number of callpals executed +system.cpu.kern.callpal_swpctx 1574 0.86% 0.86% # number of callpals executed +system.cpu.kern.callpal_tbi 54 0.03% 0.89% # number of callpals executed +system.cpu.kern.callpal_wrent 7 0.00% 0.90% # number of callpals executed +system.cpu.kern.callpal_swpipl 171359 93.78% 94.68% # number of callpals executed +system.cpu.kern.callpal_rdps 5159 2.82% 97.50% # number of callpals executed +system.cpu.kern.callpal_wrkgp 1 0.00% 97.50% # number of callpals executed +system.cpu.kern.callpal_wrusp 7 0.00% 97.51% # number of callpals executed +system.cpu.kern.callpal_rdusp 10 0.01% 97.51% # number of callpals executed +system.cpu.kern.callpal_whami 2 0.00% 97.51% # number of callpals executed +system.cpu.kern.callpal_rti 3829 2.10% 99.61% # number of callpals executed +system.cpu.kern.callpal_callsys 531 0.29% 99.90% # number of callpals executed +system.cpu.kern.callpal_imb 181 0.10% 100.00% # number of callpals executed system.cpu.kern.inst.arm 0 # number of arm instructions executed -system.cpu.kern.inst.hwrei 209657 # number of hwrei instructions executed +system.cpu.kern.inst.hwrei 202783 # number of hwrei instructions executed system.cpu.kern.inst.ivlb 0 # number of ivlb instructions executed system.cpu.kern.inst.ivle 0 # number of ivle instructions executed -system.cpu.kern.inst.quiesce 1868 # number of quiesce instructions executed -system.cpu.kern.ipl_count 178378 # number of times we switched to this ipl -system.cpu.kern.ipl_count_0 75463 42.31% 42.31% # number of times we switched to this ipl -system.cpu.kern.ipl_count_21 286 0.16% 42.47% # number of times we switched to this ipl -system.cpu.kern.ipl_count_22 5446 3.05% 45.52% # number of times we switched to this ipl -system.cpu.kern.ipl_count_31 97183 54.48% 100.00% # number of times we switched to this ipl -system.cpu.kern.ipl_good 160188 # number of times we switched to this ipl from a different ipl -system.cpu.kern.ipl_good_0 75397 47.07% 47.07% # number of times we switched to this ipl from a different ipl -system.cpu.kern.ipl_good_21 286 0.18% 47.25% # number of times we switched to this ipl from a different ipl -system.cpu.kern.ipl_good_22 5446 3.40% 50.65% # number of times we switched to this ipl from a different ipl -system.cpu.kern.ipl_good_31 79059 49.35% 100.00% # number of times we 
switched to this ipl from a different ipl -system.cpu.kern.ipl_ticks 3493545167 # number of cycles we spent at this ipl -system.cpu.kern.ipl_ticks_0 3471576124 99.37% 99.37% # number of cycles we spent at this ipl -system.cpu.kern.ipl_ticks_21 45785 0.00% 99.37% # number of cycles we spent at this ipl -system.cpu.kern.ipl_ticks_22 934362 0.03% 99.40% # number of cycles we spent at this ipl -system.cpu.kern.ipl_ticks_31 20988896 0.60% 100.00% # number of cycles we spent at this ipl -system.cpu.kern.ipl_used 0.898026 # fraction of swpipl calls that actually changed the ipl -system.cpu.kern.ipl_used_0 0.999125 # fraction of swpipl calls that actually changed the ipl +system.cpu.kern.inst.quiesce 1877 # number of quiesce instructions executed +system.cpu.kern.ipl_count 177218 # number of times we switched to this ipl +system.cpu.kern.ipl_count_0 74624 42.11% 42.11% # number of times we switched to this ipl +system.cpu.kern.ipl_count_21 251 0.14% 42.25% # number of times we switched to this ipl +system.cpu.kern.ipl_count_22 5425 3.06% 45.31% # number of times we switched to this ipl +system.cpu.kern.ipl_count_31 96918 54.69% 100.00% # number of times we switched to this ipl +system.cpu.kern.ipl_good 158463 # number of times we switched to this ipl from a different ipl +system.cpu.kern.ipl_good_0 74570 47.06% 47.06% # number of times we switched to this ipl from a different ipl +system.cpu.kern.ipl_good_21 251 0.16% 47.22% # number of times we switched to this ipl from a different ipl +system.cpu.kern.ipl_good_22 5425 3.42% 50.64% # number of times we switched to this ipl from a different ipl +system.cpu.kern.ipl_good_31 78217 49.36% 100.00% # number of times we switched to this ipl from a different ipl +system.cpu.kern.ipl_ticks 3481725752 # number of cycles we spent at this ipl +system.cpu.kern.ipl_ticks_0 3459659082 99.37% 99.37% # number of cycles we spent at this ipl +system.cpu.kern.ipl_ticks_21 39982 0.00% 99.37% # number of cycles we spent at this ipl +system.cpu.kern.ipl_ticks_22 930159 0.03% 99.39% # number of cycles we spent at this ipl +system.cpu.kern.ipl_ticks_31 21096529 0.61% 100.00% # number of cycles we spent at this ipl +system.cpu.kern.ipl_used 0.894170 # fraction of swpipl calls that actually changed the ipl +system.cpu.kern.ipl_used_0 0.999276 # fraction of swpipl calls that actually changed the ipl system.cpu.kern.ipl_used_21 1 # fraction of swpipl calls that actually changed the ipl system.cpu.kern.ipl_used_22 1 # fraction of swpipl calls that actually changed the ipl -system.cpu.kern.ipl_used_31 0.813506 # fraction of swpipl calls that actually changed the ipl -system.cpu.kern.mode_good_kernel 2342 -system.cpu.kern.mode_good_user 2171 -system.cpu.kern.mode_good_idle 171 -system.cpu.kern.mode_switch_kernel 4092 # number of protection mode switches -system.cpu.kern.mode_switch_user 2171 # number of protection mode switches -system.cpu.kern.mode_switch_idle 2041 # number of protection mode switches -system.cpu.kern.mode_switch_good 0.564066 # fraction of useful protection mode switches -system.cpu.kern.mode_switch_good_kernel 0.572336 # fraction of useful protection mode switches +system.cpu.kern.ipl_used_31 0.807043 # fraction of swpipl calls that actually changed the ipl +system.cpu.kern.mode_good_kernel 1939 +system.cpu.kern.mode_good_user 1757 +system.cpu.kern.mode_good_idle 182 +system.cpu.kern.mode_switch_kernel 3320 # number of protection mode switches +system.cpu.kern.mode_switch_user 1757 # number of protection mode switches +system.cpu.kern.mode_switch_idle 2061 # 
number of protection mode switches +system.cpu.kern.mode_switch_good 0.543289 # fraction of useful protection mode switches +system.cpu.kern.mode_switch_good_kernel 0.584036 # fraction of useful protection mode switches system.cpu.kern.mode_switch_good_user 1 # fraction of useful protection mode switches -system.cpu.kern.mode_switch_good_idle 0.083782 # fraction of useful protection mode switches -system.cpu.kern.mode_ticks_kernel 33028385 0.95% 0.95% # number of ticks spent at the given mode -system.cpu.kern.mode_ticks_user 4450361 0.13% 1.07% # number of ticks spent at the given mode -system.cpu.kern.mode_ticks_idle 3456066419 98.93% 100.00% # number of ticks spent at the given mode -system.cpu.kern.swap_context 1865 # number of times the context was actually changed -system.cpu.kern.syscall 475 # number of syscalls executed -system.cpu.kern.syscall_fork 10 2.11% 2.11% # number of syscalls executed -system.cpu.kern.syscall_read 33 6.95% 9.05% # number of syscalls executed -system.cpu.kern.syscall_write 7 1.47% 10.53% # number of syscalls executed -system.cpu.kern.syscall_close 49 10.32% 20.84% # number of syscalls executed -system.cpu.kern.syscall_chdir 1 0.21% 21.05% # number of syscalls executed -system.cpu.kern.syscall_chmod 1 0.21% 21.26% # number of syscalls executed -system.cpu.kern.syscall_obreak 44 9.26% 30.53% # number of syscalls executed -system.cpu.kern.syscall_lseek 13 2.74% 33.26% # number of syscalls executed -system.cpu.kern.syscall_getpid 10 2.11% 35.37% # number of syscalls executed -system.cpu.kern.syscall_setuid 4 0.84% 36.21% # number of syscalls executed -system.cpu.kern.syscall_getuid 8 1.68% 37.89% # number of syscalls executed -system.cpu.kern.syscall_access 4 0.84% 38.74% # number of syscalls executed -system.cpu.kern.syscall_dup 4 0.84% 39.58% # number of syscalls executed -system.cpu.kern.syscall_open 68 14.32% 53.89% # number of syscalls executed -system.cpu.kern.syscall_getgid 8 1.68% 55.58% # number of syscalls executed -system.cpu.kern.syscall_sigprocmask 14 2.95% 58.53% # number of syscalls executed -system.cpu.kern.syscall_ioctl 16 3.37% 61.89% # number of syscalls executed -system.cpu.kern.syscall_readlink 2 0.42% 62.32% # number of syscalls executed -system.cpu.kern.syscall_execve 8 1.68% 64.00% # number of syscalls executed -system.cpu.kern.syscall_pre_F64_stat 31 6.53% 70.53% # number of syscalls executed -system.cpu.kern.syscall_pre_F64_lstat 1 0.21% 70.74% # number of syscalls executed -system.cpu.kern.syscall_mmap 55 11.58% 82.32% # number of syscalls executed -system.cpu.kern.syscall_munmap 6 1.26% 83.58% # number of syscalls executed -system.cpu.kern.syscall_mprotect 14 2.95% 86.53% # number of syscalls executed -system.cpu.kern.syscall_gethostname 2 0.42% 86.95% # number of syscalls executed -system.cpu.kern.syscall_dup2 4 0.84% 87.79% # number of syscalls executed -system.cpu.kern.syscall_pre_F64_fstat 28 5.89% 93.68% # number of syscalls executed -system.cpu.kern.syscall_fcntl 14 2.95% 96.63% # number of syscalls executed -system.cpu.kern.syscall_socket 3 0.63% 97.26% # number of syscalls executed -system.cpu.kern.syscall_connect 3 0.63% 97.89% # number of syscalls executed -system.cpu.kern.syscall_setgid 4 0.84% 98.74% # number of syscalls executed -system.cpu.kern.syscall_getrlimit 3 0.63% 99.37% # number of syscalls executed -system.cpu.kern.syscall_setsid 3 0.63% 100.00% # number of syscalls executed -system.cpu.not_idle_fraction 0.017156 # Percentage of non-idle cycles -system.cpu.numCycles 59936483 # number of cpu cycles simulated 
-system.cpu.num_insts 59929520 # Number of instructions executed -system.cpu.num_refs 13982880 # Number of memory references +system.cpu.kern.mode_switch_good_idle 0.088307 # fraction of useful protection mode switches +system.cpu.kern.mode_ticks_kernel 31887159 0.92% 0.92% # number of ticks spent at the given mode +system.cpu.kern.mode_ticks_user 3591270 0.10% 1.02% # number of ticks spent at the given mode +system.cpu.kern.mode_ticks_idle 3446247321 98.98% 100.00% # number of ticks spent at the given mode +system.cpu.kern.swap_context 1575 # number of times the context was actually changed +system.cpu.kern.syscall 329 # number of syscalls executed +system.cpu.kern.syscall_fork 8 2.43% 2.43% # number of syscalls executed +system.cpu.kern.syscall_read 30 9.12% 11.55% # number of syscalls executed +system.cpu.kern.syscall_write 4 1.22% 12.77% # number of syscalls executed +system.cpu.kern.syscall_close 43 13.07% 25.84% # number of syscalls executed +system.cpu.kern.syscall_chdir 1 0.30% 26.14% # number of syscalls executed +system.cpu.kern.syscall_chmod 1 0.30% 26.44% # number of syscalls executed +system.cpu.kern.syscall_obreak 15 4.56% 31.00% # number of syscalls executed +system.cpu.kern.syscall_lseek 10 3.04% 34.04% # number of syscalls executed +system.cpu.kern.syscall_getpid 6 1.82% 35.87% # number of syscalls executed +system.cpu.kern.syscall_setuid 4 1.22% 37.08% # number of syscalls executed +system.cpu.kern.syscall_getuid 6 1.82% 38.91% # number of syscalls executed +system.cpu.kern.syscall_access 11 3.34% 42.25% # number of syscalls executed +system.cpu.kern.syscall_dup 2 0.61% 42.86% # number of syscalls executed +system.cpu.kern.syscall_open 55 16.72% 59.57% # number of syscalls executed +system.cpu.kern.syscall_getgid 6 1.82% 61.40% # number of syscalls executed +system.cpu.kern.syscall_sigprocmask 10 3.04% 64.44% # number of syscalls executed +system.cpu.kern.syscall_ioctl 10 3.04% 67.48% # number of syscalls executed +system.cpu.kern.syscall_readlink 1 0.30% 67.78% # number of syscalls executed +system.cpu.kern.syscall_execve 7 2.13% 69.91% # number of syscalls executed +system.cpu.kern.syscall_mmap 54 16.41% 86.32% # number of syscalls executed +system.cpu.kern.syscall_munmap 3 0.91% 87.23% # number of syscalls executed +system.cpu.kern.syscall_mprotect 16 4.86% 92.10% # number of syscalls executed +system.cpu.kern.syscall_gethostname 1 0.30% 92.40% # number of syscalls executed +system.cpu.kern.syscall_dup2 3 0.91% 93.31% # number of syscalls executed +system.cpu.kern.syscall_fcntl 10 3.04% 96.35% # number of syscalls executed +system.cpu.kern.syscall_socket 2 0.61% 96.96% # number of syscalls executed +system.cpu.kern.syscall_connect 2 0.61% 97.57% # number of syscalls executed +system.cpu.kern.syscall_setgid 4 1.22% 98.78% # number of syscalls executed +system.cpu.kern.syscall_getrlimit 2 0.61% 99.39% # number of syscalls executed +system.cpu.kern.syscall_setsid 2 0.61% 100.00% # number of syscalls executed +system.cpu.not_idle_fraction 0.016660 # Percentage of non-idle cycles +system.cpu.numCycles 58006987 # number of cpu cycles simulated +system.cpu.num_insts 58001813 # Number of instructions executed +system.cpu.num_refs 13757191 # Number of memory references system.disk0.dma_read_bytes 1024 # Number of bytes transfered via DMA reads (not PRD). system.disk0.dma_read_full_pages 0 # Number of full page size DMA reads (not PRD). system.disk0.dma_read_txs 1 # Number of DMA read transactions (not PRD). 
-system.disk0.dma_write_bytes 2521088 # Number of bytes transfered via DMA writes. -system.disk0.dma_write_full_pages 285 # Number of full page size DMA writes. -system.disk0.dma_write_txs 375 # Number of DMA write transactions. +system.disk0.dma_write_bytes 2735104 # Number of bytes transfered via DMA writes. +system.disk0.dma_write_full_pages 306 # Number of full page size DMA writes. +system.disk0.dma_write_txs 412 # Number of DMA write transactions. system.disk2.dma_read_bytes 0 # Number of bytes transfered via DMA reads (not PRD). system.disk2.dma_read_full_pages 0 # Number of full page size DMA reads (not PRD). system.disk2.dma_read_txs 0 # Number of DMA read transactions (not PRD). diff --git a/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-atomic/stdout b/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-atomic/stdout index c04cd5050..b3b3e8704 100644 --- a/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-atomic/stdout +++ b/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-atomic/stdout @@ -5,8 +5,8 @@ The Regents of The University of Michigan All Rights Reserved -M5 compiled Sep 5 2006 15:32:34 -M5 started Tue Sep 5 15:42:26 2006 +M5 compiled Oct 5 2006 22:13:02 +M5 started Fri Oct 6 00:23:19 2006 M5 executing on zizzer.eecs.umich.edu command line: build/ALPHA_FS/m5.opt -d build/ALPHA_FS/tests/opt/quick/10.linux-boot/alpha/linux/tsunami-simple-atomic tests/run.py quick/10.linux-boot/alpha/linux/tsunami-simple-atomic -Exiting @ tick 3493545624 because m5_exit instruction encountered +Exiting @ tick 3481726167 because m5_exit instruction encountered diff --git a/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-timing-dual/config.ini b/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-timing-dual/config.ini index 97e9007e7..65401b549 100644 --- a/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-timing-dual/config.ini +++ b/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-timing-dual/config.ini @@ -58,6 +58,7 @@ mem_mode=timing pal=/dist/m5/system/binaries/ts_osfpal physmem=system.physmem readfile=tests/halt.sh +symbolfile= system_rev=1024 system_type=34 @@ -86,6 +87,7 @@ max_loads_all_threads=0 max_loads_any_thread=0 mem=system.physmem profile=0 +progress_interval=0 system=system dcache_port=system.membus.port[3] icache_port=system.membus.port[2] @@ -114,6 +116,7 @@ max_loads_all_threads=0 max_loads_any_thread=0 mem=system.physmem profile=0 +progress_interval=0 system=system dcache_port=system.membus.port[5] icache_port=system.membus.port[4] @@ -580,6 +583,7 @@ pio=system.iobus.port[24] [trace] bufsize=0 +cycle=0 dump_on_exit=false file=cout flags= diff --git a/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-timing-dual/config.out b/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-timing-dual/config.out index 96c734e15..ed03e445d 100644 --- a/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-timing-dual/config.out +++ b/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-timing-dual/config.out @@ -21,6 +21,7 @@ console=/dist/m5/system/binaries/console pal=/dist/m5/system/binaries/ts_osfpal boot_osflags=root=/dev/hda1 console=ttyS0 readfile=tests/halt.sh +symbolfile= init_param=0 system_type=34 system_rev=1024 @@ -86,6 +87,7 @@ max_insts_any_thread=0 max_insts_all_threads=0 max_loads_any_thread=0 max_loads_all_threads=0 +progress_interval=0 mem=system.physmem system=system itb=system.cpu0.itb @@ -113,6 +115,7 @@ max_insts_any_thread=0 max_insts_all_threads=0 max_loads_any_thread=0 
max_loads_all_threads=0 +progress_interval=0 mem=system.physmem system=system itb=system.cpu1.itb @@ -492,6 +495,7 @@ bus_id=0 [trace] flags= start=0 +cycle=0 bufsize=0 file=cout dump_on_exit=false @@ -535,6 +539,9 @@ trace_system=client [debug] break_cycles= +[statsreset] +reset_cycle=0 + [pseudo_inst] quiesce=true statistics=true diff --git a/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-timing-dual/console.system.sim_console b/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-timing-dual/console.system.sim_console index c3c7b2676..4a397ddbf 100644 --- a/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-timing-dual/console.system.sim_console +++ b/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-timing-dual/console.system.sim_console @@ -74,7 +74,7 @@ SlaveCmd: restart FFFFFC0000310020 FFFFFC0000310020 vptb FFFFFFFE00000000 my_rpb hdb: M5 IDE Disk, ATA DISK drive ide0 at 0x8410-0x8417,0x8422 on irq 31 hda: max request size: 128KiB - hda: 163296 sectors (83 MB), CHS=162/16/63, UDMA(33) + hda: 511056 sectors (261 MB), CHS=507/16/63, UDMA(33) hda: hda1 hdb: max request size: 128KiB hdb: 4177920 sectors (2139 MB), CHS=4144/16/63, UDMA(33) @@ -102,10 +102,6 @@ SlaveCmd: restart FFFFFC0000310020 FFFFFC0000310020 vptb FFFFFFFE00000000 my_rpb All bugs added by David S. Miller VFS: Mounted root (ext2 filesystem) readonly. Freeing unused kernel memory: 480k freed - init started: BusyBox v1.00-rc2 (2004.11.18-16:22+0000) multi-call binary - -PTXdist-0.7.0 (2004-11-18T11:23:40-0500) - + init started: BusyBox v1.1.0 (2006.08.17-02:54+0000) multi-call binary mounting filesystems... -EXT2-fs warning: checktime reached, running e2fsck is recommended - loading script... +loading script... diff --git a/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-timing-dual/m5stats.txt b/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-timing-dual/m5stats.txt index 666766e20..bf7320067 100644 --- a/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-timing-dual/m5stats.txt +++ b/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-timing-dual/m5stats.txt @@ -1,239 +1,232 @@ ---------- Begin Simulation Statistics ---------- -host_inst_rate 804715 # Simulator instruction rate (inst/s) -host_mem_usage 194628 # Number of bytes of host memory used -host_seconds 78.40 # Real time elapsed on the host -host_tick_rate 45146741 # Simulator tick rate (ticks/s) +host_inst_rate 825990 # Simulator instruction rate (inst/s) +host_mem_usage 193572 # Number of bytes of host memory used +host_seconds 74.01 # Real time elapsed on the host +host_tick_rate 47654938 # Simulator tick rate (ticks/s) sim_freq 2000000000 # Frequency of simulated ticks -sim_insts 63088076 # Number of instructions simulated -sim_seconds 1.769718 # Number of seconds simulated -sim_ticks 3539435029 # Number of ticks simulated -system.cpu0.dtb.accesses 1831687 # DTB accesses -system.cpu0.dtb.acv 360 # DTB access violations -system.cpu0.dtb.hits 10286150 # DTB hits -system.cpu0.dtb.misses 11050 # DTB misses -system.cpu0.dtb.read_accesses 495437 # DTB read accesses -system.cpu0.dtb.read_acv 219 # DTB read access violations -system.cpu0.dtb.read_hits 5741423 # DTB read hits -system.cpu0.dtb.read_misses 9036 # DTB read misses -system.cpu0.dtb.write_accesses 1336250 # DTB write accesses -system.cpu0.dtb.write_acv 141 # DTB write access violations -system.cpu0.dtb.write_hits 4544727 # DTB write hits -system.cpu0.dtb.write_misses 2014 # DTB write misses -system.cpu0.idle_fraction 0.984526 # Percentage of idle 
cycles -system.cpu0.itb.accesses 2328068 # ITB accesses -system.cpu0.itb.acv 216 # ITB acv -system.cpu0.itb.hits 2323500 # ITB hits -system.cpu0.itb.misses 4568 # ITB misses -system.cpu0.kern.callpal 145575 # number of callpals executed +sim_insts 61131962 # Number of instructions simulated +sim_seconds 1.763494 # Number of seconds simulated +sim_ticks 3526987181 # Number of ticks simulated +system.cpu0.dtb.accesses 1987164 # DTB accesses +system.cpu0.dtb.acv 291 # DTB access violations +system.cpu0.dtb.hits 10431590 # DTB hits +system.cpu0.dtb.misses 9590 # DTB misses +system.cpu0.dtb.read_accesses 606328 # DTB read accesses +system.cpu0.dtb.read_acv 174 # DTB read access violations +system.cpu0.dtb.read_hits 5831565 # DTB read hits +system.cpu0.dtb.read_misses 7663 # DTB read misses +system.cpu0.dtb.write_accesses 1380836 # DTB write accesses +system.cpu0.dtb.write_acv 117 # DTB write access violations +system.cpu0.dtb.write_hits 4600025 # DTB write hits +system.cpu0.dtb.write_misses 1927 # DTB write misses +system.cpu0.idle_fraction 0.984514 # Percentage of idle cycles +system.cpu0.itb.accesses 2372045 # ITB accesses +system.cpu0.itb.acv 143 # ITB acv +system.cpu0.itb.hits 2368331 # ITB hits +system.cpu0.itb.misses 3714 # ITB misses +system.cpu0.kern.callpal 145084 # number of callpals executed system.cpu0.kern.callpal_cserve 1 0.00% 0.00% # number of callpals executed -system.cpu0.kern.callpal_wripir 45 0.03% 0.03% # number of callpals executed -system.cpu0.kern.callpal_wrmces 1 0.00% 0.03% # number of callpals executed -system.cpu0.kern.callpal_wrfen 1 0.00% 0.03% # number of callpals executed -system.cpu0.kern.callpal_wrvptptr 1 0.00% 0.03% # number of callpals executed -system.cpu0.kern.callpal_swpctx 1334 0.92% 0.95% # number of callpals executed -system.cpu0.kern.callpal_tbi 20 0.01% 0.96% # number of callpals executed -system.cpu0.kern.callpal_wrent 7 0.00% 0.97% # number of callpals executed -system.cpu0.kern.callpal_swpipl 135235 92.90% 93.87% # number of callpals executed -system.cpu0.kern.callpal_rdps 4594 3.16% 97.02% # number of callpals executed -system.cpu0.kern.callpal_wrkgp 1 0.00% 97.02% # number of callpals executed -system.cpu0.kern.callpal_wrusp 4 0.00% 97.02% # number of callpals executed -system.cpu0.kern.callpal_rdusp 11 0.01% 97.03% # number of callpals executed -system.cpu0.kern.callpal_whami 2 0.00% 97.03% # number of callpals executed -system.cpu0.kern.callpal_rti 3660 2.51% 99.55% # number of callpals executed -system.cpu0.kern.callpal_callsys 461 0.32% 99.86% # number of callpals executed -system.cpu0.kern.callpal_imb 197 0.14% 100.00% # number of callpals executed +system.cpu0.kern.callpal_wripir 54 0.04% 0.04% # number of callpals executed +system.cpu0.kern.callpal_wrmces 1 0.00% 0.04% # number of callpals executed +system.cpu0.kern.callpal_wrfen 1 0.00% 0.04% # number of callpals executed +system.cpu0.kern.callpal_wrvptptr 1 0.00% 0.04% # number of callpals executed +system.cpu0.kern.callpal_swpctx 1182 0.81% 0.85% # number of callpals executed +system.cpu0.kern.callpal_tbi 42 0.03% 0.88% # number of callpals executed +system.cpu0.kern.callpal_wrent 7 0.00% 0.89% # number of callpals executed +system.cpu0.kern.callpal_swpipl 135050 93.08% 93.97% # number of callpals executed +system.cpu0.kern.callpal_rdps 4795 3.30% 97.28% # number of callpals executed +system.cpu0.kern.callpal_wrkgp 1 0.00% 97.28% # number of callpals executed +system.cpu0.kern.callpal_wrusp 5 0.00% 97.28% # number of callpals executed +system.cpu0.kern.callpal_rdusp 8 0.01% 97.29% # 
number of callpals executed +system.cpu0.kern.callpal_whami 2 0.00% 97.29% # number of callpals executed +system.cpu0.kern.callpal_rti 3431 2.36% 99.65% # number of callpals executed +system.cpu0.kern.callpal_callsys 364 0.25% 99.90% # number of callpals executed +system.cpu0.kern.callpal_imb 139 0.10% 100.00% # number of callpals executed system.cpu0.kern.inst.arm 0 # number of arm instructions executed -system.cpu0.kern.inst.hwrei 163916 # number of hwrei instructions executed +system.cpu0.kern.inst.hwrei 160926 # number of hwrei instructions executed system.cpu0.kern.inst.ivlb 0 # number of ivlb instructions executed system.cpu0.kern.inst.ivle 0 # number of ivle instructions executed -system.cpu0.kern.inst.quiesce 1952 # number of quiesce instructions executed -system.cpu0.kern.ipl_count 141041 # number of times we switched to this ipl -system.cpu0.kern.ipl_count_0 56950 40.38% 40.38% # number of times we switched to this ipl -system.cpu0.kern.ipl_count_21 286 0.20% 40.58% # number of times we switched to this ipl -system.cpu0.kern.ipl_count_22 5513 3.91% 44.49% # number of times we switched to this ipl -system.cpu0.kern.ipl_count_30 52 0.04% 44.53% # number of times we switched to this ipl -system.cpu0.kern.ipl_count_31 78240 55.47% 100.00% # number of times we switched to this ipl -system.cpu0.kern.ipl_good 123339 # number of times we switched to this ipl from a different ipl -system.cpu0.kern.ipl_good_0 56917 46.15% 46.15% # number of times we switched to this ipl from a different ipl -system.cpu0.kern.ipl_good_21 286 0.23% 46.38% # number of times we switched to this ipl from a different ipl -system.cpu0.kern.ipl_good_22 5513 4.47% 50.85% # number of times we switched to this ipl from a different ipl -system.cpu0.kern.ipl_good_30 52 0.04% 50.89% # number of times we switched to this ipl from a different ipl -system.cpu0.kern.ipl_good_31 60571 49.11% 100.00% # number of times we switched to this ipl from a different ipl -system.cpu0.kern.ipl_ticks 3539063979 # number of cycles we spent at this ipl -system.cpu0.kern.ipl_ticks_0 3513499166 99.28% 99.28% # number of cycles we spent at this ipl -system.cpu0.kern.ipl_ticks_21 60705 0.00% 99.28% # number of cycles we spent at this ipl -system.cpu0.kern.ipl_ticks_22 1354114 0.04% 99.32% # number of cycles we spent at this ipl -system.cpu0.kern.ipl_ticks_30 18748 0.00% 99.32% # number of cycles we spent at this ipl -system.cpu0.kern.ipl_ticks_31 24131246 0.68% 100.00% # number of cycles we spent at this ipl -system.cpu0.kern.ipl_used 0.874490 # fraction of swpipl calls that actually changed the ipl -system.cpu0.kern.ipl_used_0 0.999421 # fraction of swpipl calls that actually changed the ipl +system.cpu0.kern.inst.quiesce 1958 # number of quiesce instructions executed +system.cpu0.kern.ipl_count 140584 # number of times we switched to this ipl +system.cpu0.kern.ipl_count_0 56549 40.22% 40.22% # number of times we switched to this ipl +system.cpu0.kern.ipl_count_21 251 0.18% 40.40% # number of times we switched to this ipl +system.cpu0.kern.ipl_count_22 5487 3.90% 44.31% # number of times we switched to this ipl +system.cpu0.kern.ipl_count_30 51 0.04% 44.34% # number of times we switched to this ipl +system.cpu0.kern.ipl_count_31 78246 55.66% 100.00% # number of times we switched to this ipl +system.cpu0.kern.ipl_good 122461 # number of times we switched to this ipl from a different ipl +system.cpu0.kern.ipl_good_0 56518 46.15% 46.15% # number of times we switched to this ipl from a different ipl +system.cpu0.kern.ipl_good_21 251 0.20% 46.36% 
# number of times we switched to this ipl from a different ipl +system.cpu0.kern.ipl_good_22 5487 4.48% 50.84% # number of times we switched to this ipl from a different ipl +system.cpu0.kern.ipl_good_30 51 0.04% 50.88% # number of times we switched to this ipl from a different ipl +system.cpu0.kern.ipl_good_31 60154 49.12% 100.00% # number of times we switched to this ipl from a different ipl +system.cpu0.kern.ipl_ticks 3526986735 # number of cycles we spent at this ipl +system.cpu0.kern.ipl_ticks_0 3501352281 99.27% 99.27% # number of cycles we spent at this ipl +system.cpu0.kern.ipl_ticks_21 53019 0.00% 99.27% # number of cycles we spent at this ipl +system.cpu0.kern.ipl_ticks_22 1348211 0.04% 99.31% # number of cycles we spent at this ipl +system.cpu0.kern.ipl_ticks_30 18326 0.00% 99.31% # number of cycles we spent at this ipl +system.cpu0.kern.ipl_ticks_31 24214898 0.69% 100.00% # number of cycles we spent at this ipl +system.cpu0.kern.ipl_used 0.871088 # fraction of swpipl calls that actually changed the ipl +system.cpu0.kern.ipl_used_0 0.999452 # fraction of swpipl calls that actually changed the ipl system.cpu0.kern.ipl_used_21 1 # fraction of swpipl calls that actually changed the ipl system.cpu0.kern.ipl_used_22 1 # fraction of swpipl calls that actually changed the ipl system.cpu0.kern.ipl_used_30 1 # fraction of swpipl calls that actually changed the ipl -system.cpu0.kern.ipl_used_31 0.774169 # fraction of swpipl calls that actually changed the ipl -system.cpu0.kern.mode_good_kernel 1632 -system.cpu0.kern.mode_good_user 1487 -system.cpu0.kern.mode_good_idle 145 -system.cpu0.kern.mode_switch_kernel 2857 # number of protection mode switches -system.cpu0.kern.mode_switch_user 1487 # number of protection mode switches -system.cpu0.kern.mode_switch_idle 2125 # number of protection mode switches -system.cpu0.kern.mode_switch_good 0.504560 # fraction of useful protection mode switches -system.cpu0.kern.mode_switch_good_kernel 0.571229 # fraction of useful protection mode switches +system.cpu0.kern.ipl_used_31 0.768781 # fraction of swpipl calls that actually changed the ipl +system.cpu0.kern.mode_good_kernel 1448 +system.cpu0.kern.mode_good_user 1300 +system.cpu0.kern.mode_good_idle 148 +system.cpu0.kern.mode_switch_kernel 2490 # number of protection mode switches +system.cpu0.kern.mode_switch_user 1300 # number of protection mode switches +system.cpu0.kern.mode_switch_idle 2110 # number of protection mode switches +system.cpu0.kern.mode_switch_good 0.490847 # fraction of useful protection mode switches +system.cpu0.kern.mode_switch_good_kernel 0.581526 # fraction of useful protection mode switches system.cpu0.kern.mode_switch_good_user 1 # fraction of useful protection mode switches -system.cpu0.kern.mode_switch_good_idle 0.068235 # fraction of useful protection mode switches -system.cpu0.kern.mode_ticks_kernel 23634401 0.67% 0.67% # number of ticks spent at the given mode -system.cpu0.kern.mode_ticks_user 3241731 0.09% 0.76% # number of ticks spent at the given mode -system.cpu0.kern.mode_ticks_idle 3511854943 99.24% 100.00% # number of ticks spent at the given mode -system.cpu0.kern.swap_context 1335 # number of times the context was actually changed -system.cpu0.kern.syscall 312 # number of syscalls executed -system.cpu0.kern.syscall_fork 9 2.88% 2.88% # number of syscalls executed -system.cpu0.kern.syscall_read 20 6.41% 9.29% # number of syscalls executed -system.cpu0.kern.syscall_write 6 1.92% 11.22% # number of syscalls executed -system.cpu0.kern.syscall_close 36 11.54% 22.76% 
# number of syscalls executed -system.cpu0.kern.syscall_chdir 1 0.32% 23.08% # number of syscalls executed -system.cpu0.kern.syscall_chmod 1 0.32% 23.40% # number of syscalls executed -system.cpu0.kern.syscall_obreak 26 8.33% 31.73% # number of syscalls executed -system.cpu0.kern.syscall_lseek 9 2.88% 34.62% # number of syscalls executed -system.cpu0.kern.syscall_getpid 8 2.56% 37.18% # number of syscalls executed -system.cpu0.kern.syscall_setuid 2 0.64% 37.82% # number of syscalls executed -system.cpu0.kern.syscall_getuid 4 1.28% 39.10% # number of syscalls executed -system.cpu0.kern.syscall_access 4 1.28% 40.38% # number of syscalls executed -system.cpu0.kern.syscall_dup 4 1.28% 41.67% # number of syscalls executed -system.cpu0.kern.syscall_open 40 12.82% 54.49% # number of syscalls executed -system.cpu0.kern.syscall_getgid 4 1.28% 55.77% # number of syscalls executed -system.cpu0.kern.syscall_sigprocmask 12 3.85% 59.62% # number of syscalls executed -system.cpu0.kern.syscall_ioctl 13 4.17% 63.78% # number of syscalls executed -system.cpu0.kern.syscall_readlink 1 0.32% 64.10% # number of syscalls executed -system.cpu0.kern.syscall_execve 7 2.24% 66.35% # number of syscalls executed -system.cpu0.kern.syscall_pre_F64_stat 22 7.05% 73.40% # number of syscalls executed -system.cpu0.kern.syscall_pre_F64_lstat 1 0.32% 73.72% # number of syscalls executed -system.cpu0.kern.syscall_mmap 28 8.97% 82.69% # number of syscalls executed -system.cpu0.kern.syscall_munmap 4 1.28% 83.97% # number of syscalls executed -system.cpu0.kern.syscall_mprotect 7 2.24% 86.22% # number of syscalls executed -system.cpu0.kern.syscall_gethostname 1 0.32% 86.54% # number of syscalls executed -system.cpu0.kern.syscall_dup2 3 0.96% 87.50% # number of syscalls executed -system.cpu0.kern.syscall_pre_F64_fstat 15 4.81% 92.31% # number of syscalls executed -system.cpu0.kern.syscall_fcntl 11 3.53% 95.83% # number of syscalls executed -system.cpu0.kern.syscall_socket 3 0.96% 96.79% # number of syscalls executed -system.cpu0.kern.syscall_connect 3 0.96% 97.76% # number of syscalls executed -system.cpu0.kern.syscall_setgid 2 0.64% 98.40% # number of syscalls executed -system.cpu0.kern.syscall_getrlimit 2 0.64% 99.04% # number of syscalls executed -system.cpu0.kern.syscall_setsid 3 0.96% 100.00% # number of syscalls executed -system.cpu0.not_idle_fraction 0.015474 # Percentage of non-idle cycles +system.cpu0.kern.mode_switch_good_idle 0.070142 # fraction of useful protection mode switches +system.cpu0.kern.mode_ticks_kernel 23256451 0.66% 0.66% # number of ticks spent at the given mode +system.cpu0.kern.mode_ticks_user 3397192 0.10% 0.76% # number of ticks spent at the given mode +system.cpu0.kern.mode_ticks_idle 3500333090 99.24% 100.00% # number of ticks spent at the given mode +system.cpu0.kern.swap_context 1183 # number of times the context was actually changed +system.cpu0.kern.syscall 231 # number of syscalls executed +system.cpu0.kern.syscall_fork 6 2.60% 2.60% # number of syscalls executed +system.cpu0.kern.syscall_read 17 7.36% 9.96% # number of syscalls executed +system.cpu0.kern.syscall_write 4 1.73% 11.69% # number of syscalls executed +system.cpu0.kern.syscall_close 31 13.42% 25.11% # number of syscalls executed +system.cpu0.kern.syscall_chdir 1 0.43% 25.54% # number of syscalls executed +system.cpu0.kern.syscall_obreak 11 4.76% 30.30% # number of syscalls executed +system.cpu0.kern.syscall_lseek 6 2.60% 32.90% # number of syscalls executed +system.cpu0.kern.syscall_getpid 4 1.73% 34.63% # number of syscalls executed 
+system.cpu0.kern.syscall_setuid 2 0.87% 35.50% # number of syscalls executed +system.cpu0.kern.syscall_getuid 4 1.73% 37.23% # number of syscalls executed +system.cpu0.kern.syscall_access 9 3.90% 41.13% # number of syscalls executed +system.cpu0.kern.syscall_dup 2 0.87% 41.99% # number of syscalls executed +system.cpu0.kern.syscall_open 42 18.18% 60.17% # number of syscalls executed +system.cpu0.kern.syscall_getgid 4 1.73% 61.90% # number of syscalls executed +system.cpu0.kern.syscall_sigprocmask 7 3.03% 64.94% # number of syscalls executed +system.cpu0.kern.syscall_ioctl 9 3.90% 68.83% # number of syscalls executed +system.cpu0.kern.syscall_readlink 1 0.43% 69.26% # number of syscalls executed +system.cpu0.kern.syscall_execve 4 1.73% 71.00% # number of syscalls executed +system.cpu0.kern.syscall_mmap 35 15.15% 86.15% # number of syscalls executed +system.cpu0.kern.syscall_munmap 2 0.87% 87.01% # number of syscalls executed +system.cpu0.kern.syscall_mprotect 10 4.33% 91.34% # number of syscalls executed +system.cpu0.kern.syscall_gethostname 1 0.43% 91.77% # number of syscalls executed +system.cpu0.kern.syscall_dup2 2 0.87% 92.64% # number of syscalls executed +system.cpu0.kern.syscall_fcntl 8 3.46% 96.10% # number of syscalls executed +system.cpu0.kern.syscall_socket 2 0.87% 96.97% # number of syscalls executed +system.cpu0.kern.syscall_connect 2 0.87% 97.84% # number of syscalls executed +system.cpu0.kern.syscall_setgid 2 0.87% 98.70% # number of syscalls executed +system.cpu0.kern.syscall_getrlimit 1 0.43% 99.13% # number of syscalls executed +system.cpu0.kern.syscall_setsid 2 0.87% 100.00% # number of syscalls executed +system.cpu0.not_idle_fraction 0.015486 # Percentage of non-idle cycles system.cpu0.numCycles 0 # number of cpu cycles simulated -system.cpu0.num_insts 44447414 # Number of instructions executed -system.cpu0.num_refs 10321518 # Number of memory references -system.cpu1.dtb.accesses 524398 # DTB accesses -system.cpu1.dtb.acv 60 # DTB access violations -system.cpu1.dtb.hits 4612716 # DTB hits -system.cpu1.dtb.misses 5263 # DTB misses -system.cpu1.dtb.read_accesses 337746 # DTB read accesses -system.cpu1.dtb.read_acv 23 # DTB read access violations -system.cpu1.dtb.read_hits 2649302 # DTB read hits -system.cpu1.dtb.read_misses 4766 # DTB read misses -system.cpu1.dtb.write_accesses 186652 # DTB write accesses -system.cpu1.dtb.write_acv 37 # DTB write access violations -system.cpu1.dtb.write_hits 1963414 # DTB write hits -system.cpu1.dtb.write_misses 497 # DTB write misses -system.cpu1.idle_fraction 0.993423 # Percentage of idle cycles -system.cpu1.itb.accesses 1711918 # ITB accesses -system.cpu1.itb.acv 23 # ITB acv -system.cpu1.itb.hits 1709683 # ITB hits -system.cpu1.itb.misses 2235 # ITB misses -system.cpu1.kern.callpal 58341 # number of callpals executed +system.cpu0.num_insts 44155958 # Number of instructions executed +system.cpu0.num_refs 10463340 # Number of memory references +system.cpu1.dtb.accesses 323344 # DTB accesses +system.cpu1.dtb.acv 82 # DTB access violations +system.cpu1.dtb.hits 4234985 # DTB hits +system.cpu1.dtb.misses 2977 # DTB misses +system.cpu1.dtb.read_accesses 222873 # DTB read accesses +system.cpu1.dtb.read_acv 36 # DTB read access violations +system.cpu1.dtb.read_hits 2431648 # DTB read hits +system.cpu1.dtb.read_misses 2698 # DTB read misses +system.cpu1.dtb.write_accesses 100471 # DTB write accesses +system.cpu1.dtb.write_acv 46 # DTB write access violations +system.cpu1.dtb.write_hits 1803337 # DTB write hits +system.cpu1.dtb.write_misses 279 
# DTB write misses +system.cpu1.idle_fraction 0.993979 # Percentage of idle cycles +system.cpu1.itb.accesses 912010 # ITB accesses +system.cpu1.itb.acv 41 # ITB acv +system.cpu1.itb.hits 910678 # ITB hits +system.cpu1.itb.misses 1332 # ITB misses +system.cpu1.kern.callpal 57529 # number of callpals executed system.cpu1.kern.callpal_cserve 1 0.00% 0.00% # number of callpals executed -system.cpu1.kern.callpal_wripir 52 0.09% 0.09% # number of callpals executed +system.cpu1.kern.callpal_wripir 51 0.09% 0.09% # number of callpals executed system.cpu1.kern.callpal_wrmces 1 0.00% 0.09% # number of callpals executed system.cpu1.kern.callpal_wrfen 1 0.00% 0.09% # number of callpals executed -system.cpu1.kern.callpal_swpctx 588 1.01% 1.10% # number of callpals executed -system.cpu1.kern.callpal_tbi 7 0.01% 1.11% # number of callpals executed -system.cpu1.kern.callpal_wrent 7 0.01% 1.13% # number of callpals executed -system.cpu1.kern.callpal_swpipl 54562 93.52% 94.65% # number of callpals executed -system.cpu1.kern.callpal_rdps 217 0.37% 95.02% # number of callpals executed -system.cpu1.kern.callpal_wrkgp 1 0.00% 95.02% # number of callpals executed -system.cpu1.kern.callpal_wrusp 4 0.01% 95.03% # number of callpals executed -system.cpu1.kern.callpal_rdusp 1 0.00% 95.03% # number of callpals executed -system.cpu1.kern.callpal_whami 3 0.01% 95.04% # number of callpals executed -system.cpu1.kern.callpal_rti 2571 4.41% 99.44% # number of callpals executed -system.cpu1.kern.callpal_callsys 208 0.36% 99.80% # number of callpals executed -system.cpu1.kern.callpal_imb 116 0.20% 100.00% # number of callpals executed +system.cpu1.kern.callpal_swpctx 451 0.78% 0.88% # number of callpals executed +system.cpu1.kern.callpal_tbi 12 0.02% 0.90% # number of callpals executed +system.cpu1.kern.callpal_wrent 7 0.01% 0.91% # number of callpals executed +system.cpu1.kern.callpal_swpipl 54081 94.01% 94.92% # number of callpals executed +system.cpu1.kern.callpal_rdps 368 0.64% 95.56% # number of callpals executed +system.cpu1.kern.callpal_wrkgp 1 0.00% 95.56% # number of callpals executed +system.cpu1.kern.callpal_wrusp 2 0.00% 95.56% # number of callpals executed +system.cpu1.kern.callpal_rdusp 2 0.00% 95.57% # number of callpals executed +system.cpu1.kern.callpal_whami 3 0.01% 95.57% # number of callpals executed +system.cpu1.kern.callpal_rti 2337 4.06% 99.63% # number of callpals executed +system.cpu1.kern.callpal_callsys 169 0.29% 99.93% # number of callpals executed +system.cpu1.kern.callpal_imb 41 0.07% 100.00% # number of callpals executed system.cpu1.kern.callpal_rdunique 1 0.00% 100.00% # number of callpals executed system.cpu1.kern.inst.arm 0 # number of arm instructions executed -system.cpu1.kern.inst.hwrei 67770 # number of hwrei instructions executed +system.cpu1.kern.inst.hwrei 63811 # number of hwrei instructions executed system.cpu1.kern.inst.ivlb 0 # number of ivlb instructions executed system.cpu1.kern.inst.ivle 0 # number of ivle instructions executed -system.cpu1.kern.inst.quiesce 1892 # number of quiesce instructions executed -system.cpu1.kern.ipl_count 58980 # number of times we switched to this ipl -system.cpu1.kern.ipl_count_0 25467 43.18% 43.18% # number of times we switched to this ipl -system.cpu1.kern.ipl_count_22 5476 9.28% 52.46% # number of times we switched to this ipl -system.cpu1.kern.ipl_count_30 45 0.08% 52.54% # number of times we switched to this ipl -system.cpu1.kern.ipl_count_31 27992 47.46% 100.00% # number of times we switched to this ipl -system.cpu1.kern.ipl_good 58199 # 
number of times we switched to this ipl from a different ipl -system.cpu1.kern.ipl_good_0 25424 43.68% 43.68% # number of times we switched to this ipl from a different ipl -system.cpu1.kern.ipl_good_22 5476 9.41% 53.09% # number of times we switched to this ipl from a different ipl -system.cpu1.kern.ipl_good_30 45 0.08% 53.17% # number of times we switched to this ipl from a different ipl -system.cpu1.kern.ipl_good_31 27254 46.83% 100.00% # number of times we switched to this ipl from a different ipl -system.cpu1.kern.ipl_ticks 3539434499 # number of cycles we spent at this ipl -system.cpu1.kern.ipl_ticks_0 3510645847 99.19% 99.19% # number of cycles we spent at this ipl -system.cpu1.kern.ipl_ticks_22 1415637 0.04% 99.23% # number of cycles we spent at this ipl -system.cpu1.kern.ipl_ticks_30 16792 0.00% 99.23% # number of cycles we spent at this ipl -system.cpu1.kern.ipl_ticks_31 27356223 0.77% 100.00% # number of cycles we spent at this ipl -system.cpu1.kern.ipl_used 0.986758 # fraction of swpipl calls that actually changed the ipl -system.cpu1.kern.ipl_used_0 0.998312 # fraction of swpipl calls that actually changed the ipl +system.cpu1.kern.inst.quiesce 1898 # number of quiesce instructions executed +system.cpu1.kern.ipl_count 58267 # number of times we switched to this ipl +system.cpu1.kern.ipl_count_0 25040 42.97% 42.97% # number of times we switched to this ipl +system.cpu1.kern.ipl_count_22 5452 9.36% 52.33% # number of times we switched to this ipl +system.cpu1.kern.ipl_count_30 54 0.09% 52.42% # number of times we switched to this ipl +system.cpu1.kern.ipl_count_31 27721 47.58% 100.00% # number of times we switched to this ipl +system.cpu1.kern.ipl_good 57331 # number of times we switched to this ipl from a different ipl +system.cpu1.kern.ipl_good_0 25007 43.62% 43.62% # number of times we switched to this ipl from a different ipl +system.cpu1.kern.ipl_good_22 5452 9.51% 53.13% # number of times we switched to this ipl from a different ipl +system.cpu1.kern.ipl_good_30 54 0.09% 53.22% # number of times we switched to this ipl from a different ipl +system.cpu1.kern.ipl_good_31 26818 46.78% 100.00% # number of times we switched to this ipl from a different ipl +system.cpu1.kern.ipl_ticks 3526422675 # number of cycles we spent at this ipl +system.cpu1.kern.ipl_ticks_0 3497592433 99.18% 99.18% # number of cycles we spent at this ipl +system.cpu1.kern.ipl_ticks_22 1410084 0.04% 99.22% # number of cycles we spent at this ipl +system.cpu1.kern.ipl_ticks_30 19740 0.00% 99.22% # number of cycles we spent at this ipl +system.cpu1.kern.ipl_ticks_31 27400418 0.78% 100.00% # number of cycles we spent at this ipl +system.cpu1.kern.ipl_used 0.983936 # fraction of swpipl calls that actually changed the ipl +system.cpu1.kern.ipl_used_0 0.998682 # fraction of swpipl calls that actually changed the ipl system.cpu1.kern.ipl_used_22 1 # fraction of swpipl calls that actually changed the ipl system.cpu1.kern.ipl_used_30 1 # fraction of swpipl calls that actually changed the ipl -system.cpu1.kern.ipl_used_31 0.973635 # fraction of swpipl calls that actually changed the ipl -system.cpu1.kern.mode_good_kernel 690 -system.cpu1.kern.mode_good_user 691 +system.cpu1.kern.ipl_used_31 0.967425 # fraction of swpipl calls that actually changed the ipl +system.cpu1.kern.mode_good_kernel 465 +system.cpu1.kern.mode_good_user 465 system.cpu1.kern.mode_good_idle 0 -system.cpu1.kern.mode_switch_kernel 3141 # number of protection mode switches -system.cpu1.kern.mode_switch_user 691 # number of protection mode switches 
+system.cpu1.kern.mode_switch_kernel 2771 # number of protection mode switches +system.cpu1.kern.mode_switch_user 465 # number of protection mode switches system.cpu1.kern.mode_switch_idle 0 # number of protection mode switches -system.cpu1.kern.mode_switch_good 0.360386 # fraction of useful protection mode switches -system.cpu1.kern.mode_switch_good_kernel 0.219675 # fraction of useful protection mode switches +system.cpu1.kern.mode_switch_good 0.287392 # fraction of useful protection mode switches +system.cpu1.kern.mode_switch_good_kernel 0.167809 # fraction of useful protection mode switches system.cpu1.kern.mode_switch_good_user 1 # fraction of useful protection mode switches -system.cpu1.kern.mode_switch_good_idle # fraction of useful protection mode switches -system.cpu1.kern.mode_ticks_kernel 3537141786 99.94% 99.94% # number of ticks spent at the given mode -system.cpu1.kern.mode_ticks_user 2292711 0.06% 100.00% # number of ticks spent at the given mode +system.cpu1.kern.mode_switch_good_idle no value # fraction of useful protection mode switches +system.cpu1.kern.mode_ticks_kernel 3525066043 99.96% 99.96% # number of ticks spent at the given mode +system.cpu1.kern.mode_ticks_user 1294184 0.04% 100.00% # number of ticks spent at the given mode system.cpu1.kern.mode_ticks_idle 0 0.00% 100.00% # number of ticks spent at the given mode -system.cpu1.kern.swap_context 589 # number of times the context was actually changed -system.cpu1.kern.syscall 163 # number of syscalls executed -system.cpu1.kern.syscall_fork 1 0.61% 0.61% # number of syscalls executed -system.cpu1.kern.syscall_read 13 7.98% 8.59% # number of syscalls executed -system.cpu1.kern.syscall_write 1 0.61% 9.20% # number of syscalls executed -system.cpu1.kern.syscall_close 13 7.98% 17.18% # number of syscalls executed -system.cpu1.kern.syscall_obreak 18 11.04% 28.22% # number of syscalls executed -system.cpu1.kern.syscall_lseek 4 2.45% 30.67% # number of syscalls executed -system.cpu1.kern.syscall_getpid 2 1.23% 31.90% # number of syscalls executed -system.cpu1.kern.syscall_setuid 2 1.23% 33.13% # number of syscalls executed -system.cpu1.kern.syscall_getuid 4 2.45% 35.58% # number of syscalls executed -system.cpu1.kern.syscall_open 28 17.18% 52.76% # number of syscalls executed -system.cpu1.kern.syscall_getgid 4 2.45% 55.21% # number of syscalls executed -system.cpu1.kern.syscall_sigprocmask 2 1.23% 56.44% # number of syscalls executed -system.cpu1.kern.syscall_ioctl 3 1.84% 58.28% # number of syscalls executed -system.cpu1.kern.syscall_readlink 1 0.61% 58.90% # number of syscalls executed -system.cpu1.kern.syscall_execve 1 0.61% 59.51% # number of syscalls executed -system.cpu1.kern.syscall_pre_F64_stat 9 5.52% 65.03% # number of syscalls executed -system.cpu1.kern.syscall_mmap 27 16.56% 81.60% # number of syscalls executed -system.cpu1.kern.syscall_munmap 2 1.23% 82.82% # number of syscalls executed -system.cpu1.kern.syscall_mprotect 7 4.29% 87.12% # number of syscalls executed -system.cpu1.kern.syscall_gethostname 1 0.61% 87.73% # number of syscalls executed -system.cpu1.kern.syscall_dup2 1 0.61% 88.34% # number of syscalls executed -system.cpu1.kern.syscall_pre_F64_fstat 13 7.98% 96.32% # number of syscalls executed -system.cpu1.kern.syscall_fcntl 3 1.84% 98.16% # number of syscalls executed -system.cpu1.kern.syscall_setgid 2 1.23% 99.39% # number of syscalls executed -system.cpu1.kern.syscall_getrlimit 1 0.61% 100.00% # number of syscalls executed -system.cpu1.not_idle_fraction 0.006577 # Percentage of non-idle cycles 
+system.cpu1.kern.swap_context 452 # number of times the context was actually changed +system.cpu1.kern.syscall 98 # number of syscalls executed +system.cpu1.kern.syscall_fork 2 2.04% 2.04% # number of syscalls executed +system.cpu1.kern.syscall_read 13 13.27% 15.31% # number of syscalls executed +system.cpu1.kern.syscall_close 12 12.24% 27.55% # number of syscalls executed +system.cpu1.kern.syscall_chmod 1 1.02% 28.57% # number of syscalls executed +system.cpu1.kern.syscall_obreak 4 4.08% 32.65% # number of syscalls executed +system.cpu1.kern.syscall_lseek 4 4.08% 36.73% # number of syscalls executed +system.cpu1.kern.syscall_getpid 2 2.04% 38.78% # number of syscalls executed +system.cpu1.kern.syscall_setuid 2 2.04% 40.82% # number of syscalls executed +system.cpu1.kern.syscall_getuid 2 2.04% 42.86% # number of syscalls executed +system.cpu1.kern.syscall_access 2 2.04% 44.90% # number of syscalls executed +system.cpu1.kern.syscall_open 13 13.27% 58.16% # number of syscalls executed +system.cpu1.kern.syscall_getgid 2 2.04% 60.20% # number of syscalls executed +system.cpu1.kern.syscall_sigprocmask 3 3.06% 63.27% # number of syscalls executed +system.cpu1.kern.syscall_ioctl 1 1.02% 64.29% # number of syscalls executed +system.cpu1.kern.syscall_execve 3 3.06% 67.35% # number of syscalls executed +system.cpu1.kern.syscall_mmap 19 19.39% 86.73% # number of syscalls executed +system.cpu1.kern.syscall_munmap 1 1.02% 87.76% # number of syscalls executed +system.cpu1.kern.syscall_mprotect 6 6.12% 93.88% # number of syscalls executed +system.cpu1.kern.syscall_dup2 1 1.02% 94.90% # number of syscalls executed +system.cpu1.kern.syscall_fcntl 2 2.04% 96.94% # number of syscalls executed +system.cpu1.kern.syscall_setgid 2 2.04% 98.98% # number of syscalls executed +system.cpu1.kern.syscall_getrlimit 1 1.02% 100.00% # number of syscalls executed +system.cpu1.not_idle_fraction 0.006021 # Percentage of non-idle cycles system.cpu1.numCycles 0 # number of cpu cycles simulated -system.cpu1.num_insts 18640662 # Number of instructions executed -system.cpu1.num_refs 4633112 # Number of memory references +system.cpu1.num_insts 16976004 # Number of instructions executed +system.cpu1.num_refs 4251312 # Number of memory references system.disk0.dma_read_bytes 1024 # Number of bytes transfered via DMA reads (not PRD). system.disk0.dma_read_full_pages 0 # Number of full page size DMA reads (not PRD). system.disk0.dma_read_txs 1 # Number of DMA read transactions (not PRD). -system.disk0.dma_write_bytes 2521088 # Number of bytes transfered via DMA writes. -system.disk0.dma_write_full_pages 285 # Number of full page size DMA writes. -system.disk0.dma_write_txs 375 # Number of DMA write transactions. +system.disk0.dma_write_bytes 2735104 # Number of bytes transfered via DMA writes. +system.disk0.dma_write_full_pages 306 # Number of full page size DMA writes. +system.disk0.dma_write_txs 412 # Number of DMA write transactions. system.disk2.dma_read_bytes 0 # Number of bytes transfered via DMA reads (not PRD). system.disk2.dma_read_full_pages 0 # Number of full page size DMA reads (not PRD). system.disk2.dma_read_txs 0 # Number of DMA read transactions (not PRD). 
diff --git a/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-timing-dual/stdout b/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-timing-dual/stdout index 33c194686..2c496b914 100644 --- a/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-timing-dual/stdout +++ b/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-timing-dual/stdout @@ -5,8 +5,8 @@ The Regents of The University of Michigan All Rights Reserved -M5 compiled Sep 5 2006 15:32:34 -M5 started Tue Sep 5 15:45:11 2006 +M5 compiled Oct 5 2006 22:13:02 +M5 started Fri Oct 6 00:26:09 2006 M5 executing on zizzer.eecs.umich.edu command line: build/ALPHA_FS/m5.opt -d build/ALPHA_FS/tests/opt/quick/10.linux-boot/alpha/linux/tsunami-simple-timing-dual tests/run.py quick/10.linux-boot/alpha/linux/tsunami-simple-timing-dual -Exiting @ tick 3539435029 because m5_exit instruction encountered +Exiting @ tick 3526987181 because m5_exit instruction encountered diff --git a/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-timing/config.ini b/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-timing/config.ini index 2a354dee0..7f27ca121 100644 --- a/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-timing/config.ini +++ b/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-timing/config.ini @@ -58,6 +58,7 @@ mem_mode=timing pal=/dist/m5/system/binaries/ts_osfpal physmem=system.physmem readfile=tests/halt.sh +symbolfile= system_rev=1024 system_type=34 @@ -86,6 +87,7 @@ max_loads_all_threads=0 max_loads_any_thread=0 mem=system.physmem profile=0 +progress_interval=0 system=system dcache_port=system.membus.port[3] icache_port=system.membus.port[2] @@ -552,6 +554,7 @@ pio=system.iobus.port[24] [trace] bufsize=0 +cycle=0 dump_on_exit=false file=cout flags= diff --git a/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-timing/config.out b/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-timing/config.out index 1b99934c9..deba80368 100644 --- a/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-timing/config.out +++ b/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-timing/config.out @@ -21,6 +21,7 @@ console=/dist/m5/system/binaries/console pal=/dist/m5/system/binaries/ts_osfpal boot_osflags=root=/dev/hda1 console=ttyS0 readfile=tests/halt.sh +symbolfile= init_param=0 system_type=34 system_rev=1024 @@ -86,6 +87,7 @@ max_insts_any_thread=0 max_insts_all_threads=0 max_loads_any_thread=0 max_loads_all_threads=0 +progress_interval=0 mem=system.physmem system=system itb=system.cpu.itb @@ -465,6 +467,7 @@ bus_id=0 [trace] flags= start=0 +cycle=0 bufsize=0 file=cout dump_on_exit=false @@ -508,6 +511,9 @@ trace_system=client [debug] break_cycles= +[statsreset] +reset_cycle=0 + [pseudo_inst] quiesce=true statistics=true diff --git a/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-timing/console.system.sim_console b/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-timing/console.system.sim_console index ea7a20777..d6e3955cc 100644 --- a/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-timing/console.system.sim_console +++ b/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-timing/console.system.sim_console @@ -69,7 +69,7 @@ M5 console: m5AlphaAccess @ 0xFFFFFD0200000000 hdb: M5 IDE Disk, ATA DISK drive ide0 at 0x8410-0x8417,0x8422 on irq 31 hda: max request size: 128KiB - hda: 163296 sectors (83 MB), CHS=162/16/63, UDMA(33) + hda: 511056 sectors (261 MB), CHS=507/16/63, UDMA(33) hda: hda1 hdb: max request size: 128KiB hdb: 4177920 sectors (2139 MB), 
CHS=4144/16/63, UDMA(33) @@ -97,10 +97,6 @@ M5 console: m5AlphaAccess @ 0xFFFFFD0200000000 All bugs added by David S. Miller VFS: Mounted root (ext2 filesystem) readonly. Freeing unused kernel memory: 480k freed - init started: BusyBox v1.00-rc2 (2004.11.18-16:22+0000) multi-call binary - -PTXdist-0.7.0 (2004-11-18T11:23:40-0500) - + init started: BusyBox v1.1.0 (2006.08.17-02:54+0000) multi-call binary mounting filesystems... -EXT2-fs warning: checktime reached, running e2fsck is recommended - loading script... +loading script... diff --git a/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-timing/m5stats.txt b/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-timing/m5stats.txt index 0adb4cc31..1d45d41a9 100644 --- a/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-timing/m5stats.txt +++ b/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-timing/m5stats.txt @@ -1,130 +1,127 @@ ---------- Begin Simulation Statistics ---------- -host_inst_rate 835908 # Simulator instruction rate (inst/s) -host_mem_usage 194192 # Number of bytes of host memory used -host_seconds 71.68 # Real time elapsed on the host -host_tick_rate 48916813 # Simulator tick rate (ticks/s) +host_inst_rate 820839 # Simulator instruction rate (inst/s) +host_mem_usage 193264 # Number of bytes of host memory used +host_seconds 70.65 # Real time elapsed on the host +host_tick_rate 49454399 # Simulator tick rate (ticks/s) sim_freq 2000000000 # Frequency of simulated ticks -sim_insts 59915182 # Number of instructions simulated -sim_seconds 1.753109 # Number of seconds simulated -sim_ticks 3506218170 # Number of ticks simulated -system.cpu.dtb.accesses 2354955 # DTB accesses -system.cpu.dtb.acv 413 # DTB access violations -system.cpu.dtb.hits 13926686 # DTB hits -system.cpu.dtb.misses 16187 # DTB misses -system.cpu.dtb.read_accesses 832415 # DTB read accesses -system.cpu.dtb.read_acv 242 # DTB read access violations -system.cpu.dtb.read_hits 7716658 # DTB read hits -system.cpu.dtb.read_misses 13695 # DTB read misses -system.cpu.dtb.write_accesses 1522540 # DTB write accesses -system.cpu.dtb.write_acv 171 # DTB write access violations -system.cpu.dtb.write_hits 6210028 # DTB write hits -system.cpu.dtb.write_misses 2492 # DTB write misses -system.cpu.idle_fraction 0.978925 # Percentage of idle cycles -system.cpu.itb.accesses 4037381 # ITB accesses -system.cpu.itb.acv 239 # ITB acv -system.cpu.itb.hits 4030657 # ITB hits -system.cpu.itb.misses 6724 # ITB misses -system.cpu.kern.callpal 183644 # number of callpals executed +sim_insts 57989043 # Number of instructions simulated +sim_seconds 1.746889 # Number of seconds simulated +sim_ticks 3493777466 # Number of ticks simulated +system.cpu.dtb.accesses 2309470 # DTB accesses +system.cpu.dtb.acv 367 # DTB access violations +system.cpu.dtb.hits 13707871 # DTB hits +system.cpu.dtb.misses 12493 # DTB misses +system.cpu.dtb.read_accesses 828530 # DTB read accesses +system.cpu.dtb.read_acv 210 # DTB read access violations +system.cpu.dtb.read_hits 7595606 # DTB read hits +system.cpu.dtb.read_misses 10298 # DTB read misses +system.cpu.dtb.write_accesses 1480940 # DTB write accesses +system.cpu.dtb.write_acv 157 # DTB write access violations +system.cpu.dtb.write_hits 6112265 # DTB write hits +system.cpu.dtb.write_misses 2195 # DTB write misses +system.cpu.idle_fraction 0.979465 # Percentage of idle cycles +system.cpu.itb.accesses 3281347 # ITB accesses +system.cpu.itb.acv 184 # ITB acv +system.cpu.itb.hits 3276357 # ITB hits +system.cpu.itb.misses 4990 # ITB misses 
+system.cpu.kern.callpal 182454 # number of callpals executed system.cpu.kern.callpal_cserve 1 0.00% 0.00% # number of callpals executed system.cpu.kern.callpal_wrmces 1 0.00% 0.00% # number of callpals executed system.cpu.kern.callpal_wrfen 1 0.00% 0.00% # number of callpals executed system.cpu.kern.callpal_wrvptptr 1 0.00% 0.00% # number of callpals executed -system.cpu.kern.callpal_swpctx 1861 1.01% 1.02% # number of callpals executed -system.cpu.kern.callpal_tbi 28 0.02% 1.03% # number of callpals executed -system.cpu.kern.callpal_wrent 7 0.00% 1.03% # number of callpals executed -system.cpu.kern.callpal_swpipl 171635 93.46% 94.50% # number of callpals executed -system.cpu.kern.callpal_rdps 4808 2.62% 97.11% # number of callpals executed -system.cpu.kern.callpal_wrkgp 1 0.00% 97.11% # number of callpals executed -system.cpu.kern.callpal_wrusp 8 0.00% 97.12% # number of callpals executed -system.cpu.kern.callpal_rdusp 12 0.01% 97.12% # number of callpals executed -system.cpu.kern.callpal_whami 2 0.00% 97.13% # number of callpals executed -system.cpu.kern.callpal_rti 4297 2.34% 99.47% # number of callpals executed -system.cpu.kern.callpal_callsys 667 0.36% 99.83% # number of callpals executed -system.cpu.kern.callpal_imb 314 0.17% 100.00% # number of callpals executed +system.cpu.kern.callpal_swpctx 1571 0.86% 0.86% # number of callpals executed +system.cpu.kern.callpal_tbi 54 0.03% 0.89% # number of callpals executed +system.cpu.kern.callpal_wrent 7 0.00% 0.90% # number of callpals executed +system.cpu.kern.callpal_swpipl 171092 93.77% 94.67% # number of callpals executed +system.cpu.kern.callpal_rdps 5160 2.83% 97.50% # number of callpals executed +system.cpu.kern.callpal_wrkgp 1 0.00% 97.50% # number of callpals executed +system.cpu.kern.callpal_wrusp 7 0.00% 97.50% # number of callpals executed +system.cpu.kern.callpal_rdusp 10 0.01% 97.51% # number of callpals executed +system.cpu.kern.callpal_whami 2 0.00% 97.51% # number of callpals executed +system.cpu.kern.callpal_rti 3834 2.10% 99.61% # number of callpals executed +system.cpu.kern.callpal_callsys 531 0.29% 99.90% # number of callpals executed +system.cpu.kern.callpal_imb 181 0.10% 100.00% # number of callpals executed system.cpu.kern.inst.arm 0 # number of arm instructions executed -system.cpu.kern.inst.hwrei 209285 # number of hwrei instructions executed +system.cpu.kern.inst.hwrei 202524 # number of hwrei instructions executed system.cpu.kern.inst.ivlb 0 # number of ivlb instructions executed system.cpu.kern.inst.ivle 0 # number of ivle instructions executed -system.cpu.kern.inst.quiesce 1867 # number of quiesce instructions executed -system.cpu.kern.ipl_count 178009 # number of times we switched to this ipl -system.cpu.kern.ipl_count_0 75254 42.28% 42.28% # number of times we switched to this ipl -system.cpu.kern.ipl_count_21 286 0.16% 42.44% # number of times we switched to this ipl -system.cpu.kern.ipl_count_22 5465 3.07% 45.51% # number of times we switched to this ipl -system.cpu.kern.ipl_count_31 97004 54.49% 100.00% # number of times we switched to this ipl -system.cpu.kern.ipl_good 159802 # number of times we switched to this ipl from a different ipl -system.cpu.kern.ipl_good_0 75188 47.05% 47.05% # number of times we switched to this ipl from a different ipl -system.cpu.kern.ipl_good_21 286 0.18% 47.23% # number of times we switched to this ipl from a different ipl -system.cpu.kern.ipl_good_22 5465 3.42% 50.65% # number of times we switched to this ipl from a different ipl -system.cpu.kern.ipl_good_31 78863 49.35% 
100.00% # number of times we switched to this ipl from a different ipl -system.cpu.kern.ipl_ticks 3506217640 # number of cycles we spent at this ipl -system.cpu.kern.ipl_ticks_0 3478896122 99.22% 99.22% # number of cycles we spent at this ipl -system.cpu.kern.ipl_ticks_21 60705 0.00% 99.22% # number of cycles we spent at this ipl -system.cpu.kern.ipl_ticks_22 1274059 0.04% 99.26% # number of cycles we spent at this ipl -system.cpu.kern.ipl_ticks_31 25986754 0.74% 100.00% # number of cycles we spent at this ipl -system.cpu.kern.ipl_used 0.897719 # fraction of swpipl calls that actually changed the ipl -system.cpu.kern.ipl_used_0 0.999123 # fraction of swpipl calls that actually changed the ipl +system.cpu.kern.inst.quiesce 1876 # number of quiesce instructions executed +system.cpu.kern.ipl_count 176961 # number of times we switched to this ipl +system.cpu.kern.ipl_count_0 74471 42.08% 42.08% # number of times we switched to this ipl +system.cpu.kern.ipl_count_21 251 0.14% 42.23% # number of times we switched to this ipl +system.cpu.kern.ipl_count_22 5439 3.07% 45.30% # number of times we switched to this ipl +system.cpu.kern.ipl_count_31 96800 54.70% 100.00% # number of times we switched to this ipl +system.cpu.kern.ipl_good 158180 # number of times we switched to this ipl from a different ipl +system.cpu.kern.ipl_good_0 74417 47.05% 47.05% # number of times we switched to this ipl from a different ipl +system.cpu.kern.ipl_good_21 251 0.16% 47.20% # number of times we switched to this ipl from a different ipl +system.cpu.kern.ipl_good_22 5439 3.44% 50.64% # number of times we switched to this ipl from a different ipl +system.cpu.kern.ipl_good_31 78073 49.36% 100.00% # number of times we switched to this ipl from a different ipl +system.cpu.kern.ipl_ticks 3493777020 # number of cycles we spent at this ipl +system.cpu.kern.ipl_ticks_0 3466334940 99.21% 99.21% # number of cycles we spent at this ipl +system.cpu.kern.ipl_ticks_21 53019 0.00% 99.22% # number of cycles we spent at this ipl +system.cpu.kern.ipl_ticks_22 1268195 0.04% 99.25% # number of cycles we spent at this ipl +system.cpu.kern.ipl_ticks_31 26120866 0.75% 100.00% # number of cycles we spent at this ipl +system.cpu.kern.ipl_used 0.893869 # fraction of swpipl calls that actually changed the ipl +system.cpu.kern.ipl_used_0 0.999275 # fraction of swpipl calls that actually changed the ipl system.cpu.kern.ipl_used_21 1 # fraction of swpipl calls that actually changed the ipl system.cpu.kern.ipl_used_22 1 # fraction of swpipl calls that actually changed the ipl -system.cpu.kern.ipl_used_31 0.812987 # fraction of swpipl calls that actually changed the ipl -system.cpu.kern.mode_good_kernel 2339 -system.cpu.kern.mode_good_user 2168 -system.cpu.kern.mode_good_idle 171 -system.cpu.kern.mode_switch_kernel 4093 # number of protection mode switches -system.cpu.kern.mode_switch_user 2168 # number of protection mode switches -system.cpu.kern.mode_switch_idle 2043 # number of protection mode switches -system.cpu.kern.mode_switch_good 0.563343 # fraction of useful protection mode switches -system.cpu.kern.mode_switch_good_kernel 0.571463 # fraction of useful protection mode switches +system.cpu.kern.ipl_used_31 0.806539 # fraction of swpipl calls that actually changed the ipl +system.cpu.kern.mode_good_kernel 1938 +system.cpu.kern.mode_good_user 1757 +system.cpu.kern.mode_good_idle 181 +system.cpu.kern.mode_switch_kernel 3323 # number of protection mode switches +system.cpu.kern.mode_switch_user 1757 # number of protection mode switches 
+system.cpu.kern.mode_switch_idle 2060 # number of protection mode switches +system.cpu.kern.mode_switch_good 0.542857 # fraction of useful protection mode switches +system.cpu.kern.mode_switch_good_kernel 0.583208 # fraction of useful protection mode switches system.cpu.kern.mode_switch_good_user 1 # fraction of useful protection mode switches -system.cpu.kern.mode_switch_good_idle 0.083700 # fraction of useful protection mode switches -system.cpu.kern.mode_ticks_kernel 40644475 1.16% 1.16% # number of ticks spent at the given mode -system.cpu.kern.mode_ticks_user 5527486 0.16% 1.32% # number of ticks spent at the given mode -system.cpu.kern.mode_ticks_idle 3460045677 98.68% 100.00% # number of ticks spent at the given mode -system.cpu.kern.swap_context 1862 # number of times the context was actually changed -system.cpu.kern.syscall 475 # number of syscalls executed -system.cpu.kern.syscall_fork 10 2.11% 2.11% # number of syscalls executed -system.cpu.kern.syscall_read 33 6.95% 9.05% # number of syscalls executed -system.cpu.kern.syscall_write 7 1.47% 10.53% # number of syscalls executed -system.cpu.kern.syscall_close 49 10.32% 20.84% # number of syscalls executed -system.cpu.kern.syscall_chdir 1 0.21% 21.05% # number of syscalls executed -system.cpu.kern.syscall_chmod 1 0.21% 21.26% # number of syscalls executed -system.cpu.kern.syscall_obreak 44 9.26% 30.53% # number of syscalls executed -system.cpu.kern.syscall_lseek 13 2.74% 33.26% # number of syscalls executed -system.cpu.kern.syscall_getpid 10 2.11% 35.37% # number of syscalls executed -system.cpu.kern.syscall_setuid 4 0.84% 36.21% # number of syscalls executed -system.cpu.kern.syscall_getuid 8 1.68% 37.89% # number of syscalls executed -system.cpu.kern.syscall_access 4 0.84% 38.74% # number of syscalls executed -system.cpu.kern.syscall_dup 4 0.84% 39.58% # number of syscalls executed -system.cpu.kern.syscall_open 68 14.32% 53.89% # number of syscalls executed -system.cpu.kern.syscall_getgid 8 1.68% 55.58% # number of syscalls executed -system.cpu.kern.syscall_sigprocmask 14 2.95% 58.53% # number of syscalls executed -system.cpu.kern.syscall_ioctl 16 3.37% 61.89% # number of syscalls executed -system.cpu.kern.syscall_readlink 2 0.42% 62.32% # number of syscalls executed -system.cpu.kern.syscall_execve 8 1.68% 64.00% # number of syscalls executed -system.cpu.kern.syscall_pre_F64_stat 31 6.53% 70.53% # number of syscalls executed -system.cpu.kern.syscall_pre_F64_lstat 1 0.21% 70.74% # number of syscalls executed -system.cpu.kern.syscall_mmap 55 11.58% 82.32% # number of syscalls executed -system.cpu.kern.syscall_munmap 6 1.26% 83.58% # number of syscalls executed -system.cpu.kern.syscall_mprotect 14 2.95% 86.53% # number of syscalls executed -system.cpu.kern.syscall_gethostname 2 0.42% 86.95% # number of syscalls executed -system.cpu.kern.syscall_dup2 4 0.84% 87.79% # number of syscalls executed -system.cpu.kern.syscall_pre_F64_fstat 28 5.89% 93.68% # number of syscalls executed -system.cpu.kern.syscall_fcntl 14 2.95% 96.63% # number of syscalls executed -system.cpu.kern.syscall_socket 3 0.63% 97.26% # number of syscalls executed -system.cpu.kern.syscall_connect 3 0.63% 97.89% # number of syscalls executed -system.cpu.kern.syscall_setgid 4 0.84% 98.74% # number of syscalls executed -system.cpu.kern.syscall_getrlimit 3 0.63% 99.37% # number of syscalls executed -system.cpu.kern.syscall_setsid 3 0.63% 100.00% # number of syscalls executed -system.cpu.not_idle_fraction 0.021075 # Percentage of non-idle cycles 
+system.cpu.kern.mode_switch_good_idle 0.087864 # fraction of useful protection mode switches +system.cpu.kern.mode_ticks_kernel 39254786 1.12% 1.12% # number of ticks spent at the given mode +system.cpu.kern.mode_ticks_user 4685669 0.13% 1.26% # number of ticks spent at the given mode +system.cpu.kern.mode_ticks_idle 3449836563 98.74% 100.00% # number of ticks spent at the given mode +system.cpu.kern.swap_context 1572 # number of times the context was actually changed +system.cpu.kern.syscall 329 # number of syscalls executed +system.cpu.kern.syscall_fork 8 2.43% 2.43% # number of syscalls executed +system.cpu.kern.syscall_read 30 9.12% 11.55% # number of syscalls executed +system.cpu.kern.syscall_write 4 1.22% 12.77% # number of syscalls executed +system.cpu.kern.syscall_close 43 13.07% 25.84% # number of syscalls executed +system.cpu.kern.syscall_chdir 1 0.30% 26.14% # number of syscalls executed +system.cpu.kern.syscall_chmod 1 0.30% 26.44% # number of syscalls executed +system.cpu.kern.syscall_obreak 15 4.56% 31.00% # number of syscalls executed +system.cpu.kern.syscall_lseek 10 3.04% 34.04% # number of syscalls executed +system.cpu.kern.syscall_getpid 6 1.82% 35.87% # number of syscalls executed +system.cpu.kern.syscall_setuid 4 1.22% 37.08% # number of syscalls executed +system.cpu.kern.syscall_getuid 6 1.82% 38.91% # number of syscalls executed +system.cpu.kern.syscall_access 11 3.34% 42.25% # number of syscalls executed +system.cpu.kern.syscall_dup 2 0.61% 42.86% # number of syscalls executed +system.cpu.kern.syscall_open 55 16.72% 59.57% # number of syscalls executed +system.cpu.kern.syscall_getgid 6 1.82% 61.40% # number of syscalls executed +system.cpu.kern.syscall_sigprocmask 10 3.04% 64.44% # number of syscalls executed +system.cpu.kern.syscall_ioctl 10 3.04% 67.48% # number of syscalls executed +system.cpu.kern.syscall_readlink 1 0.30% 67.78% # number of syscalls executed +system.cpu.kern.syscall_execve 7 2.13% 69.91% # number of syscalls executed +system.cpu.kern.syscall_mmap 54 16.41% 86.32% # number of syscalls executed +system.cpu.kern.syscall_munmap 3 0.91% 87.23% # number of syscalls executed +system.cpu.kern.syscall_mprotect 16 4.86% 92.10% # number of syscalls executed +system.cpu.kern.syscall_gethostname 1 0.30% 92.40% # number of syscalls executed +system.cpu.kern.syscall_dup2 3 0.91% 93.31% # number of syscalls executed +system.cpu.kern.syscall_fcntl 10 3.04% 96.35% # number of syscalls executed +system.cpu.kern.syscall_socket 2 0.61% 96.96% # number of syscalls executed +system.cpu.kern.syscall_connect 2 0.61% 97.57% # number of syscalls executed +system.cpu.kern.syscall_setgid 4 1.22% 98.78% # number of syscalls executed +system.cpu.kern.syscall_getrlimit 2 0.61% 99.39% # number of syscalls executed +system.cpu.kern.syscall_setsid 2 0.61% 100.00% # number of syscalls executed +system.cpu.not_idle_fraction 0.020535 # Percentage of non-idle cycles system.cpu.numCycles 0 # number of cpu cycles simulated -system.cpu.num_insts 59915182 # Number of instructions executed -system.cpu.num_refs 13979549 # Number of memory references +system.cpu.num_insts 57989043 # Number of instructions executed +system.cpu.num_refs 13753099 # Number of memory references system.disk0.dma_read_bytes 1024 # Number of bytes transfered via DMA reads (not PRD). system.disk0.dma_read_full_pages 0 # Number of full page size DMA reads (not PRD). system.disk0.dma_read_txs 1 # Number of DMA read transactions (not PRD). 
-system.disk0.dma_write_bytes 2521088 # Number of bytes transfered via DMA writes. -system.disk0.dma_write_full_pages 285 # Number of full page size DMA writes. -system.disk0.dma_write_txs 375 # Number of DMA write transactions. +system.disk0.dma_write_bytes 2735104 # Number of bytes transfered via DMA writes. +system.disk0.dma_write_full_pages 306 # Number of full page size DMA writes. +system.disk0.dma_write_txs 412 # Number of DMA write transactions. system.disk2.dma_read_bytes 0 # Number of bytes transfered via DMA reads (not PRD). system.disk2.dma_read_full_pages 0 # Number of full page size DMA reads (not PRD). system.disk2.dma_read_txs 0 # Number of DMA read transactions (not PRD). diff --git a/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-timing/stdout b/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-timing/stdout index 2739943d2..88e69a41f 100644 --- a/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-timing/stdout +++ b/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-timing/stdout @@ -5,8 +5,8 @@ The Regents of The University of Michigan All Rights Reserved -M5 compiled Sep 5 2006 15:32:34 -M5 started Tue Sep 5 15:43:59 2006 +M5 compiled Oct 5 2006 22:13:02 +M5 started Fri Oct 6 00:24:58 2006 M5 executing on zizzer.eecs.umich.edu command line: build/ALPHA_FS/m5.opt -d build/ALPHA_FS/tests/opt/quick/10.linux-boot/alpha/linux/tsunami-simple-timing tests/run.py quick/10.linux-boot/alpha/linux/tsunami-simple-timing -Exiting @ tick 3506218170 because m5_exit instruction encountered +Exiting @ tick 3493777466 because m5_exit instruction encountered -- cgit v1.2.3 From 54cf456fd15b6c88010d35ca310b18f7a415114e Mon Sep 17 00:00:00 2001 From: Lisa Hsu Date: Fri, 6 Oct 2006 00:42:39 -0400 Subject: add an option for defining a directory in which to place all your checkpoints. if none, default is cwd. --HG-- extra : convert_revision : 23a602c2d800c922346c9743cc0c583d178a0ee7 --- configs/example/fs.py | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/configs/example/fs.py b/configs/example/fs.py index 31b31529f..5edda6e5f 100644 --- a/configs/example/fs.py +++ b/configs/example/fs.py @@ -49,10 +49,12 @@ parser.add_option("--dual", action="store_true", parser.add_option("-b", "--benchmark", action="store", type="string", dest="benchmark", help="Specify the benchmark to run. Available benchmarks: %s"\ - % DefinedBenchmarks) + % DefinedBenchmarks) parser.add_option("--etherdump", action="store", type="string", dest="etherdump", - help="Specify the filename to dump a pcap capture of the ethernet" - "traffic") + help="Specify the filename to dump a pcap capture of the" \ + "ethernet traffic") +parser.add_option("--checkpoint_dir", action="store", type="string", + help="Place all checkpoints in this absolute directory") (options, args) = parser.parse_args() @@ -123,7 +125,11 @@ else: exit_event = m5.simulate(maxtick) while exit_event.getCause() == "checkpoint": - m5.checkpoint(root, "cpt.%d") + if options.checkpoint_dir: + m5.checkpoint(root, "/".join([options.checkpoint_dir, "cpt.%d"])) + else: + m5.checkpoint(root, "cpt.%d") + if maxtick == -1: exit_event = m5.simulate(maxtick) else: -- cgit v1.2.3 From 9c901225f8f18d0d2f5325436983d685a4fe2245 Mon Sep 17 00:00:00 2001 From: Lisa Hsu Date: Fri, 6 Oct 2006 01:27:02 -0400 Subject: there are two main thrusts of this changeset. 1) return the periodicity of checkpoints back into the code (i.e. make m5 checkpoint n m meaningful again). 
2) to do this, i had to muck around with being able to repeatedly schedule a SimLoopExitEvent, which led to changes in how exit simloop events are handled to make this easier. src/arch/alpha/isa/decoder.isa: src/mem/cache/cache_impl.hh: modify arg. order for new calling convention of exitSimLoop. src/cpu/base.cc: src/sim/main.cc: src/sim/pseudo_inst.cc: src/sim/root.cc: now, instead of creating a new SimLoopExitEvent, call a wrapper schedExitSimLoop which handles all the default args. src/sim/sim_events.cc: src/sim/sim_events.hh: src/sim/sim_exit.hh: add the periodicity of checkpointing back into the code. to facilitate this, there are now two wrappers (instead of just overloading exitSimLoop). exitSimLoop is only for exiting NOW (i.e. at curTick), while schedExitSimLoop schedules an exit event for the future. --HG-- extra : convert_revision : c61f4bf05517172edd2c83368fd10bb0f0678029 --- src/arch/alpha/isa/decoder.isa | 2 +- src/cpu/base.cc | 11 +++++++---- src/mem/cache/cache_impl.hh | 4 ++-- src/sim/main.cc | 4 ++-- src/sim/pseudo_inst.cc | 10 +++++++--- src/sim/root.cc | 2 +- src/sim/sim_events.cc | 17 +++++++++++++---- src/sim/sim_events.hh | 18 +++++++++++------- src/sim/sim_exit.hh | 8 ++++++-- 9 files changed, 50 insertions(+), 26 deletions(-) diff --git a/src/arch/alpha/isa/decoder.isa b/src/arch/alpha/isa/decoder.isa index 4fc9da3f3..5bd19b677 100644 --- a/src/arch/alpha/isa/decoder.isa +++ b/src/arch/alpha/isa/decoder.isa @@ -701,7 +701,7 @@ decode OPCODE default Unknown::unknown() { 0x00: decode PALFUNC { format EmulatedCallPal { 0x00: halt ({{ - exitSimLoop(curTick, "halt instruction encountered"); + exitSimLoop("halt instruction encountered"); }}, IsNonSpeculative); 0x83: callsys({{ xc->syscall(R0); diff --git a/src/cpu/base.cc b/src/cpu/base.cc index 513dd7c55..ea4b03bf2 100644 --- a/src/cpu/base.cc +++ b/src/cpu/base.cc @@ -41,6 +41,7 @@ #include "cpu/cpuevent.hh" #include "cpu/thread_context.hh" #include "cpu/profile.hh" +#include "sim/sim_exit.hh" #include "sim/param.hh" #include "sim/process.hh" #include "sim/sim_events.hh" @@ -125,8 +126,9 @@ BaseCPU::BaseCPU(Params *p) // if (p->max_insts_any_thread != 0) for (int i = 0; i < number_of_threads; ++i) - new SimLoopExitEvent(comInstEventQueue[i], p->max_insts_any_thread, - "a thread reached the max instruction count"); + schedExitSimLoop("a thread reached the max instruction count", + p->max_insts_any_thread, 0, + comInstEventQueue[i]); if (p->max_insts_all_threads != 0) { // allocate & initialize shared downcounter: each event will @@ -150,8 +152,9 @@ BaseCPU::BaseCPU(Params *p) // if (p->max_loads_any_thread != 0) for (int i = 0; i < number_of_threads; ++i) - new SimLoopExitEvent(comLoadEventQueue[i], p->max_loads_any_thread, - "a thread reached the max load count"); + schedExitSimLoop("a thread reached the max load count", + p->max_loads_any_thread, 0, + comLoadEventQueue[i]); if (p->max_loads_all_threads != 0) { // allocate & initialize shared downcounter: each event will diff --git a/src/mem/cache/cache_impl.hh b/src/mem/cache/cache_impl.hh index 11cd84e88..593dbecf3 100644 --- a/src/mem/cache/cache_impl.hh +++ b/src/mem/cache/cache_impl.hh @@ -51,7 +51,7 @@ #include "mem/cache/miss/mshr.hh" #include "mem/cache/prefetch/prefetcher.hh" -#include "sim/sim_events.hh" // for SimExitEvent +#include "sim/sim_exit.hh" // for SimExitEvent template bool @@ -254,7 +254,7 @@ Cache::access(PacketPtr &pkt) if (missCount) { --missCount; if (missCount == 0) - new SimLoopExitEvent(curTick, "A cache reached the maximum miss count");
+ exitSimLoop("A cache reached the maximum miss count"); } } missQueue->handleMiss(pkt, size, curTick + hitLatency); diff --git a/src/sim/main.cc b/src/sim/main.cc index 728b7b810..874d0ac85 100644 --- a/src/sim/main.cc +++ b/src/sim/main.cc @@ -317,8 +317,8 @@ simulate(Tick num_cycles = -1) else num_cycles = curTick + num_cycles; - Event *limit_event = new SimLoopExitEvent(num_cycles, - "simulate() limit reached"); + Event *limit_event = schedExitSimLoop("simulate() limit reached", + num_cycles); while (1) { // there should always be at least one event (the SimLoopExitEvent diff --git a/src/sim/pseudo_inst.cc b/src/sim/pseudo_inst.cc index b66c78b2c..addf897c6 100644 --- a/src/sim/pseudo_inst.cc +++ b/src/sim/pseudo_inst.cc @@ -138,14 +138,14 @@ namespace AlphaPseudo void m5exit_old(ThreadContext *tc) { - exitSimLoop(curTick, "m5_exit_old instruction encountered"); + exitSimLoop("m5_exit_old instruction encountered"); } void m5exit(ThreadContext *tc, Tick delay) { Tick when = curTick + delay * Clock::Int::ns; - exitSimLoop(when, "m5_exit instruction encountered"); + schedExitSimLoop("m5_exit instruction encountered", when); } void @@ -270,7 +270,11 @@ namespace AlphaPseudo { if (!doCheckpointInsts) return; - exitSimLoop("checkpoint"); + + Tick when = curTick + delay * Clock::Int::ns; + Tick repeat = period * Clock::Int::ns; + + schedExitSimLoop("checkpoint", when, repeat); } uint64_t diff --git a/src/sim/root.cc b/src/sim/root.cc index ec5e2f7e2..565b57269 100644 --- a/src/sim/root.cc +++ b/src/sim/root.cc @@ -100,7 +100,7 @@ void Root::startup() { if (max_tick != 0) - exitSimLoop(curTick + max_tick, "reached maximum cycle count"); + schedExitSimLoop("reached maximum cycle count", curTick + max_tick); if (progress_interval != 0) new ProgressEvent(&mainEventQueue, progress_interval); diff --git a/src/sim/sim_events.cc b/src/sim/sim_events.cc index d9e8bdeaa..2ccc9dad2 100644 --- a/src/sim/sim_events.cc +++ b/src/sim/sim_events.cc @@ -57,6 +57,11 @@ SimLoopExitEvent::process() // otherwise do nothing... the IsExitEvent flag takes care of // exiting the simulation loop and returning this object to Python + + // but if you are doing this on intervals, don't forget to make another + if (repeat) { + schedule(curTick + repeat); + } } @@ -66,16 +71,20 @@ SimLoopExitEvent::description() return "simulation loop exit"; } -void -exitSimLoop(Tick when, const std::string &message, int exit_code) +SimLoopExitEvent * +schedExitSimLoop(const std::string &message, Tick when, Tick repeat, + EventQueue *q, int exit_code) { - new SimLoopExitEvent(when, message, exit_code); + if (q == NULL) + q = &mainEventQueue; + + return new SimLoopExitEvent(q, when, repeat, message, exit_code); } void exitSimLoop(const std::string &message, int exit_code) { - exitSimLoop(curTick, message, exit_code); + schedExitSimLoop(message, curTick, 0, NULL, exit_code); } void diff --git a/src/sim/sim_events.hh b/src/sim/sim_events.hh index 3c4a9dd05..e1576b38c 100644 --- a/src/sim/sim_events.hh +++ b/src/sim/sim_events.hh @@ -42,6 +42,7 @@ class SimLoopExitEvent : public Event // string explaining why we're terminating std::string cause; int code; + Tick repeat; public: // Default constructor. Only really used for derived classes. 
@@ -49,16 +50,19 @@ class SimLoopExitEvent : public Event : Event(&mainEventQueue, Sim_Exit_Pri) { } - SimLoopExitEvent(Tick _when, const std::string &_cause, int c = 0) - : Event(&mainEventQueue, Sim_Exit_Pri), cause(_cause), - code(c) - { setFlags(IsExitEvent); schedule(_when); } - SimLoopExitEvent(EventQueue *q, - Tick _when, const std::string &_cause, int c = 0) - : Event(q, Sim_Exit_Pri), cause(_cause), code(c) + Tick _when, Tick _repeat, const std::string &_cause, + int c = 0) + : Event(q, Sim_Exit_Pri), cause(_cause), + code(c), repeat(_repeat) { setFlags(IsExitEvent); schedule(_when); } +// SimLoopExitEvent(EventQueue *q, +// Tick _when, const std::string &_cause, +// Tick _repeat = 0, int c = 0) +// : Event(q, Sim_Exit_Pri), cause(_cause), code(c), repeat(_repeat) +// { setFlags(IsExitEvent); schedule(_when); } + std::string getCause() { return cause; } int getCode() { return code; } diff --git a/src/sim/sim_exit.hh b/src/sim/sim_exit.hh index 545bf4ae0..d4b31d1ea 100644 --- a/src/sim/sim_exit.hh +++ b/src/sim/sim_exit.hh @@ -38,6 +38,8 @@ // forward declaration class Callback; +class EventQueue; +class SimLoopExitEvent; /// Register a callback to be called when Python exits. Defined in /// sim/main.cc. @@ -47,12 +49,14 @@ void registerExitCallback(Callback *); /// Python) at the indicated tick. The message and exit_code /// parameters are saved in the SimLoopExitEvent to indicate why the /// exit occurred. -void exitSimLoop(Tick when, const std::string &message, int exit_code = 0); +SimLoopExitEvent *schedExitSimLoop(const std::string &message, Tick when, + Tick repeat = 0, EventQueue *q = NULL, + int exit_code = 0); /// Schedule an event to exit the simulation loop (returning to /// Python) at the end of the current cycle (curTick). The message /// and exit_code parameters are saved in the SimLoopExitEvent to /// indicate why the exit occurred. -void exitSimLoop(const std::string &cause, int exit_code = 0); +void exitSimLoop(const std::string &message, int exit_code = 0); #endif // __SIM_EXIT_HH__ -- cgit v1.2.3 From fb3a30f87cb699d9a39240d52d1dba3feb0b64c3 Mon Sep 17 00:00:00 2001 From: Lisa Hsu Date: Fri, 6 Oct 2006 01:29:50 -0400 Subject: checkpoint recovery was screwed up because a new section was created in the middle of another section and messed up unserializing. 
--HG-- extra : convert_revision : 7af15fdc9e8d203b26840a2eb5fef511b6a2b21d --- src/cpu/simple/atomic.cc | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/cpu/simple/atomic.cc b/src/cpu/simple/atomic.cc index 7ba1b7df1..88698bfee 100644 --- a/src/cpu/simple/atomic.cc +++ b/src/cpu/simple/atomic.cc @@ -161,9 +161,9 @@ AtomicSimpleCPU::serialize(ostream &os) { SimObject::State so_state = SimObject::getState(); SERIALIZE_ENUM(so_state); + BaseSimpleCPU::serialize(os); nameOut(os, csprintf("%s.tickEvent", name())); tickEvent.serialize(os); - BaseSimpleCPU::serialize(os); } void @@ -171,8 +171,8 @@ AtomicSimpleCPU::unserialize(Checkpoint *cp, const string §ion) { SimObject::State so_state; UNSERIALIZE_ENUM(so_state); - tickEvent.unserialize(cp, csprintf("%s.tickEvent", section)); BaseSimpleCPU::unserialize(cp, section); + tickEvent.unserialize(cp, csprintf("%s.tickEvent", section)); } void -- cgit v1.2.3 From 84dac99e2ef0c0c4fcf54d0ebba647b60448c049 Mon Sep 17 00:00:00 2001 From: Korey Sewell Date: Fri, 6 Oct 2006 04:23:27 -0400 Subject: add SMT hello world test - 2 threads --HG-- extra : convert_revision : 54cb19d1325295895b6f0b992499bbb0216b45df --- .../ref/alpha/linux/o3-timing/config.ini | 315 +++++++++++++++++++++ .../ref/alpha/linux/o3-timing/config.out | 308 ++++++++++++++++++++ .../ref/alpha/linux/o3-timing/m5stats.txt | 305 ++++++++++++++++++++ .../ref/alpha/linux/o3-timing/stderr | 6 + .../ref/alpha/linux/o3-timing/stdout | 14 + tests/quick/01.hello-2T-smt/test.py | 32 +++ 6 files changed, 980 insertions(+) create mode 100644 tests/quick/01.hello-2T-smt/ref/alpha/linux/o3-timing/config.ini create mode 100644 tests/quick/01.hello-2T-smt/ref/alpha/linux/o3-timing/config.out create mode 100644 tests/quick/01.hello-2T-smt/ref/alpha/linux/o3-timing/m5stats.txt create mode 100644 tests/quick/01.hello-2T-smt/ref/alpha/linux/o3-timing/stderr create mode 100644 tests/quick/01.hello-2T-smt/ref/alpha/linux/o3-timing/stdout create mode 100644 tests/quick/01.hello-2T-smt/test.py diff --git a/tests/quick/01.hello-2T-smt/ref/alpha/linux/o3-timing/config.ini b/tests/quick/01.hello-2T-smt/ref/alpha/linux/o3-timing/config.ini new file mode 100644 index 000000000..bd25cdab9 --- /dev/null +++ b/tests/quick/01.hello-2T-smt/ref/alpha/linux/o3-timing/config.ini @@ -0,0 +1,315 @@ +[root] +type=Root +children=system +checkpoint= +clock=1000000000000 +max_tick=0 +output_file=cout +progress_interval=0 + +[debug] +break_cycles= + +[exetrace] +intel_format=false +pc_symbol=true +print_cpseq=false +print_cycle=true +print_data=true +print_effaddr=true +print_fetchseq=false +print_iregs=false +print_opclass=true +print_thread=true +speculative=true +trace_system=client + +[serialize] +count=10 +cycle=0 +dir=cpt.%012d +period=0 + +[stats] +descriptions=true +dump_cycle=0 +dump_period=0 +dump_reset=false +ignore_events= +mysql_db= +mysql_host= +mysql_password= +mysql_user= +project_name=test +simulation_name=test +simulation_sample=0 +text_compat=true +text_file=m5stats.txt + +[system] +type=System +children=cpu membus physmem +mem_mode=timing +physmem=system.physmem + +[system.cpu] +type=DerivO3CPU +children=fuPool workload0 workload1 +BTBEntries=4096 +BTBTagSize=16 +LFSTSize=1024 +LQEntries=32 +RASSize=16 +SQEntries=32 +SSITSize=1024 +activity=0 +backComSize=5 +choiceCtrBits=2 +choicePredictorSize=8192 +clock=1 +commitToDecodeDelay=1 +commitToFetchDelay=1 +commitToIEWDelay=1 +commitToRenameDelay=1 +commitWidth=8 +decodeToFetchDelay=1 +decodeToRenameDelay=1 +decodeWidth=8 
+defer_registration=false +dispatchWidth=8 +fetchToDecodeDelay=1 +fetchTrapLatency=1 +fetchWidth=8 +forwardComSize=5 +fuPool=system.cpu.fuPool +function_trace=false +function_trace_start=0 +globalCtrBits=2 +globalHistoryBits=13 +globalPredictorSize=8192 +iewToCommitDelay=1 +iewToDecodeDelay=1 +iewToFetchDelay=1 +iewToRenameDelay=1 +instShiftAmt=2 +issueToExecuteDelay=1 +issueWidth=8 +localCtrBits=2 +localHistoryBits=11 +localHistoryTableSize=2048 +localPredictorSize=2048 +max_insts_all_threads=0 +max_insts_any_thread=0 +max_loads_all_threads=0 +max_loads_any_thread=0 +mem=system.physmem +numIQEntries=64 +numPhysFloatRegs=256 +numPhysIntRegs=256 +numROBEntries=192 +numRobs=1 +numThreads=1 +predType=tournament +progress_interval=0 +renameToDecodeDelay=1 +renameToFetchDelay=1 +renameToIEWDelay=2 +renameToROBDelay=1 +renameWidth=8 +squashWidth=8 +system=system +trapLatency=13 +wbDepth=1 +wbWidth=8 +workload=system.cpu.workload0 system.cpu.workload1 +dcache_port=system.membus.port[2] +icache_port=system.membus.port[1] + +[system.cpu.fuPool] +type=FUPool +children=FUList0 FUList1 FUList2 FUList3 FUList4 FUList5 FUList6 FUList7 +FUList=system.cpu.fuPool.FUList0 system.cpu.fuPool.FUList1 system.cpu.fuPool.FUList2 system.cpu.fuPool.FUList3 system.cpu.fuPool.FUList4 system.cpu.fuPool.FUList5 system.cpu.fuPool.FUList6 system.cpu.fuPool.FUList7 + +[system.cpu.fuPool.FUList0] +type=FUDesc +children=opList0 +count=6 +opList=system.cpu.fuPool.FUList0.opList0 + +[system.cpu.fuPool.FUList0.opList0] +type=OpDesc +issueLat=1 +opClass=IntAlu +opLat=1 + +[system.cpu.fuPool.FUList1] +type=FUDesc +children=opList0 opList1 +count=2 +opList=system.cpu.fuPool.FUList1.opList0 system.cpu.fuPool.FUList1.opList1 + +[system.cpu.fuPool.FUList1.opList0] +type=OpDesc +issueLat=1 +opClass=IntMult +opLat=3 + +[system.cpu.fuPool.FUList1.opList1] +type=OpDesc +issueLat=19 +opClass=IntDiv +opLat=20 + +[system.cpu.fuPool.FUList2] +type=FUDesc +children=opList0 opList1 opList2 +count=4 +opList=system.cpu.fuPool.FUList2.opList0 system.cpu.fuPool.FUList2.opList1 system.cpu.fuPool.FUList2.opList2 + +[system.cpu.fuPool.FUList2.opList0] +type=OpDesc +issueLat=1 +opClass=FloatAdd +opLat=2 + +[system.cpu.fuPool.FUList2.opList1] +type=OpDesc +issueLat=1 +opClass=FloatCmp +opLat=2 + +[system.cpu.fuPool.FUList2.opList2] +type=OpDesc +issueLat=1 +opClass=FloatCvt +opLat=2 + +[system.cpu.fuPool.FUList3] +type=FUDesc +children=opList0 opList1 opList2 +count=2 +opList=system.cpu.fuPool.FUList3.opList0 system.cpu.fuPool.FUList3.opList1 system.cpu.fuPool.FUList3.opList2 + +[system.cpu.fuPool.FUList3.opList0] +type=OpDesc +issueLat=1 +opClass=FloatMult +opLat=4 + +[system.cpu.fuPool.FUList3.opList1] +type=OpDesc +issueLat=12 +opClass=FloatDiv +opLat=12 + +[system.cpu.fuPool.FUList3.opList2] +type=OpDesc +issueLat=24 +opClass=FloatSqrt +opLat=24 + +[system.cpu.fuPool.FUList4] +type=FUDesc +children=opList0 +count=0 +opList=system.cpu.fuPool.FUList4.opList0 + +[system.cpu.fuPool.FUList4.opList0] +type=OpDesc +issueLat=1 +opClass=MemRead +opLat=1 + +[system.cpu.fuPool.FUList5] +type=FUDesc +children=opList0 +count=0 +opList=system.cpu.fuPool.FUList5.opList0 + +[system.cpu.fuPool.FUList5.opList0] +type=OpDesc +issueLat=1 +opClass=MemWrite +opLat=1 + +[system.cpu.fuPool.FUList6] +type=FUDesc +children=opList0 opList1 +count=4 +opList=system.cpu.fuPool.FUList6.opList0 system.cpu.fuPool.FUList6.opList1 + +[system.cpu.fuPool.FUList6.opList0] +type=OpDesc +issueLat=1 +opClass=MemRead +opLat=1 + +[system.cpu.fuPool.FUList6.opList1] +type=OpDesc +issueLat=1 
+opClass=MemWrite +opLat=1 + +[system.cpu.fuPool.FUList7] +type=FUDesc +children=opList0 +count=1 +opList=system.cpu.fuPool.FUList7.opList0 + +[system.cpu.fuPool.FUList7.opList0] +type=OpDesc +issueLat=3 +opClass=IprAccess +opLat=3 + +[system.cpu.workload0] +type=LiveProcess +cmd=tests/test-progs/hello/bin/alpha/linux/hello +egid=100 +env= +euid=100 +executable=tests/test-progs/hello/bin/alpha/linux/hello +gid=100 +input=cin +output=cout +pid=100 +ppid=99 +system=system +uid=100 + +[system.cpu.workload1] +type=LiveProcess +cmd=tests/test-progs/hello/bin/alpha/linux/hello +egid=100 +env= +euid=100 +executable=tests/test-progs/hello/bin/alpha/linux/hello +gid=100 +input=cin +output=cout +pid=100 +ppid=99 +system=system +uid=100 + +[system.membus] +type=Bus +bus_id=0 +port=system.physmem.port system.cpu.icache_port system.cpu.dcache_port + +[system.physmem] +type=PhysicalMemory +file= +latency=1 +range=0:134217727 +port=system.membus.port[0] + +[trace] +bufsize=0 +cycle=0 +dump_on_exit=false +file=cout +flags= +ignore= +start=0 + diff --git a/tests/quick/01.hello-2T-smt/ref/alpha/linux/o3-timing/config.out b/tests/quick/01.hello-2T-smt/ref/alpha/linux/o3-timing/config.out new file mode 100644 index 000000000..6d68de2a1 --- /dev/null +++ b/tests/quick/01.hello-2T-smt/ref/alpha/linux/o3-timing/config.out @@ -0,0 +1,308 @@ +[root] +type=Root +clock=1000000000000 +max_tick=0 +progress_interval=0 +output_file=cout + +[system.physmem] +type=PhysicalMemory +file= +range=[0,134217727] +latency=1 + +[system] +type=System +physmem=system.physmem +mem_mode=timing + +[system.membus] +type=Bus +bus_id=0 + +[system.cpu.workload0] +type=LiveProcess +cmd=tests/test-progs/hello/bin/alpha/linux/hello +executable=tests/test-progs/hello/bin/alpha/linux/hello +input=cin +output=cout +env= +system=system +uid=100 +euid=100 +gid=100 +egid=100 +pid=100 +ppid=99 + +[system.cpu.workload1] +type=LiveProcess +cmd=tests/test-progs/hello/bin/alpha/linux/hello +executable=tests/test-progs/hello/bin/alpha/linux/hello +input=cin +output=cout +env= +system=system +uid=100 +euid=100 +gid=100 +egid=100 +pid=100 +ppid=99 + +[system.cpu.fuPool.FUList0.opList0] +type=OpDesc +opClass=IntAlu +opLat=1 +issueLat=1 + +[system.cpu.fuPool.FUList0] +type=FUDesc +opList=system.cpu.fuPool.FUList0.opList0 +count=6 + +[system.cpu.fuPool.FUList1.opList0] +type=OpDesc +opClass=IntMult +opLat=3 +issueLat=1 + +[system.cpu.fuPool.FUList1.opList1] +type=OpDesc +opClass=IntDiv +opLat=20 +issueLat=19 + +[system.cpu.fuPool.FUList1] +type=FUDesc +opList=system.cpu.fuPool.FUList1.opList0 system.cpu.fuPool.FUList1.opList1 +count=2 + +[system.cpu.fuPool.FUList2.opList0] +type=OpDesc +opClass=FloatAdd +opLat=2 +issueLat=1 + +[system.cpu.fuPool.FUList2.opList1] +type=OpDesc +opClass=FloatCmp +opLat=2 +issueLat=1 + +[system.cpu.fuPool.FUList2.opList2] +type=OpDesc +opClass=FloatCvt +opLat=2 +issueLat=1 + +[system.cpu.fuPool.FUList2] +type=FUDesc +opList=system.cpu.fuPool.FUList2.opList0 system.cpu.fuPool.FUList2.opList1 system.cpu.fuPool.FUList2.opList2 +count=4 + +[system.cpu.fuPool.FUList3.opList0] +type=OpDesc +opClass=FloatMult +opLat=4 +issueLat=1 + +[system.cpu.fuPool.FUList3.opList1] +type=OpDesc +opClass=FloatDiv +opLat=12 +issueLat=12 + +[system.cpu.fuPool.FUList3.opList2] +type=OpDesc +opClass=FloatSqrt +opLat=24 +issueLat=24 + +[system.cpu.fuPool.FUList3] +type=FUDesc +opList=system.cpu.fuPool.FUList3.opList0 system.cpu.fuPool.FUList3.opList1 system.cpu.fuPool.FUList3.opList2 +count=2 + +[system.cpu.fuPool.FUList4.opList0] +type=OpDesc 
+opClass=MemRead +opLat=1 +issueLat=1 + +[system.cpu.fuPool.FUList4] +type=FUDesc +opList=system.cpu.fuPool.FUList4.opList0 +count=0 + +[system.cpu.fuPool.FUList5.opList0] +type=OpDesc +opClass=MemWrite +opLat=1 +issueLat=1 + +[system.cpu.fuPool.FUList5] +type=FUDesc +opList=system.cpu.fuPool.FUList5.opList0 +count=0 + +[system.cpu.fuPool.FUList6.opList0] +type=OpDesc +opClass=MemRead +opLat=1 +issueLat=1 + +[system.cpu.fuPool.FUList6.opList1] +type=OpDesc +opClass=MemWrite +opLat=1 +issueLat=1 + +[system.cpu.fuPool.FUList6] +type=FUDesc +opList=system.cpu.fuPool.FUList6.opList0 system.cpu.fuPool.FUList6.opList1 +count=4 + +[system.cpu.fuPool.FUList7.opList0] +type=OpDesc +opClass=IprAccess +opLat=3 +issueLat=3 + +[system.cpu.fuPool.FUList7] +type=FUDesc +opList=system.cpu.fuPool.FUList7.opList0 +count=1 + +[system.cpu.fuPool] +type=FUPool +FUList=system.cpu.fuPool.FUList0 system.cpu.fuPool.FUList1 system.cpu.fuPool.FUList2 system.cpu.fuPool.FUList3 system.cpu.fuPool.FUList4 system.cpu.fuPool.FUList5 system.cpu.fuPool.FUList6 system.cpu.fuPool.FUList7 + +[system.cpu] +type=DerivO3CPU +clock=1 +numThreads=1 +activity=0 +workload=system.cpu.workload0 system.cpu.workload1 +mem=system.physmem +checker=null +max_insts_any_thread=0 +max_insts_all_threads=0 +max_loads_any_thread=0 +max_loads_all_threads=0 +progress_interval=0 +cachePorts=200 +decodeToFetchDelay=1 +renameToFetchDelay=1 +iewToFetchDelay=1 +commitToFetchDelay=1 +fetchWidth=8 +renameToDecodeDelay=1 +iewToDecodeDelay=1 +commitToDecodeDelay=1 +fetchToDecodeDelay=1 +decodeWidth=8 +iewToRenameDelay=1 +commitToRenameDelay=1 +decodeToRenameDelay=1 +renameWidth=8 +commitToIEWDelay=1 +renameToIEWDelay=2 +issueToExecuteDelay=1 +dispatchWidth=8 +issueWidth=8 +wbWidth=8 +wbDepth=1 +fuPool=system.cpu.fuPool +iewToCommitDelay=1 +renameToROBDelay=1 +commitWidth=8 +squashWidth=8 +trapLatency=13 +backComSize=5 +forwardComSize=5 +predType=tournament +localPredictorSize=2048 +localCtrBits=2 +localHistoryTableSize=2048 +localHistoryBits=11 +globalPredictorSize=8192 +globalCtrBits=2 +globalHistoryBits=13 +choicePredictorSize=8192 +choiceCtrBits=2 +BTBEntries=4096 +BTBTagSize=16 +RASSize=16 +LQEntries=32 +SQEntries=32 +LFSTSize=1024 +SSITSize=1024 +numPhysIntRegs=256 +numPhysFloatRegs=256 +numIQEntries=64 +numROBEntries=192 +smtNumFetchingThreads=1 +smtFetchPolicy=SingleThread +smtLSQPolicy=Partitioned +smtLSQThreshold=100 +smtIQPolicy=Partitioned +smtIQThreshold=100 +smtROBPolicy=Partitioned +smtROBThreshold=100 +smtCommitPolicy=RoundRobin +instShiftAmt=2 +defer_registration=false +function_trace=false +function_trace_start=0 + +[trace] +flags= +start=0 +cycle=0 +bufsize=0 +file=cout +dump_on_exit=false +ignore= + +[stats] +descriptions=true +project_name=test +simulation_name=test +simulation_sample=0 +text_file=m5stats.txt +text_compat=true +mysql_db= +mysql_user= +mysql_password= +mysql_host= +events_start=-1 +dump_reset=false +dump_cycle=0 +dump_period=0 +ignore_events= + +[random] +seed=1 + +[exetrace] +speculative=true +print_cycle=true +print_opclass=true +print_thread=true +print_effaddr=true +print_data=true +print_iregs=false +print_fetchseq=false +print_cpseq=false +print_reg_delta=false +pc_symbol=true +intel_format=false +trace_system=client + +[debug] +break_cycles= + +[statsreset] +reset_cycle=0 + diff --git a/tests/quick/01.hello-2T-smt/ref/alpha/linux/o3-timing/m5stats.txt b/tests/quick/01.hello-2T-smt/ref/alpha/linux/o3-timing/m5stats.txt new file mode 100644 index 000000000..4473a39f8 --- /dev/null +++ 
b/tests/quick/01.hello-2T-smt/ref/alpha/linux/o3-timing/m5stats.txt @@ -0,0 +1,305 @@ + +---------- Begin Simulation Statistics ---------- +global.BPredUnit.BTBCorrect 0 # Number of correct BTB predictions (this stat may not work properly. +global.BPredUnit.BTBHits 669 # Number of BTB hits +global.BPredUnit.BTBLookups 3666 # Number of BTB lookups +global.BPredUnit.RASInCorrect 78 # Number of incorrect RAS predictions. +global.BPredUnit.condIncorrect 1050 # Number of conditional branches incorrect +global.BPredUnit.condPredicted 2479 # Number of conditional branches predicted +global.BPredUnit.lookups 4216 # Number of BP lookups +global.BPredUnit.usedRAS 545 # Number of times the RAS was used to get a target. +host_inst_rate 13879 # Simulator instruction rate (inst/s) +host_mem_usage 150244 # Number of bytes of host memory used +host_seconds 0.82 # Real time elapsed on the host +host_tick_rate 9101 # Simulator tick rate (ticks/s) +memdepunit.memDep.conflictingLoads 21 # Number of conflicting loads. +memdepunit.memDep.conflictingLoads 25 # Number of conflicting loads. +memdepunit.memDep.conflictingStores 0 # Number of conflicting stores. +memdepunit.memDep.conflictingStores 214 # Number of conflicting stores. +memdepunit.memDep.insertedLoads 1795 # Number of loads inserted to the mem dependence unit. +memdepunit.memDep.insertedLoads 1734 # Number of loads inserted to the mem dependence unit. +memdepunit.memDep.insertedStores 1066 # Number of stores inserted to the mem dependence unit. +memdepunit.memDep.insertedStores 1051 # Number of stores inserted to the mem dependence unit. +sim_freq 1000000000000 # Frequency of simulated ticks +sim_insts 11399 # Number of instructions simulated +sim_seconds 0.000000 # Number of seconds simulated +sim_ticks 7478 # Number of ticks simulated +system.cpu.commit.COM:branches 1756 # Number of branches committed +system.cpu.commit.COM:branches_0 878 # Number of branches committed +system.cpu.commit.COM:branches_1 878 # Number of branches committed +system.cpu.commit.COM:bw_lim_events 177 # number cycles where commit BW limit reached +system.cpu.commit.COM:bw_limited 0 # number of insts not committed due to BW limits +system.cpu.commit.COM:bw_limited_0 0 # number of insts not committed due to BW limits +system.cpu.commit.COM:bw_limited_1 0 # number of insts not committed due to BW limits +system.cpu.commit.COM:committed_per_cycle.start_dist # Number of insts commited each cycle +system.cpu.commit.COM:committed_per_cycle.samples 7424 +system.cpu.commit.COM:committed_per_cycle.min_value 0 + 0 3237 4360.18% + 1 1635 2202.32% + 2 920 1239.22% + 3 476 641.16% + 4 347 467.40% + 5 246 331.36% + 6 206 277.48% + 7 180 242.46% + 8 177 238.42% +system.cpu.commit.COM:committed_per_cycle.max_value 8 +system.cpu.commit.COM:committed_per_cycle.end_dist + +system.cpu.commit.COM:count 11433 # Number of instructions committed +system.cpu.commit.COM:count_0 5716 # Number of instructions committed +system.cpu.commit.COM:count_1 5717 # Number of instructions committed +system.cpu.commit.COM:loads 1976 # Number of loads committed +system.cpu.commit.COM:loads_0 988 # Number of loads committed +system.cpu.commit.COM:loads_1 988 # Number of loads committed +system.cpu.commit.COM:membars 0 # Number of memory barriers committed +system.cpu.commit.COM:membars_0 0 # Number of memory barriers committed +system.cpu.commit.COM:membars_1 0 # Number of memory barriers committed +system.cpu.commit.COM:refs 3600 # Number of memory references committed +system.cpu.commit.COM:refs_0 1800 # Number 
of memory references committed +system.cpu.commit.COM:refs_1 1800 # Number of memory references committed +system.cpu.commit.COM:swp_count 0 # Number of s/w prefetches committed +system.cpu.commit.COM:swp_count_0 0 # Number of s/w prefetches committed +system.cpu.commit.COM:swp_count_1 0 # Number of s/w prefetches committed +system.cpu.commit.branchMispredicts 789 # The number of times a branch was mispredicted +system.cpu.commit.commitCommittedInsts 11433 # The number of committed instructions +system.cpu.commit.commitNonSpecStalls 34 # The number of times commit has been forced to stall to communicate backwards +system.cpu.commit.commitSquashedInsts 6802 # The number of squashed insts skipped by commit +system.cpu.committedInsts_0 5699 # Number of Instructions Simulated +system.cpu.committedInsts_1 5700 # Number of Instructions Simulated +system.cpu.committedInsts_total 11399 # Number of Instructions Simulated +system.cpu.cpi_0 1.312160 # CPI: Cycles Per Instruction +system.cpu.cpi_1 1.311930 # CPI: Cycles Per Instruction +system.cpu.cpi_total 0.656022 # CPI: Total CPI of All Threads +system.cpu.decode.DECODE:BlockedCycles 1617 # Number of cycles decode is blocked +system.cpu.decode.DECODE:BranchMispred 282 # Number of times decode detected a branch misprediction +system.cpu.decode.DECODE:BranchResolved 364 # Number of times decode resolved a branch +system.cpu.decode.DECODE:DecodedInsts 22220 # Number of instructions handled by decode +system.cpu.decode.DECODE:IdleCycles 8058 # Number of cycles decode is idle +system.cpu.decode.DECODE:RunCycles 3571 # Number of cycles decode is running +system.cpu.decode.DECODE:SquashCycles 1260 # Number of cycles decode is squashing +system.cpu.decode.DECODE:SquashedInsts 200 # Number of squashed instructions handled by decode +system.cpu.decode.DECODE:UnblockCycles 277 # Number of cycles decode is unblocking +system.cpu.fetch.Branches 4216 # Number of branches that fetch encountered +system.cpu.fetch.CacheLines 2762 # Number of cache lines fetched +system.cpu.fetch.Cycles 6837 # Number of cycles fetch has run and was not squashing or blocked +system.cpu.fetch.Insts 25142 # Number of instructions fetch has processed +system.cpu.fetch.SquashCycles 1098 # Number of cycles fetch has spent squashing +system.cpu.fetch.branchRate 0.563712 # Number of branch fetches per cycle +system.cpu.fetch.icacheStallCycles 2762 # Number of cycles fetch is stalled on an Icache miss +system.cpu.fetch.predictedBranches 1214 # Number of branches that fetch has predicted taken +system.cpu.fetch.rate 3.361679 # Number of inst fetches per cycle +system.cpu.fetch.rateDist.start_dist # Number of instructions fetched each cycle (Total) +system.cpu.fetch.rateDist.samples 7479 +system.cpu.fetch.rateDist.min_value 0 + 0 3407 4555.42% + 1 266 355.66% + 2 222 296.83% + 3 265 354.33% + 4 317 423.85% + 5 275 367.70% + 6 279 373.04% + 7 264 352.99% + 8 2184 2920.18% +system.cpu.fetch.rateDist.max_value 8 +system.cpu.fetch.rateDist.end_dist + +system.cpu.iew.EXEC:branches 2294 # Number of branches executed +system.cpu.iew.EXEC:branches_0 1156 # Number of branches executed +system.cpu.iew.EXEC:branches_1 1138 # Number of branches executed +system.cpu.iew.EXEC:nop 59 # number of nop insts executed +system.cpu.iew.EXEC:nop_0 31 # number of nop insts executed +system.cpu.iew.EXEC:nop_1 28 # number of nop insts executed +system.cpu.iew.EXEC:rate 1.993582 # Inst execution rate +system.cpu.iew.EXEC:refs 4718 # number of memory reference insts executed +system.cpu.iew.EXEC:refs_0 2364 # number of 
memory reference insts executed +system.cpu.iew.EXEC:refs_1 2354 # number of memory reference insts executed +system.cpu.iew.EXEC:stores 1857 # Number of stores executed +system.cpu.iew.EXEC:stores_0 924 # Number of stores executed +system.cpu.iew.EXEC:stores_1 933 # Number of stores executed +system.cpu.iew.EXEC:swp 0 # number of swp insts executed +system.cpu.iew.EXEC:swp_0 0 # number of swp insts executed +system.cpu.iew.EXEC:swp_1 0 # number of swp insts executed +system.cpu.iew.WB:consumers 9920 # num instructions consuming a value +system.cpu.iew.WB:consumers_0 4998 # num instructions consuming a value +system.cpu.iew.WB:consumers_1 4922 # num instructions consuming a value +system.cpu.iew.WB:count 14666 # cumulative count of insts written-back +system.cpu.iew.WB:count_0 7373 # cumulative count of insts written-back +system.cpu.iew.WB:count_1 7293 # cumulative count of insts written-back +system.cpu.iew.WB:fanout 0.776915 # average fanout of values written-back +system.cpu.iew.WB:fanout_0 0.775710 # average fanout of values written-back +system.cpu.iew.WB:fanout_1 0.778139 # average fanout of values written-back +system.cpu.iew.WB:penalized 0 # number of instrctions required to write to 'other' IQ +system.cpu.iew.WB:penalized_0 0 # number of instrctions required to write to 'other' IQ +system.cpu.iew.WB:penalized_1 0 # number of instrctions required to write to 'other' IQ +system.cpu.iew.WB:penalized_rate 0 # fraction of instructions written-back that wrote to 'other' IQ +system.cpu.iew.WB:penalized_rate_0 0 # fraction of instructions written-back that wrote to 'other' IQ +system.cpu.iew.WB:penalized_rate_1 0 # fraction of instructions written-back that wrote to 'other' IQ +system.cpu.iew.WB:producers 7707 # num instructions producing a value +system.cpu.iew.WB:producers_0 3877 # num instructions producing a value +system.cpu.iew.WB:producers_1 3830 # num instructions producing a value +system.cpu.iew.WB:rate 1.960957 # insts written-back per cycle +system.cpu.iew.WB:rate_0 0.985827 # insts written-back per cycle +system.cpu.iew.WB:rate_1 0.975130 # insts written-back per cycle +system.cpu.iew.WB:sent 14753 # cumulative count of insts sent to commit +system.cpu.iew.WB:sent_0 7419 # cumulative count of insts sent to commit +system.cpu.iew.WB:sent_1 7334 # cumulative count of insts sent to commit +system.cpu.iew.branchMispredicts 869 # Number of branch mispredicts detected at execute +system.cpu.iew.iewBlockCycles 3 # Number of cycles IEW is blocking +system.cpu.iew.iewDispLoadInsts 3529 # Number of dispatched load instructions +system.cpu.iew.iewDispNonSpecInsts 39 # Number of dispatched non-speculative instructions +system.cpu.iew.iewDispSquashedInsts 789 # Number of squashed instructions skipped by dispatch +system.cpu.iew.iewDispStoreInsts 2117 # Number of dispatched store instructions +system.cpu.iew.iewDispatchedInsts 18235 # Number of instructions dispatched to IQ +system.cpu.iew.iewExecLoadInsts 2861 # Number of load instructions executed +system.cpu.iew.iewExecLoadInsts_0 1440 # Number of load instructions executed +system.cpu.iew.iewExecLoadInsts_1 1421 # Number of load instructions executed +system.cpu.iew.iewExecSquashedInsts 1188 # Number of squashed instructions skipped in execute +system.cpu.iew.iewExecutedInsts 14910 # Number of executed instructions +system.cpu.iew.iewIQFullEvents 0 # Number of times the IQ has become full, causing a stall +system.cpu.iew.iewIdleCycles 0 # Number of cycles IEW is idle +system.cpu.iew.iewLSQFullEvents 0 # Number of times the LSQ has 
become full, causing a stall +system.cpu.iew.iewSquashCycles 1260 # Number of cycles IEW is squashing +system.cpu.iew.iewUnblockCycles 0 # Number of cycles IEW is unblocking +system.cpu.iew.lsq.thread.0.blockedLoads 0 # Number of blocked loads due to partial load-store forwarding +system.cpu.iew.lsq.thread.0.cacheBlocked 0 # Number of times an access to memory failed due to the cache being blocked +system.cpu.iew.lsq.thread.0.forwLoads 45 # Number of loads that had data forwarded from stores +system.cpu.iew.lsq.thread.0.ignoredResponses 4 # Number of memory responses ignored because the instruction is squashed +system.cpu.iew.lsq.thread.0.invAddrLoads 0 # Number of loads ignored due to an invalid address +system.cpu.iew.lsq.thread.0.invAddrSwpfs 0 # Number of software prefetches ignored due to an invalid address +system.cpu.iew.lsq.thread.0.memOrderViolation 27 # Number of memory ordering violations +system.cpu.iew.lsq.thread.0.rescheduledLoads 0 # Number of loads that were rescheduled +system.cpu.iew.lsq.thread.0.squashedLoads 807 # Number of loads squashed +system.cpu.iew.lsq.thread.0.squashedStores 254 # Number of stores squashed +system.cpu.iew.lsq.thread.1.blockedLoads 1 # Number of blocked loads due to partial load-store forwarding +system.cpu.iew.lsq.thread.1.cacheBlocked 0 # Number of times an access to memory failed due to the cache being blocked +system.cpu.iew.lsq.thread.1.forwLoads 43 # Number of loads that had data forwarded from stores +system.cpu.iew.lsq.thread.1.ignoredResponses 2 # Number of memory responses ignored because the instruction is squashed +system.cpu.iew.lsq.thread.1.invAddrLoads 0 # Number of loads ignored due to an invalid address +system.cpu.iew.lsq.thread.1.invAddrSwpfs 0 # Number of software prefetches ignored due to an invalid address +system.cpu.iew.lsq.thread.1.memOrderViolation 29 # Number of memory ordering violations +system.cpu.iew.lsq.thread.1.rescheduledLoads 1 # Number of loads that were rescheduled +system.cpu.iew.lsq.thread.1.squashedLoads 746 # Number of loads squashed +system.cpu.iew.lsq.thread.1.squashedStores 239 # Number of stores squashed +system.cpu.iew.memOrderViolationEvents 56 # Number of memory order violations +system.cpu.iew.predictedNotTakenIncorrect 733 # Number of branches that were predicted not taken incorrectly +system.cpu.iew.predictedTakenIncorrect 136 # Number of branches that were predicted taken incorrectly +system.cpu.ipc_0 0.762102 # IPC: Instructions Per Cycle +system.cpu.ipc_1 0.762236 # IPC: Instructions Per Cycle +system.cpu.ipc_total 1.524338 # IPC: Total IPC of All Threads +system.cpu.iq.ISSUE:FU_type_0 8140 # Type of FU issued +system.cpu.iq.ISSUE:FU_type_0.start_dist +(null) 2 0.02% # Type of FU issued +IntAlu 5556 68.26% # Type of FU issued +IntMult 1 0.01% # Type of FU issued +IntDiv 0 0.00% # Type of FU issued +FloatAdd 2 0.02% # Type of FU issued +FloatCmp 0 0.00% # Type of FU issued +FloatCvt 0 0.00% # Type of FU issued +FloatMult 0 0.00% # Type of FU issued +FloatDiv 0 0.00% # Type of FU issued +FloatSqrt 0 0.00% # Type of FU issued +MemRead 1619 19.89% # Type of FU issued +MemWrite 960 11.79% # Type of FU issued +IprAccess 0 0.00% # Type of FU issued +InstPrefetch 0 0.00% # Type of FU issued +system.cpu.iq.ISSUE:FU_type_0.end_dist +system.cpu.iq.ISSUE:FU_type_1 7958 # Type of FU issued +system.cpu.iq.ISSUE:FU_type_1.start_dist + (null) 2 0.03% # Type of FU issued + IntAlu 5440 68.36% # Type of FU issued + IntMult 1 0.01% # Type of FU issued + IntDiv 0 0.00% # Type of FU issued + FloatAdd 2 0.03% # Type 
of FU issued + FloatCmp 0 0.00% # Type of FU issued + FloatCvt 0 0.00% # Type of FU issued + FloatMult 0 0.00% # Type of FU issued + FloatDiv 0 0.00% # Type of FU issued + FloatSqrt 0 0.00% # Type of FU issued + MemRead 1553 19.51% # Type of FU issued + MemWrite 960 12.06% # Type of FU issued + IprAccess 0 0.00% # Type of FU issued + InstPrefetch 0 0.00% # Type of FU issued +system.cpu.iq.ISSUE:FU_type_1.end_dist +system.cpu.iq.ISSUE:FU_type 16098 # Type of FU issued +system.cpu.iq.ISSUE:FU_type.start_dist + (null) 4 0.02% # Type of FU issued + IntAlu 10996 68.31% # Type of FU issued + IntMult 2 0.01% # Type of FU issued + IntDiv 0 0.00% # Type of FU issued + FloatAdd 4 0.02% # Type of FU issued + FloatCmp 0 0.00% # Type of FU issued + FloatCvt 0 0.00% # Type of FU issued + FloatMult 0 0.00% # Type of FU issued + FloatDiv 0 0.00% # Type of FU issued + FloatSqrt 0 0.00% # Type of FU issued + MemRead 3172 19.70% # Type of FU issued + MemWrite 1920 11.93% # Type of FU issued + IprAccess 0 0.00% # Type of FU issued + InstPrefetch 0 0.00% # Type of FU issued +system.cpu.iq.ISSUE:FU_type.end_dist +system.cpu.iq.ISSUE:fu_busy_cnt 198 # FU busy when requested +system.cpu.iq.ISSUE:fu_busy_cnt_0 101 # FU busy when requested +system.cpu.iq.ISSUE:fu_busy_cnt_1 97 # FU busy when requested +system.cpu.iq.ISSUE:fu_busy_rate 0.012300 # FU busy rate (busy events/executed inst) +system.cpu.iq.ISSUE:fu_busy_rate_0 0.006274 # FU busy rate (busy events/executed inst) +system.cpu.iq.ISSUE:fu_busy_rate_1 0.006026 # FU busy rate (busy events/executed inst) +system.cpu.iq.ISSUE:fu_full.start_dist + (null) 0 0.00% # attempts to use FU when none available + IntAlu 9 4.55% # attempts to use FU when none available + IntMult 0 0.00% # attempts to use FU when none available + IntDiv 0 0.00% # attempts to use FU when none available + FloatAdd 0 0.00% # attempts to use FU when none available + FloatCmp 0 0.00% # attempts to use FU when none available + FloatCvt 0 0.00% # attempts to use FU when none available + FloatMult 0 0.00% # attempts to use FU when none available + FloatDiv 0 0.00% # attempts to use FU when none available + FloatSqrt 0 0.00% # attempts to use FU when none available + MemRead 114 57.58% # attempts to use FU when none available + MemWrite 75 37.88% # attempts to use FU when none available + IprAccess 0 0.00% # attempts to use FU when none available + InstPrefetch 0 0.00% # attempts to use FU when none available +system.cpu.iq.ISSUE:fu_full.end_dist +system.cpu.iq.ISSUE:issued_per_cycle.start_dist # Number of insts issued each cycle +system.cpu.iq.ISSUE:issued_per_cycle.samples 7479 +system.cpu.iq.ISSUE:issued_per_cycle.min_value 0 + 0 2160 2888.09% + 1 1088 1454.74% + 2 1332 1780.99% + 3 1011 1351.78% + 4 818 1093.73% + 5 568 759.46% + 6 358 478.67% + 7 99 132.37% + 8 45 60.17% +system.cpu.iq.ISSUE:issued_per_cycle.max_value 8 +system.cpu.iq.ISSUE:issued_per_cycle.end_dist + +system.cpu.iq.ISSUE:rate 2.152427 # Inst issue rate +system.cpu.iq.iqInstsAdded 18137 # Number of instructions added to the IQ (excludes non-spec) +system.cpu.iq.iqInstsIssued 16098 # Number of instructions issued +system.cpu.iq.iqNonSpecInstsAdded 39 # Number of non-speculative instructions added to the IQ +system.cpu.iq.iqSquashedInstsExamined 5869 # Number of squashed instructions iterated over during squash; mainly for profiling +system.cpu.iq.iqSquashedInstsIssued 22 # Number of squashed instructions issued +system.cpu.iq.iqSquashedNonSpecRemoved 5 # Number of squashed non-spec instructions that were removed 
+system.cpu.iq.iqSquashedOperandsExamined 3337 # Number of squashed operands that are examined and possibly removed from graph +system.cpu.numCycles 7479 # number of cpu cycles simulated +system.cpu.rename.RENAME:BlockCycles 350 # Number of cycles rename is blocking +system.cpu.rename.RENAME:CommittedMaps 8222 # Number of HB maps that are committed +system.cpu.rename.RENAME:IdleCycles 8416 # Number of cycles rename is idle +system.cpu.rename.RENAME:LSQFullEvents 695 # Number of times rename has blocked due to LSQ full +system.cpu.rename.RENAME:RenameLookups 26609 # Number of register rename lookups that rename has made +system.cpu.rename.RENAME:RenamedInsts 20867 # Number of instructions processed by rename +system.cpu.rename.RENAME:RenamedOperands 15602 # Number of destination operands rename has renamed +system.cpu.rename.RENAME:RunCycles 3486 # Number of cycles rename is running +system.cpu.rename.RENAME:SquashCycles 1260 # Number of cycles rename is squashing +system.cpu.rename.RENAME:UnblockCycles 771 # Number of cycles rename is unblocking +system.cpu.rename.RENAME:UndoneMaps 7380 # Number of HB maps that are undone due to squashing +system.cpu.rename.RENAME:serializeStallCycles 500 # count of cycles rename stalled for serializing inst +system.cpu.rename.RENAME:serializingInsts 49 # count of serializing insts renamed +system.cpu.rename.RENAME:skidInsts 2217 # count of insts added to the skid buffer +system.cpu.rename.RENAME:tempSerializingInsts 38 # count of temporary serializing insts renamed +system.cpu.workload0.PROG:num_syscalls 17 # Number of system calls +system.cpu.workload1.PROG:num_syscalls 17 # Number of system calls + +---------- End Simulation Statistics ---------- diff --git a/tests/quick/01.hello-2T-smt/ref/alpha/linux/o3-timing/stderr b/tests/quick/01.hello-2T-smt/ref/alpha/linux/o3-timing/stderr new file mode 100644 index 000000000..a0835d526 --- /dev/null +++ b/tests/quick/01.hello-2T-smt/ref/alpha/linux/o3-timing/stderr @@ -0,0 +1,6 @@ +warn: Entering event queue @ 0. Starting simulation... +warn: cycle 0: fault (page_table_fault) detected @ PC 0x000000 +warn: Increasing stack 0x11ff92000:0x11ff9b000 to 0x11ff90000:0x11ff9b000 because of access to 0x11ff91ff0 +warn: Increasing stack 0x11ff92000:0x11ff9b000 to 0x11ff90000:0x11ff9b000 because of access to 0x11ff91ff0 +warn: cycle 5368: fault (page_table_fault) detected @ PC 0x000000 +warn: cycle 5369: fault (page_table_fault) detected @ PC 0x000000 diff --git a/tests/quick/01.hello-2T-smt/ref/alpha/linux/o3-timing/stdout b/tests/quick/01.hello-2T-smt/ref/alpha/linux/o3-timing/stdout new file mode 100644 index 000000000..5210b5740 --- /dev/null +++ b/tests/quick/01.hello-2T-smt/ref/alpha/linux/o3-timing/stdout @@ -0,0 +1,14 @@ +Hello world! +Hello world! +M5 Simulator System + +Copyright (c) 2001-2006 +The Regents of The University of Michigan +All Rights Reserved + + +M5 compiled Oct 6 2006 00:21:18 +M5 started Fri Oct 6 02:55:30 2006 +M5 executing on zizzer.eecs.umich.edu +command line: build/ALPHA_SE/m5.debug configs/example/se.py -d --cmd=tests/test-progs/hello/bin/alpha/linux/hello;tests/test-progs/hello/bin/alpha/linux/hello +Exiting @ tick 7478 because target called exit() diff --git a/tests/quick/01.hello-2T-smt/test.py b/tests/quick/01.hello-2T-smt/test.py new file mode 100644 index 000000000..04ff8c2e6 --- /dev/null +++ b/tests/quick/01.hello-2T-smt/test.py @@ -0,0 +1,32 @@ +# Copyright (c) 2006 The Regents of The University of Michigan +# All rights reserved. 
+# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer; +# redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution; +# neither the name of the copyright holders nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# +# Authors: Korey Sewell + +process1 = LiveProcess(cmd = 'hello', executable = binpath('hello')) +process2 = LiveProcess(cmd = 'hello', executable = binpath('hello')) + +root.system.cpu.workload = [process1, process2] -- cgit v1.2.3
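As a quick illustration of the exit-event API reworked in the "two main thrusts" changeset above: a minimal sketch, assuming only the wrapper signatures visible in the sim_exit.hh diff. The demo() function and the tick values are invented for the example, and Tick/curTick are assumed to come from the usual m5 headers.

    #include "sim/sim_exit.hh"  // exitSimLoop(), schedExitSimLoop()

    void demo()  // hypothetical caller, not part of the patches above
    {
        // Exit at curTick, the way the halt instruction handler now does:
        exitSimLoop("halt instruction encountered");

        // One-shot exit in the future (the repeat argument defaults to 0):
        Tick delay = 1000;  // example value
        schedExitSimLoop("m5_exit instruction encountered", curTick + delay);

        // Recurring exit: with repeat != 0, SimLoopExitEvent::process()
        // reschedules the event at curTick + repeat, which is what makes
        // periodic checkpointing ("m5 checkpoint n m") meaningful again:
        Tick period = 500;  // example value
        schedExitSimLoop("checkpoint", curTick + period, period);
    }

Note that exitSimLoop(message) is simply schedExitSimLoop(message, curTick), so both entry points funnel into a single SimLoopExitEvent whose repeat field decides whether it fires once or repeatedly.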