From 5cb1840b311a7bba93a658481703ce1e09ccf7bb Mon Sep 17 00:00:00 2001
From: Ron Dreslinski
Date: Sun, 8 Oct 2006 20:30:42 -0400
Subject: Fixes for the functional path.

If the CPU needs to update any state when it gets a functional write
(LSQ??), then that code needs to be written.

src/cpu/o3/fetch_impl.hh:
src/cpu/o3/lsq_impl.hh:
src/cpu/ozone/front_end_impl.hh:
src/cpu/ozone/lw_lsq_impl.hh:
src/cpu/simple/atomic.cc:
src/cpu/simple/timing.cc:
    CPUs can receive functional accesses; they need to determine whether
    they need to do anything with them.
src/mem/bus.cc:
src/mem/bus.hh:
    Make the functional path do the correct type of snoop.

--HG--
extra : convert_revision : 70d09f954b907a8aa9b8137579cd2b06e02ae2ff
---
 src/cpu/o3/fetch_impl.hh        |  2 +-
 src/cpu/o3/lsq_impl.hh          |  2 +-
 src/cpu/ozone/front_end_impl.hh |  2 +-
 src/cpu/ozone/lw_lsq_impl.hh    |  2 +-
 src/cpu/simple/atomic.cc        |  5 +++--
 src/cpu/simple/timing.cc        |  3 ++-
 src/mem/bus.cc                  | 14 +++++++++++++-
 src/mem/bus.hh                  |  3 +++
 8 files changed, 25 insertions(+), 8 deletions(-)

diff --git a/src/cpu/o3/fetch_impl.hh b/src/cpu/o3/fetch_impl.hh
index 497179576..b3c3caaad 100644
--- a/src/cpu/o3/fetch_impl.hh
+++ b/src/cpu/o3/fetch_impl.hh
@@ -63,7 +63,7 @@ template <class Impl>
 void
 DefaultFetch<Impl>::IcachePort::recvFunctional(PacketPtr pkt)
 {
-    panic("DefaultFetch doesn't expect recvFunctional callback!");
+    warn("Default fetch doesn't update it's state from a functional call.");
 }
 
 template <class Impl>
diff --git a/src/cpu/o3/lsq_impl.hh b/src/cpu/o3/lsq_impl.hh
index 2bbab71f0..7b7d1eb8e 100644
--- a/src/cpu/o3/lsq_impl.hh
+++ b/src/cpu/o3/lsq_impl.hh
@@ -46,7 +46,7 @@ template <class Impl>
 void
 LSQ<Impl>::DcachePort::recvFunctional(PacketPtr pkt)
 {
-    panic("O3CPU doesn't expect recvFunctional callback!");
+    warn("O3CPU doesn't update things on a recvFunctional.");
 }
 
 template <class Impl>
diff --git a/src/cpu/ozone/front_end_impl.hh b/src/cpu/ozone/front_end_impl.hh
index 5956c5cba..c814ff9c7 100644
--- a/src/cpu/ozone/front_end_impl.hh
+++ b/src/cpu/ozone/front_end_impl.hh
@@ -59,7 +59,7 @@ template <class Impl>
 void
 FrontEnd<Impl>::IcachePort::recvFunctional(PacketPtr pkt)
 {
-    panic("FrontEnd doesn't expect recvFunctional callback!");
+    warn("FrontEnd doesn't update state from functional calls");
 }
 
 template <class Impl>
diff --git a/src/cpu/ozone/lw_lsq_impl.hh b/src/cpu/ozone/lw_lsq_impl.hh
index 9d17b027f..e523712da 100644
--- a/src/cpu/ozone/lw_lsq_impl.hh
+++ b/src/cpu/ozone/lw_lsq_impl.hh
@@ -72,7 +72,7 @@ template <class Impl>
 void
 OzoneLWLSQ<Impl>::DcachePort::recvFunctional(PacketPtr pkt)
 {
-    panic("O3CPU doesn't expect recvFunctional callback!");
+    warn("O3CPU doesn't update things on a recvFunctional");
 }
 
 template <class Impl>
diff --git a/src/cpu/simple/atomic.cc b/src/cpu/simple/atomic.cc
index 42b0e9783..e21065ebc 100644
--- a/src/cpu/simple/atomic.cc
+++ b/src/cpu/simple/atomic.cc
@@ -94,7 +94,7 @@ AtomicSimpleCPU::init()
 bool
 AtomicSimpleCPU::CpuPort::recvTiming(Packet *pkt)
 {
-    panic("AtomicSimpleCPU doesn't expect recvAtomic callback!");
+    panic("AtomicSimpleCPU doesn't expect recvTiming callback!");
     return true;
 }
 
@@ -108,7 +108,8 @@ AtomicSimpleCPU::CpuPort::recvAtomic(Packet *pkt)
 void
 AtomicSimpleCPU::CpuPort::recvFunctional(Packet *pkt)
 {
-    panic("AtomicSimpleCPU doesn't expect recvFunctional callback!");
+    //No internal storage to update, just return
+    return;
 }
 
 void
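A note on the warn() stubs above: as the commit message says, a CPU model that
keeps its own copy of memory state (most plausibly in its LSQ) cannot simply
ignore a functional write, because in-flight loads may already have forwarded
the bytes the write changes. A minimal sketch of what a real handler might
look like is below; the checkFunctionalWrite() helper and the thread
bookkeeping are hypothetical names, not an existing m5 interface:

    template <class Impl>
    void
    LSQ<Impl>::DcachePort::recvFunctional(PacketPtr pkt)
    {
        if (pkt->isWrite()) {
            // A functional write can overlap bytes that a queued load has
            // already obtained through store-to-load forwarding; each
            // thread's LSQ unit would need to detect the overlap and
            // refresh or squash the affected load.
            for (int tid = 0; tid < lsq->numThreads; ++tid)  // hypothetical
                lsq->thread[tid].checkFunctionalWrite(pkt);  // hypothetical
        }
        // Nothing to do for reads: functional reads are satisfied by the
        // memory system, and this port holds no data the memory lacks.
    }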
panic("TimingSimpleCPU doesn't expect recvFunctional callback!"); + //No internal storage to update, jusst return + return; } void diff --git a/src/mem/bus.cc b/src/mem/bus.cc index daca6f985..1646cbd57 100644 --- a/src/mem/bus.cc +++ b/src/mem/bus.cc @@ -200,6 +200,18 @@ Bus::atomicSnoop(Packet *pkt) } } +void +Bus::functionalSnoop(Packet *pkt) +{ + std::vector ports = findSnoopPorts(pkt->getAddr(), pkt->getSrc()); + + while (!ports.empty()) + { + interfaces[ports.back()]->sendFunctional(pkt); + ports.pop_back(); + } +} + bool Bus::timingSnoop(Packet *pkt) { @@ -236,7 +248,7 @@ Bus::recvFunctional(Packet *pkt) DPRINTF(Bus, "recvFunctional: packet src %d dest %d addr 0x%x cmd %s\n", pkt->getSrc(), pkt->getDest(), pkt->getAddr(), pkt->cmdString()); assert(pkt->getDest() == Packet::Broadcast); - atomicSnoop(pkt); + functionalSnoop(pkt); findPort(pkt->getAddr(), pkt->getSrc())->sendFunctional(pkt); } diff --git a/src/mem/bus.hh b/src/mem/bus.hh index 3d7f4ad65..ff4ec9c8c 100644 --- a/src/mem/bus.hh +++ b/src/mem/bus.hh @@ -102,6 +102,9 @@ class Bus : public MemObject /** Snoop all relevant ports atomicly. */ void atomicSnoop(Packet *pkt); + /** Snoop all relevant ports functionally. */ + void functionalSnoop(Packet *pkt); + /** Call snoop on caches, be sure to set SNOOP_COMMIT bit if you want * the snoop to happen * @return True if succeds. -- cgit v1.2.3 From 4cfddc0d772eff614a5b6d61efa846aa7fa706a8 Mon Sep 17 00:00:00 2001 From: Ron Dreslinski Date: Sun, 8 Oct 2006 20:47:50 -0400 Subject: Make sure to propogate sendFunctional calls with functional not atomic. src/mem/cache/cache_impl.hh: Fix a error case by putting a panic in. Make sure to propogate sendFunctional calls with functional not atomic. --HG-- extra : convert_revision : 05d03f729a40cfa3ecb68bcba172eb560b24e897 --- src/mem/cache/cache_impl.hh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/mem/cache/cache_impl.hh b/src/mem/cache/cache_impl.hh index 1f03065b6..9ce8f515d 100644 --- a/src/mem/cache/cache_impl.hh +++ b/src/mem/cache/cache_impl.hh @@ -603,7 +603,7 @@ Cache::probe(Packet * &pkt, bool update, CachePort // update the cache state and statistics if (mshr || !writes.empty()){ // Can't handle it, return pktuest unsatisfied. - return 0; + panic("Atomic access ran into outstanding MSHR's or WB's!"); } if (!pkt->req->isUncacheable()) { // Fetch the cache block to fill @@ -655,7 +655,7 @@ Cache::probe(Packet * &pkt, bool update, CachePort hits[pkt->cmdToIndex()][0/*pkt->req->getThreadNum()*/]++; } else if (pkt->isWrite()) { // Still need to change data in all locations. 
- return otherSidePort->sendAtomic(pkt); + otherSidePort->sendFunctional(pkt); } return curTick + lat; } -- cgit v1.2.3 From 0a3e4d56e5d7d9aad4a34dc561a5b4fa84337c5f Mon Sep 17 00:00:00 2001 From: Ron Dreslinski Date: Sun, 8 Oct 2006 21:08:27 -0400 Subject: Update stats for functional path fix --HG-- extra : convert_revision : 0f38abab28e7e44f1dc748c25938185651dd1b7d --- .../00.hello/ref/alpha/linux/o3-timing/m5stats.txt | 30 ++++---- .../00.hello/ref/alpha/linux/o3-timing/stderr | 9 +++ .../00.hello/ref/alpha/linux/o3-timing/stdout | 4 +- .../ref/alpha/linux/simple-timing/m5stats.txt | 30 ++++---- .../ref/alpha/linux/o3-timing/m5stats.txt | 84 +++++++++++----------- .../ref/alpha/linux/o3-timing/stderr | 18 +++++ .../ref/alpha/linux/o3-timing/stdout | 4 +- .../ref/alpha/eio/simple-timing/m5stats.txt | 32 ++++----- .../ref/alpha/eio/simple-timing/stdout | 4 +- 9 files changed, 116 insertions(+), 99 deletions(-) diff --git a/tests/quick/00.hello/ref/alpha/linux/o3-timing/m5stats.txt b/tests/quick/00.hello/ref/alpha/linux/o3-timing/m5stats.txt index b8dbf28af..59cda42d9 100644 --- a/tests/quick/00.hello/ref/alpha/linux/o3-timing/m5stats.txt +++ b/tests/quick/00.hello/ref/alpha/linux/o3-timing/m5stats.txt @@ -8,10 +8,10 @@ global.BPredUnit.condIncorrect 420 # Nu global.BPredUnit.condPredicted 1302 # Number of conditional branches predicted global.BPredUnit.lookups 2254 # Number of BP lookups global.BPredUnit.usedRAS 291 # Number of times the RAS was used to get a target. -host_inst_rate 1748 # Simulator instruction rate (inst/s) -host_mem_usage 160364 # Number of bytes of host memory used -host_seconds 3.22 # Real time elapsed on the host -host_tick_rate 2135 # Simulator tick rate (ticks/s) +host_inst_rate 46995 # Simulator instruction rate (inst/s) +host_mem_usage 160420 # Number of bytes of host memory used +host_seconds 0.12 # Real time elapsed on the host +host_tick_rate 57256 # Simulator tick rate (ticks/s) memdepunit.memDep.conflictingLoads 12 # Number of conflicting loads. memdepunit.memDep.conflictingStores 259 # Number of conflicting stores. memdepunit.memDep.insertedLoads 2049 # Number of loads inserted to the mem dependence unit. @@ -334,41 +334,39 @@ system.cpu.l2cache.ReadReq_misses 492 # nu system.cpu.l2cache.ReadReq_mshr_miss_latency 492 # number of ReadReq MSHR miss cycles system.cpu.l2cache.ReadReq_mshr_miss_rate 0.995951 # mshr miss rate for ReadReq accesses system.cpu.l2cache.ReadReq_mshr_misses 492 # number of ReadReq MSHR misses -system.cpu.l2cache.WriteReq_accesses 2 # number of WriteReq accesses(hits+misses) -system.cpu.l2cache.WriteReq_hits 2 # number of WriteReq hits system.cpu.l2cache.avg_blocked_cycles_no_mshrs # average number of cycles each access was blocked system.cpu.l2cache.avg_blocked_cycles_no_targets # average number of cycles each access was blocked -system.cpu.l2cache.avg_refs 0.008130 # Average number of references to valid blocks. +system.cpu.l2cache.avg_refs 0.004065 # Average number of references to valid blocks. 
system.cpu.l2cache.blocked_no_mshrs 0 # number of cycles access was blocked system.cpu.l2cache.blocked_no_targets 0 # number of cycles access was blocked system.cpu.l2cache.blocked_cycles_no_mshrs 0 # number of cycles access was blocked system.cpu.l2cache.blocked_cycles_no_targets 0 # number of cycles access was blocked system.cpu.l2cache.cache_copies 0 # number of cache copies performed -system.cpu.l2cache.demand_accesses 496 # number of demand (read+write) accesses +system.cpu.l2cache.demand_accesses 494 # number of demand (read+write) accesses system.cpu.l2cache.demand_avg_miss_latency 2.071138 # average overall miss latency system.cpu.l2cache.demand_avg_mshr_miss_latency 1 # average overall mshr miss latency -system.cpu.l2cache.demand_hits 4 # number of demand (read+write) hits +system.cpu.l2cache.demand_hits 2 # number of demand (read+write) hits system.cpu.l2cache.demand_miss_latency 1019 # number of demand (read+write) miss cycles -system.cpu.l2cache.demand_miss_rate 0.991935 # miss rate for demand accesses +system.cpu.l2cache.demand_miss_rate 0.995951 # miss rate for demand accesses system.cpu.l2cache.demand_misses 492 # number of demand (read+write) misses system.cpu.l2cache.demand_mshr_hits 0 # number of demand (read+write) MSHR hits system.cpu.l2cache.demand_mshr_miss_latency 492 # number of demand (read+write) MSHR miss cycles -system.cpu.l2cache.demand_mshr_miss_rate 0.991935 # mshr miss rate for demand accesses +system.cpu.l2cache.demand_mshr_miss_rate 0.995951 # mshr miss rate for demand accesses system.cpu.l2cache.demand_mshr_misses 492 # number of demand (read+write) MSHR misses system.cpu.l2cache.fast_writes 0 # number of fast writes performed system.cpu.l2cache.mshr_cap_events 0 # number of times MSHR cap was activated system.cpu.l2cache.no_allocate_misses 0 # Number of misses that were no-allocate -system.cpu.l2cache.overall_accesses 496 # number of overall (read+write) accesses +system.cpu.l2cache.overall_accesses 494 # number of overall (read+write) accesses system.cpu.l2cache.overall_avg_miss_latency 2.071138 # average overall miss latency system.cpu.l2cache.overall_avg_mshr_miss_latency 1 # average overall mshr miss latency system.cpu.l2cache.overall_avg_mshr_uncacheable_latency # average overall mshr uncacheable latency -system.cpu.l2cache.overall_hits 4 # number of overall hits +system.cpu.l2cache.overall_hits 2 # number of overall hits system.cpu.l2cache.overall_miss_latency 1019 # number of overall miss cycles -system.cpu.l2cache.overall_miss_rate 0.991935 # miss rate for overall accesses +system.cpu.l2cache.overall_miss_rate 0.995951 # miss rate for overall accesses system.cpu.l2cache.overall_misses 492 # number of overall misses system.cpu.l2cache.overall_mshr_hits 0 # number of overall MSHR hits system.cpu.l2cache.overall_mshr_miss_latency 492 # number of overall MSHR miss cycles -system.cpu.l2cache.overall_mshr_miss_rate 0.991935 # mshr miss rate for overall accesses +system.cpu.l2cache.overall_mshr_miss_rate 0.995951 # mshr miss rate for overall accesses system.cpu.l2cache.overall_mshr_misses 492 # number of overall MSHR misses system.cpu.l2cache.overall_mshr_uncacheable_latency 0 # number of overall MSHR uncacheable cycles system.cpu.l2cache.overall_mshr_uncacheable_misses 0 # number of overall MSHR uncacheable misses @@ -385,7 +383,7 @@ system.cpu.l2cache.replacements 0 # nu system.cpu.l2cache.sampled_refs 492 # Sample count of references to valid blocks. 
system.cpu.l2cache.soft_prefetch_mshr_full 0 # number of mshr full events for SW prefetching instrutions system.cpu.l2cache.tagsinuse 290.948901 # Cycle average of tags in use -system.cpu.l2cache.total_refs 4 # Total number of references to valid blocks. +system.cpu.l2cache.total_refs 2 # Total number of references to valid blocks. system.cpu.l2cache.warmup_cycle 0 # Cycle when the warmup percentage was hit. system.cpu.l2cache.writebacks 0 # number of writebacks system.cpu.numCycles 6869 # number of cpu cycles simulated diff --git a/tests/quick/00.hello/ref/alpha/linux/o3-timing/stderr b/tests/quick/00.hello/ref/alpha/linux/o3-timing/stderr index 8893caac8..558105896 100644 --- a/tests/quick/00.hello/ref/alpha/linux/o3-timing/stderr +++ b/tests/quick/00.hello/ref/alpha/linux/o3-timing/stderr @@ -1,3 +1,12 @@ warn: Entering event queue @ 0. Starting simulation... warn: cycle 0: fault (page_table_fault) detected @ PC 0x000000 warn: Increasing stack 0x11ff92000:0x11ff9b000 to 0x11ff90000:0x11ff9b000 because of access to 0x11ff91ff0 +warn: Default fetch doesn't update it's state from a functional call. +warn: Default fetch doesn't update it's state from a functional call. +warn: Default fetch doesn't update it's state from a functional call. +warn: Default fetch doesn't update it's state from a functional call. +warn: Default fetch doesn't update it's state from a functional call. +warn: Default fetch doesn't update it's state from a functional call. +warn: Default fetch doesn't update it's state from a functional call. +warn: Default fetch doesn't update it's state from a functional call. +warn: Default fetch doesn't update it's state from a functional call. diff --git a/tests/quick/00.hello/ref/alpha/linux/o3-timing/stdout b/tests/quick/00.hello/ref/alpha/linux/o3-timing/stdout index 718827a30..f2a1151c4 100644 --- a/tests/quick/00.hello/ref/alpha/linux/o3-timing/stdout +++ b/tests/quick/00.hello/ref/alpha/linux/o3-timing/stdout @@ -6,8 +6,8 @@ The Regents of The University of Michigan All Rights Reserved -M5 compiled Oct 8 2006 14:00:39 -M5 started Sun Oct 8 14:00:45 2006 +M5 compiled Oct 8 2006 20:54:51 +M5 started Sun Oct 8 20:55:10 2006 M5 executing on zizzer.eecs.umich.edu command line: build/ALPHA_SE/m5.opt -d build/ALPHA_SE/tests/opt/quick/00.hello/alpha/linux/o3-timing tests/run.py quick/00.hello/alpha/linux/o3-timing Exiting @ tick 6868 because target called exit() diff --git a/tests/quick/00.hello/ref/alpha/linux/simple-timing/m5stats.txt b/tests/quick/00.hello/ref/alpha/linux/simple-timing/m5stats.txt index 757bbb920..2ee3181d8 100644 --- a/tests/quick/00.hello/ref/alpha/linux/simple-timing/m5stats.txt +++ b/tests/quick/00.hello/ref/alpha/linux/simple-timing/m5stats.txt @@ -1,9 +1,9 @@ ---------- Begin Simulation Statistics ---------- -host_inst_rate 98835 # Simulator instruction rate (inst/s) -host_mem_usage 159632 # Number of bytes of host memory used -host_seconds 0.06 # Real time elapsed on the host -host_tick_rate 144603 # Simulator tick rate (ticks/s) +host_inst_rate 292635 # Simulator instruction rate (inst/s) +host_mem_usage 159688 # Number of bytes of host memory used +host_seconds 0.02 # Real time elapsed on the host +host_tick_rate 422303 # Simulator tick rate (ticks/s) sim_freq 1000000000000 # Frequency of simulated ticks sim_insts 5642 # Number of instructions simulated sim_seconds 0.000000 # Number of seconds simulated @@ -153,41 +153,39 @@ system.cpu.l2cache.ReadReq_misses 441 # nu system.cpu.l2cache.ReadReq_mshr_miss_latency 441 # number of ReadReq MSHR miss cycles 
system.cpu.l2cache.ReadReq_mshr_miss_rate 0.997738 # mshr miss rate for ReadReq accesses system.cpu.l2cache.ReadReq_mshr_misses 441 # number of ReadReq MSHR misses -system.cpu.l2cache.WriteReq_accesses 2 # number of WriteReq accesses(hits+misses) -system.cpu.l2cache.WriteReq_hits 2 # number of WriteReq hits system.cpu.l2cache.avg_blocked_cycles_no_mshrs # average number of cycles each access was blocked system.cpu.l2cache.avg_blocked_cycles_no_targets # average number of cycles each access was blocked -system.cpu.l2cache.avg_refs 0.006803 # Average number of references to valid blocks. +system.cpu.l2cache.avg_refs 0.002268 # Average number of references to valid blocks. system.cpu.l2cache.blocked_no_mshrs 0 # number of cycles access was blocked system.cpu.l2cache.blocked_no_targets 0 # number of cycles access was blocked system.cpu.l2cache.blocked_cycles_no_mshrs 0 # number of cycles access was blocked system.cpu.l2cache.blocked_cycles_no_targets 0 # number of cycles access was blocked system.cpu.l2cache.cache_copies 0 # number of cache copies performed -system.cpu.l2cache.demand_accesses 444 # number of demand (read+write) accesses +system.cpu.l2cache.demand_accesses 442 # number of demand (read+write) accesses system.cpu.l2cache.demand_avg_miss_latency 2 # average overall miss latency system.cpu.l2cache.demand_avg_mshr_miss_latency 1 # average overall mshr miss latency -system.cpu.l2cache.demand_hits 3 # number of demand (read+write) hits +system.cpu.l2cache.demand_hits 1 # number of demand (read+write) hits system.cpu.l2cache.demand_miss_latency 882 # number of demand (read+write) miss cycles -system.cpu.l2cache.demand_miss_rate 0.993243 # miss rate for demand accesses +system.cpu.l2cache.demand_miss_rate 0.997738 # miss rate for demand accesses system.cpu.l2cache.demand_misses 441 # number of demand (read+write) misses system.cpu.l2cache.demand_mshr_hits 0 # number of demand (read+write) MSHR hits system.cpu.l2cache.demand_mshr_miss_latency 441 # number of demand (read+write) MSHR miss cycles -system.cpu.l2cache.demand_mshr_miss_rate 0.993243 # mshr miss rate for demand accesses +system.cpu.l2cache.demand_mshr_miss_rate 0.997738 # mshr miss rate for demand accesses system.cpu.l2cache.demand_mshr_misses 441 # number of demand (read+write) MSHR misses system.cpu.l2cache.fast_writes 0 # number of fast writes performed system.cpu.l2cache.mshr_cap_events 0 # number of times MSHR cap was activated system.cpu.l2cache.no_allocate_misses 0 # Number of misses that were no-allocate -system.cpu.l2cache.overall_accesses 444 # number of overall (read+write) accesses +system.cpu.l2cache.overall_accesses 442 # number of overall (read+write) accesses system.cpu.l2cache.overall_avg_miss_latency 2 # average overall miss latency system.cpu.l2cache.overall_avg_mshr_miss_latency 1 # average overall mshr miss latency system.cpu.l2cache.overall_avg_mshr_uncacheable_latency # average overall mshr uncacheable latency -system.cpu.l2cache.overall_hits 3 # number of overall hits +system.cpu.l2cache.overall_hits 1 # number of overall hits system.cpu.l2cache.overall_miss_latency 882 # number of overall miss cycles -system.cpu.l2cache.overall_miss_rate 0.993243 # miss rate for overall accesses +system.cpu.l2cache.overall_miss_rate 0.997738 # miss rate for overall accesses system.cpu.l2cache.overall_misses 441 # number of overall misses system.cpu.l2cache.overall_mshr_hits 0 # number of overall MSHR hits system.cpu.l2cache.overall_mshr_miss_latency 441 # number of overall MSHR miss cycles 
-system.cpu.l2cache.overall_mshr_miss_rate 0.993243 # mshr miss rate for overall accesses +system.cpu.l2cache.overall_mshr_miss_rate 0.997738 # mshr miss rate for overall accesses system.cpu.l2cache.overall_mshr_misses 441 # number of overall MSHR misses system.cpu.l2cache.overall_mshr_uncacheable_latency 0 # number of overall MSHR uncacheable cycles system.cpu.l2cache.overall_mshr_uncacheable_misses 0 # number of overall MSHR uncacheable misses @@ -204,7 +202,7 @@ system.cpu.l2cache.replacements 0 # nu system.cpu.l2cache.sampled_refs 441 # Sample count of references to valid blocks. system.cpu.l2cache.soft_prefetch_mshr_full 0 # number of mshr full events for SW prefetching instrutions system.cpu.l2cache.tagsinuse 240.276061 # Cycle average of tags in use -system.cpu.l2cache.total_refs 3 # Total number of references to valid blocks. +system.cpu.l2cache.total_refs 1 # Total number of references to valid blocks. system.cpu.l2cache.warmup_cycle 0 # Cycle when the warmup percentage was hit. system.cpu.l2cache.writebacks 0 # number of writebacks system.cpu.not_idle_fraction 1 # Percentage of non-idle cycles diff --git a/tests/quick/01.hello-2T-smt/ref/alpha/linux/o3-timing/m5stats.txt b/tests/quick/01.hello-2T-smt/ref/alpha/linux/o3-timing/m5stats.txt index 15172b43c..9871af3ab 100644 --- a/tests/quick/01.hello-2T-smt/ref/alpha/linux/o3-timing/m5stats.txt +++ b/tests/quick/01.hello-2T-smt/ref/alpha/linux/o3-timing/m5stats.txt @@ -8,10 +8,10 @@ global.BPredUnit.condIncorrect 1081 # Nu global.BPredUnit.condPredicted 2449 # Number of conditional branches predicted global.BPredUnit.lookups 4173 # Number of BP lookups global.BPredUnit.usedRAS 551 # Number of times the RAS was used to get a target. -host_inst_rate 40630 # Simulator instruction rate (inst/s) -host_mem_usage 161244 # Number of bytes of host memory used -host_seconds 0.28 # Real time elapsed on the host -host_tick_rate 30458 # Simulator tick rate (ticks/s) +host_inst_rate 48339 # Simulator instruction rate (inst/s) +host_mem_usage 161300 # Number of bytes of host memory used +host_seconds 0.23 # Real time elapsed on the host +host_tick_rate 36232 # Simulator tick rate (ticks/s) memdepunit.memDep.conflictingLoads 41 # Number of conflicting loads. memdepunit.memDep.conflictingLoads 39 # Number of conflicting loads. memdepunit.memDep.conflictingStores 194 # Number of conflicting stores. 
@@ -193,7 +193,7 @@ system.cpu.dcache.overall_mshr_miss_latency_0 741 system.cpu.dcache.overall_mshr_miss_latency_1 0 # number of overall MSHR miss cycles system.cpu.dcache.overall_mshr_miss_rate 0.075551 # mshr miss rate for overall accesses system.cpu.dcache.overall_mshr_miss_rate_0 0.075551 # mshr miss rate for overall accesses -system.cpu.dcache.overall_mshr_miss_rate_1 # mshr miss rate for overall accesses +system.cpu.dcache.overall_mshr_miss_rate_1 no value # mshr miss rate for overall accesses system.cpu.dcache.overall_mshr_misses 343 # number of overall MSHR misses system.cpu.dcache.overall_mshr_misses_0 343 # number of overall MSHR misses system.cpu.dcache.overall_mshr_misses_1 0 # number of overall MSHR misses @@ -476,20 +476,20 @@ system.cpu.ipc_1 0.666272 # IP system.cpu.ipc_total 1.332425 # IPC: Total IPC of All Threads system.cpu.iq.ISSUE:FU_type_0 8158 # Type of FU issued system.cpu.iq.ISSUE:FU_type_0.start_dist -(null) 2 0.02% # Type of FU issued -IntAlu 5514 67.59% # Type of FU issued -IntMult 1 0.01% # Type of FU issued -IntDiv 0 0.00% # Type of FU issued -FloatAdd 2 0.02% # Type of FU issued -FloatCmp 0 0.00% # Type of FU issued -FloatCvt 0 0.00% # Type of FU issued -FloatMult 0 0.00% # Type of FU issued -FloatDiv 0 0.00% # Type of FU issued -FloatSqrt 0 0.00% # Type of FU issued -MemRead 1662 20.37% # Type of FU issued -MemWrite 977 11.98% # Type of FU issued -IprAccess 0 0.00% # Type of FU issued -InstPrefetch 0 0.00% # Type of FU issued + (null) 2 0.02% # Type of FU issued + IntAlu 5514 67.59% # Type of FU issued + IntMult 1 0.01% # Type of FU issued + IntDiv 0 0.00% # Type of FU issued + FloatAdd 2 0.02% # Type of FU issued + FloatCmp 0 0.00% # Type of FU issued + FloatCvt 0 0.00% # Type of FU issued + FloatMult 0 0.00% # Type of FU issued + FloatDiv 0 0.00% # Type of FU issued + FloatSqrt 0 0.00% # Type of FU issued + MemRead 1662 20.37% # Type of FU issued + MemWrite 977 11.98% # Type of FU issued + IprAccess 0 0.00% # Type of FU issued + InstPrefetch 0 0.00% # Type of FU issued system.cpu.iq.ISSUE:FU_type_0.end_dist system.cpu.iq.ISSUE:FU_type_1 8090 # Type of FU issued system.cpu.iq.ISSUE:FU_type_1.start_dist @@ -590,35 +590,31 @@ system.cpu.l2cache.ReadReq_mshr_miss_rate 0.994802 # m system.cpu.l2cache.ReadReq_mshr_miss_rate_0 0.994802 # mshr miss rate for ReadReq accesses system.cpu.l2cache.ReadReq_mshr_misses 957 # number of ReadReq MSHR misses system.cpu.l2cache.ReadReq_mshr_misses_0 957 # number of ReadReq MSHR misses -system.cpu.l2cache.WriteReq_accesses 4 # number of WriteReq accesses(hits+misses) -system.cpu.l2cache.WriteReq_accesses_0 4 # number of WriteReq accesses(hits+misses) -system.cpu.l2cache.WriteReq_hits 4 # number of WriteReq hits -system.cpu.l2cache.WriteReq_hits_0 4 # number of WriteReq hits -system.cpu.l2cache.avg_blocked_cycles_no_mshrs # average number of cycles each access was blocked -system.cpu.l2cache.avg_blocked_cycles_no_targets # average number of cycles each access was blocked -system.cpu.l2cache.avg_refs 0.009404 # Average number of references to valid blocks. +system.cpu.l2cache.avg_blocked_cycles_no_mshrs no value # average number of cycles each access was blocked +system.cpu.l2cache.avg_blocked_cycles_no_targets no value # average number of cycles each access was blocked +system.cpu.l2cache.avg_refs 0.005225 # Average number of references to valid blocks. 
system.cpu.l2cache.blocked_no_mshrs 0 # number of cycles access was blocked system.cpu.l2cache.blocked_no_targets 0 # number of cycles access was blocked system.cpu.l2cache.blocked_cycles_no_mshrs 0 # number of cycles access was blocked system.cpu.l2cache.blocked_cycles_no_targets 0 # number of cycles access was blocked system.cpu.l2cache.cache_copies 0 # number of cache copies performed -system.cpu.l2cache.demand_accesses 966 # number of demand (read+write) accesses -system.cpu.l2cache.demand_accesses_0 966 # number of demand (read+write) accesses +system.cpu.l2cache.demand_accesses 962 # number of demand (read+write) accesses +system.cpu.l2cache.demand_accesses_0 962 # number of demand (read+write) accesses system.cpu.l2cache.demand_accesses_1 0 # number of demand (read+write) accesses system.cpu.l2cache.demand_avg_miss_latency 2.059561 # average overall miss latency system.cpu.l2cache.demand_avg_miss_latency_0 2.059561 # average overall miss latency system.cpu.l2cache.demand_avg_miss_latency_1 # average overall miss latency system.cpu.l2cache.demand_avg_mshr_miss_latency 1 # average overall mshr miss latency system.cpu.l2cache.demand_avg_mshr_miss_latency_0 1 # average overall mshr miss latency -system.cpu.l2cache.demand_avg_mshr_miss_latency_1 no value # average overall mshr miss latency -system.cpu.l2cache.demand_hits 9 # number of demand (read+write) hits -system.cpu.l2cache.demand_hits_0 9 # number of demand (read+write) hits +system.cpu.l2cache.demand_avg_mshr_miss_latency_1 # average overall mshr miss latency +system.cpu.l2cache.demand_hits 5 # number of demand (read+write) hits +system.cpu.l2cache.demand_hits_0 5 # number of demand (read+write) hits system.cpu.l2cache.demand_hits_1 0 # number of demand (read+write) hits system.cpu.l2cache.demand_miss_latency 1971 # number of demand (read+write) miss cycles system.cpu.l2cache.demand_miss_latency_0 1971 # number of demand (read+write) miss cycles system.cpu.l2cache.demand_miss_latency_1 0 # number of demand (read+write) miss cycles -system.cpu.l2cache.demand_miss_rate 0.990683 # miss rate for demand accesses -system.cpu.l2cache.demand_miss_rate_0 0.990683 # miss rate for demand accesses +system.cpu.l2cache.demand_miss_rate 0.994802 # miss rate for demand accesses +system.cpu.l2cache.demand_miss_rate_0 0.994802 # miss rate for demand accesses system.cpu.l2cache.demand_miss_rate_1 # miss rate for demand accesses system.cpu.l2cache.demand_misses 957 # number of demand (read+write) misses system.cpu.l2cache.demand_misses_0 957 # number of demand (read+write) misses @@ -629,8 +625,8 @@ system.cpu.l2cache.demand_mshr_hits_1 0 # nu system.cpu.l2cache.demand_mshr_miss_latency 957 # number of demand (read+write) MSHR miss cycles system.cpu.l2cache.demand_mshr_miss_latency_0 957 # number of demand (read+write) MSHR miss cycles system.cpu.l2cache.demand_mshr_miss_latency_1 0 # number of demand (read+write) MSHR miss cycles -system.cpu.l2cache.demand_mshr_miss_rate 0.990683 # mshr miss rate for demand accesses -system.cpu.l2cache.demand_mshr_miss_rate_0 0.990683 # mshr miss rate for demand accesses +system.cpu.l2cache.demand_mshr_miss_rate 0.994802 # mshr miss rate for demand accesses +system.cpu.l2cache.demand_mshr_miss_rate_0 0.994802 # mshr miss rate for demand accesses system.cpu.l2cache.demand_mshr_miss_rate_1 # mshr miss rate for demand accesses system.cpu.l2cache.demand_mshr_misses 957 # number of demand (read+write) MSHR misses system.cpu.l2cache.demand_mshr_misses_0 957 # number of demand (read+write) MSHR misses @@ -640,8 +636,8 @@ 
system.cpu.l2cache.mshr_cap_events 0 # nu system.cpu.l2cache.mshr_cap_events_0 0 # number of times MSHR cap was activated system.cpu.l2cache.mshr_cap_events_1 0 # number of times MSHR cap was activated system.cpu.l2cache.no_allocate_misses 0 # Number of misses that were no-allocate -system.cpu.l2cache.overall_accesses 966 # number of overall (read+write) accesses -system.cpu.l2cache.overall_accesses_0 966 # number of overall (read+write) accesses +system.cpu.l2cache.overall_accesses 962 # number of overall (read+write) accesses +system.cpu.l2cache.overall_accesses_0 962 # number of overall (read+write) accesses system.cpu.l2cache.overall_accesses_1 0 # number of overall (read+write) accesses system.cpu.l2cache.overall_avg_miss_latency 2.059561 # average overall miss latency system.cpu.l2cache.overall_avg_miss_latency_0 2.059561 # average overall miss latency @@ -652,14 +648,14 @@ system.cpu.l2cache.overall_avg_mshr_miss_latency_1 system.cpu.l2cache.overall_avg_mshr_uncacheable_latency # average overall mshr uncacheable latency system.cpu.l2cache.overall_avg_mshr_uncacheable_latency_0 # average overall mshr uncacheable latency system.cpu.l2cache.overall_avg_mshr_uncacheable_latency_1 # average overall mshr uncacheable latency -system.cpu.l2cache.overall_hits 9 # number of overall hits -system.cpu.l2cache.overall_hits_0 9 # number of overall hits +system.cpu.l2cache.overall_hits 5 # number of overall hits +system.cpu.l2cache.overall_hits_0 5 # number of overall hits system.cpu.l2cache.overall_hits_1 0 # number of overall hits system.cpu.l2cache.overall_miss_latency 1971 # number of overall miss cycles system.cpu.l2cache.overall_miss_latency_0 1971 # number of overall miss cycles system.cpu.l2cache.overall_miss_latency_1 0 # number of overall miss cycles -system.cpu.l2cache.overall_miss_rate 0.990683 # miss rate for overall accesses -system.cpu.l2cache.overall_miss_rate_0 0.990683 # miss rate for overall accesses +system.cpu.l2cache.overall_miss_rate 0.994802 # miss rate for overall accesses +system.cpu.l2cache.overall_miss_rate_0 0.994802 # miss rate for overall accesses system.cpu.l2cache.overall_miss_rate_1 # miss rate for overall accesses system.cpu.l2cache.overall_misses 957 # number of overall misses system.cpu.l2cache.overall_misses_0 957 # number of overall misses @@ -670,8 +666,8 @@ system.cpu.l2cache.overall_mshr_hits_1 0 # nu system.cpu.l2cache.overall_mshr_miss_latency 957 # number of overall MSHR miss cycles system.cpu.l2cache.overall_mshr_miss_latency_0 957 # number of overall MSHR miss cycles system.cpu.l2cache.overall_mshr_miss_latency_1 0 # number of overall MSHR miss cycles -system.cpu.l2cache.overall_mshr_miss_rate 0.990683 # mshr miss rate for overall accesses -system.cpu.l2cache.overall_mshr_miss_rate_0 0.990683 # mshr miss rate for overall accesses +system.cpu.l2cache.overall_mshr_miss_rate 0.994802 # mshr miss rate for overall accesses +system.cpu.l2cache.overall_mshr_miss_rate_0 0.994802 # mshr miss rate for overall accesses system.cpu.l2cache.overall_mshr_miss_rate_1 # mshr miss rate for overall accesses system.cpu.l2cache.overall_mshr_misses 957 # number of overall MSHR misses system.cpu.l2cache.overall_mshr_misses_0 957 # number of overall MSHR misses @@ -699,7 +695,7 @@ system.cpu.l2cache.soft_prefetch_mshr_full 0 # system.cpu.l2cache.soft_prefetch_mshr_full_0 0 # number of mshr full events for SW prefetching instrutions system.cpu.l2cache.soft_prefetch_mshr_full_1 0 # number of mshr full events for SW prefetching instrutions system.cpu.l2cache.tagsinuse 558.911632 
# Cycle average of tags in use -system.cpu.l2cache.total_refs 9 # Total number of references to valid blocks. +system.cpu.l2cache.total_refs 5 # Total number of references to valid blocks. system.cpu.l2cache.warmup_cycle 0 # Cycle when the warmup percentage was hit. system.cpu.l2cache.writebacks 0 # number of writebacks system.cpu.l2cache.writebacks_0 0 # number of writebacks diff --git a/tests/quick/01.hello-2T-smt/ref/alpha/linux/o3-timing/stderr b/tests/quick/01.hello-2T-smt/ref/alpha/linux/o3-timing/stderr index 890488cd2..48d711163 100644 --- a/tests/quick/01.hello-2T-smt/ref/alpha/linux/o3-timing/stderr +++ b/tests/quick/01.hello-2T-smt/ref/alpha/linux/o3-timing/stderr @@ -2,3 +2,21 @@ warn: Entering event queue @ 0. Starting simulation... warn: cycle 0: fault (page_table_fault) detected @ PC 0x000000 warn: Increasing stack 0x11ff92000:0x11ff9b000 to 0x11ff90000:0x11ff9b000 because of access to 0x11ff91ff0 warn: Increasing stack 0x11ff92000:0x11ff9b000 to 0x11ff90000:0x11ff9b000 because of access to 0x11ff91ff0 +warn: Default fetch doesn't update it's state from a functional call. +warn: Default fetch doesn't update it's state from a functional call. +warn: Default fetch doesn't update it's state from a functional call. +warn: Default fetch doesn't update it's state from a functional call. +warn: Default fetch doesn't update it's state from a functional call. +warn: Default fetch doesn't update it's state from a functional call. +warn: Default fetch doesn't update it's state from a functional call. +warn: Default fetch doesn't update it's state from a functional call. +warn: Default fetch doesn't update it's state from a functional call. +warn: Default fetch doesn't update it's state from a functional call. +warn: Default fetch doesn't update it's state from a functional call. +warn: Default fetch doesn't update it's state from a functional call. +warn: Default fetch doesn't update it's state from a functional call. +warn: Default fetch doesn't update it's state from a functional call. +warn: Default fetch doesn't update it's state from a functional call. +warn: Default fetch doesn't update it's state from a functional call. +warn: Default fetch doesn't update it's state from a functional call. +warn: Default fetch doesn't update it's state from a functional call. 
diff --git a/tests/quick/01.hello-2T-smt/ref/alpha/linux/o3-timing/stdout b/tests/quick/01.hello-2T-smt/ref/alpha/linux/o3-timing/stdout index 6b640d359..41cca6f14 100644 --- a/tests/quick/01.hello-2T-smt/ref/alpha/linux/o3-timing/stdout +++ b/tests/quick/01.hello-2T-smt/ref/alpha/linux/o3-timing/stdout @@ -7,8 +7,8 @@ The Regents of The University of Michigan All Rights Reserved -M5 compiled Oct 8 2006 14:00:39 -M5 started Sun Oct 8 14:00:56 2006 +M5 compiled Oct 8 2006 20:54:51 +M5 started Sun Oct 8 20:55:24 2006 M5 executing on zizzer.eecs.umich.edu command line: build/ALPHA_SE/m5.opt -d build/ALPHA_SE/tests/opt/quick/01.hello-2T-smt/alpha/linux/o3-timing tests/run.py quick/01.hello-2T-smt/alpha/linux/o3-timing Exiting @ tick 8441 because target called exit() diff --git a/tests/quick/20.eio-short/ref/alpha/eio/simple-timing/m5stats.txt b/tests/quick/20.eio-short/ref/alpha/eio/simple-timing/m5stats.txt index 2a6a055ab..ebc70e1f0 100644 --- a/tests/quick/20.eio-short/ref/alpha/eio/simple-timing/m5stats.txt +++ b/tests/quick/20.eio-short/ref/alpha/eio/simple-timing/m5stats.txt @@ -1,9 +1,9 @@ ---------- Begin Simulation Statistics ---------- -host_inst_rate 598582 # Simulator instruction rate (inst/s) -host_mem_usage 159216 # Number of bytes of host memory used -host_seconds 0.84 # Real time elapsed on the host -host_tick_rate 816632 # Simulator tick rate (ticks/s) +host_inst_rate 620088 # Simulator instruction rate (inst/s) +host_mem_usage 159272 # Number of bytes of host memory used +host_seconds 0.81 # Real time elapsed on the host +host_tick_rate 845969 # Simulator tick rate (ticks/s) sim_freq 1000000000000 # Frequency of simulated ticks sim_insts 500000 # Number of instructions simulated sim_seconds 0.000001 # Number of seconds simulated @@ -115,7 +115,7 @@ system.cpu.icache.no_allocate_misses 0 # Nu system.cpu.icache.overall_accesses 500000 # number of overall (read+write) accesses system.cpu.icache.overall_avg_miss_latency 3 # average overall miss latency system.cpu.icache.overall_avg_mshr_miss_latency 2 # average overall mshr miss latency -system.cpu.icache.overall_avg_mshr_uncacheable_latency no value # average overall mshr uncacheable latency +system.cpu.icache.overall_avg_mshr_uncacheable_latency # average overall mshr uncacheable latency system.cpu.icache.overall_hits 499597 # number of overall hits system.cpu.icache.overall_miss_latency 1209 # number of overall miss cycles system.cpu.icache.overall_miss_rate 0.000806 # miss rate for overall accesses @@ -152,41 +152,39 @@ system.cpu.l2cache.ReadReq_misses 857 # nu system.cpu.l2cache.ReadReq_mshr_miss_latency 857 # number of ReadReq MSHR miss cycles system.cpu.l2cache.ReadReq_mshr_miss_rate 1 # mshr miss rate for ReadReq accesses system.cpu.l2cache.ReadReq_mshr_misses 857 # number of ReadReq MSHR misses -system.cpu.l2cache.WriteReq_accesses 165 # number of WriteReq accesses(hits+misses) -system.cpu.l2cache.WriteReq_hits 165 # number of WriteReq hits system.cpu.l2cache.avg_blocked_cycles_no_mshrs # average number of cycles each access was blocked system.cpu.l2cache.avg_blocked_cycles_no_targets # average number of cycles each access was blocked -system.cpu.l2cache.avg_refs 0.192532 # Average number of references to valid blocks. +system.cpu.l2cache.avg_refs 0 # Average number of references to valid blocks. 
system.cpu.l2cache.blocked_no_mshrs 0 # number of cycles access was blocked system.cpu.l2cache.blocked_no_targets 0 # number of cycles access was blocked system.cpu.l2cache.blocked_cycles_no_mshrs 0 # number of cycles access was blocked system.cpu.l2cache.blocked_cycles_no_targets 0 # number of cycles access was blocked system.cpu.l2cache.cache_copies 0 # number of cache copies performed -system.cpu.l2cache.demand_accesses 1022 # number of demand (read+write) accesses +system.cpu.l2cache.demand_accesses 857 # number of demand (read+write) accesses system.cpu.l2cache.demand_avg_miss_latency 2 # average overall miss latency system.cpu.l2cache.demand_avg_mshr_miss_latency 1 # average overall mshr miss latency -system.cpu.l2cache.demand_hits 165 # number of demand (read+write) hits +system.cpu.l2cache.demand_hits 0 # number of demand (read+write) hits system.cpu.l2cache.demand_miss_latency 1714 # number of demand (read+write) miss cycles -system.cpu.l2cache.demand_miss_rate 0.838552 # miss rate for demand accesses +system.cpu.l2cache.demand_miss_rate 1 # miss rate for demand accesses system.cpu.l2cache.demand_misses 857 # number of demand (read+write) misses system.cpu.l2cache.demand_mshr_hits 0 # number of demand (read+write) MSHR hits system.cpu.l2cache.demand_mshr_miss_latency 857 # number of demand (read+write) MSHR miss cycles -system.cpu.l2cache.demand_mshr_miss_rate 0.838552 # mshr miss rate for demand accesses +system.cpu.l2cache.demand_mshr_miss_rate 1 # mshr miss rate for demand accesses system.cpu.l2cache.demand_mshr_misses 857 # number of demand (read+write) MSHR misses system.cpu.l2cache.fast_writes 0 # number of fast writes performed system.cpu.l2cache.mshr_cap_events 0 # number of times MSHR cap was activated system.cpu.l2cache.no_allocate_misses 0 # Number of misses that were no-allocate -system.cpu.l2cache.overall_accesses 1022 # number of overall (read+write) accesses +system.cpu.l2cache.overall_accesses 857 # number of overall (read+write) accesses system.cpu.l2cache.overall_avg_miss_latency 2 # average overall miss latency system.cpu.l2cache.overall_avg_mshr_miss_latency 1 # average overall mshr miss latency system.cpu.l2cache.overall_avg_mshr_uncacheable_latency # average overall mshr uncacheable latency -system.cpu.l2cache.overall_hits 165 # number of overall hits +system.cpu.l2cache.overall_hits 0 # number of overall hits system.cpu.l2cache.overall_miss_latency 1714 # number of overall miss cycles -system.cpu.l2cache.overall_miss_rate 0.838552 # miss rate for overall accesses +system.cpu.l2cache.overall_miss_rate 1 # miss rate for overall accesses system.cpu.l2cache.overall_misses 857 # number of overall misses system.cpu.l2cache.overall_mshr_hits 0 # number of overall MSHR hits system.cpu.l2cache.overall_mshr_miss_latency 857 # number of overall MSHR miss cycles -system.cpu.l2cache.overall_mshr_miss_rate 0.838552 # mshr miss rate for overall accesses +system.cpu.l2cache.overall_mshr_miss_rate 1 # mshr miss rate for overall accesses system.cpu.l2cache.overall_mshr_misses 857 # number of overall MSHR misses system.cpu.l2cache.overall_mshr_uncacheable_latency 0 # number of overall MSHR uncacheable cycles system.cpu.l2cache.overall_mshr_uncacheable_misses 0 # number of overall MSHR uncacheable misses @@ -203,7 +201,7 @@ system.cpu.l2cache.replacements 0 # nu system.cpu.l2cache.sampled_refs 857 # Sample count of references to valid blocks. 
system.cpu.l2cache.soft_prefetch_mshr_full 0 # number of mshr full events for SW prefetching instrutions system.cpu.l2cache.tagsinuse 560.393094 # Cycle average of tags in use -system.cpu.l2cache.total_refs 165 # Total number of references to valid blocks. +system.cpu.l2cache.total_refs 0 # Total number of references to valid blocks. system.cpu.l2cache.warmup_cycle 0 # Cycle when the warmup percentage was hit. system.cpu.l2cache.writebacks 0 # number of writebacks system.cpu.not_idle_fraction 1 # Percentage of non-idle cycles diff --git a/tests/quick/20.eio-short/ref/alpha/eio/simple-timing/stdout b/tests/quick/20.eio-short/ref/alpha/eio/simple-timing/stdout index 70c3f2454..076cf0a5a 100644 --- a/tests/quick/20.eio-short/ref/alpha/eio/simple-timing/stdout +++ b/tests/quick/20.eio-short/ref/alpha/eio/simple-timing/stdout @@ -7,8 +7,8 @@ The Regents of The University of Michigan All Rights Reserved -M5 compiled Oct 8 2006 14:00:39 -M5 started Sun Oct 8 14:00:59 2006 +M5 compiled Oct 8 2006 20:54:51 +M5 started Sun Oct 8 20:55:29 2006 M5 executing on zizzer.eecs.umich.edu command line: build/ALPHA_SE/m5.opt -d build/ALPHA_SE/tests/opt/quick/20.eio-short/alpha/eio/simple-timing tests/run.py quick/20.eio-short/alpha/eio/simple-timing Exiting @ tick 682488 because a thread reached the max instruction count -- cgit v1.2.3 From 31f3f2421454b8ba3286f6e536bcc58af5debf48 Mon Sep 17 00:00:00 2001 From: Steve Reinhardt Date: Sun, 8 Oct 2006 18:26:59 -0700 Subject: Fixes for Port proxies and proxy parameters. --HG-- extra : convert_revision : 76b16fe2926611bd1c12c8ad7392355ad30a5138 --- src/python/m5/params.py | 2 +- src/python/m5/proxy.py | 17 +++++++++++++---- 2 files changed, 14 insertions(+), 5 deletions(-) diff --git a/src/python/m5/params.py b/src/python/m5/params.py index cbbd23004..93d784181 100644 --- a/src/python/m5/params.py +++ b/src/python/m5/params.py @@ -804,7 +804,7 @@ class PortRef(object): newRef.simobj = simobj assert(isSimObject(newRef.simobj)) if self.peer and not proxy.isproxy(self.peer): - peerObj = memo[self.peer.simobj] + peerObj = self.peer.simobj(_memo=memo) newRef.peer = self.peer.clone(peerObj, memo) assert(not isinstance(newRef.peer, VectorPortRef)) return newRef diff --git a/src/python/m5/proxy.py b/src/python/m5/proxy.py index 7ebc0ae19..e539f14ee 100644 --- a/src/python/m5/proxy.py +++ b/src/python/m5/proxy.py @@ -33,6 +33,8 @@ # ##################################################################### +import copy + class BaseProxy(object): def __init__(self, search_self, search_up): self._search_self = search_self @@ -129,15 +131,22 @@ class AttrProxy(BaseProxy): return super(AttrProxy, self).__getattr__(self, attr) if hasattr(self, '_pdesc'): raise AttributeError, "Attribute reference on bound proxy" - self._modifiers.append(attr) - return self + # Return a copy of self rather than modifying self in place + # since self could be an indirect reference via a variable or + # parameter + new_self = copy.deepcopy(self) + new_self._modifiers.append(attr) + return new_self # support indexing on proxies (e.g., Self.cpu[0]) def __getitem__(self, key): if not isinstance(key, int): raise TypeError, "Proxy object requires integer index" - self._modifiers.append(key) - return self + if hasattr(self, '_pdesc'): + raise AttributeError, "Index operation on bound proxy" + new_self = copy.deepcopy(self) + new_self._modifiers.append(key) + return new_self def find(self, obj): try: -- cgit v1.2.3 From ce6c752ede773388da21dd05f6eff20398a1f447 Mon Sep 17 00:00:00 2001 From: Lisa Hsu Date: 
Sun, 8 Oct 2006 22:05:34 -0400 Subject: update for m5 base linux. (the last changes were for the latest m5hack, i.e. with nate's stuff in it). tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-atomic-dual/console.system.sim_console: tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-atomic-dual/m5stats.txt: tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-atomic-dual/stderr: tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-atomic-dual/stdout: tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-atomic/console.system.sim_console: tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-atomic/m5stats.txt: tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-atomic/stderr: tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-atomic/stdout: tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-timing-dual/console.system.sim_console: tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-timing-dual/m5stats.txt: tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-timing-dual/stderr: tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-timing-dual/stdout: tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-timing/console.system.sim_console: tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-timing/m5stats.txt: tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-timing/stderr: tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-timing/stdout: update for m5 base linux. --HG-- extra : convert_revision : c78a1748bf8a0950450c29a7b96bb8735c1bb3d2 --- .../console.system.sim_console | 8 +- .../linux/tsunami-simple-atomic-dual/m5stats.txt | 377 ++++++++++----------- .../alpha/linux/tsunami-simple-atomic-dual/stderr | 6 +- .../alpha/linux/tsunami-simple-atomic-dual/stdout | 8 +- .../console.system.sim_console | 8 +- .../alpha/linux/tsunami-simple-atomic/m5stats.txt | 102 +++--- .../ref/alpha/linux/tsunami-simple-atomic/stderr | 4 +- .../ref/alpha/linux/tsunami-simple-atomic/stdout | 8 +- .../console.system.sim_console | 8 +- .../linux/tsunami-simple-timing-dual/m5stats.txt | 168 ++++----- .../alpha/linux/tsunami-simple-timing-dual/stderr | 6 +- .../alpha/linux/tsunami-simple-timing-dual/stdout | 8 +- .../console.system.sim_console | 8 +- .../alpha/linux/tsunami-simple-timing/m5stats.txt | 98 +++--- .../ref/alpha/linux/tsunami-simple-timing/stderr | 4 +- .../ref/alpha/linux/tsunami-simple-timing/stdout | 8 +- 16 files changed, 410 insertions(+), 419 deletions(-) diff --git a/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-atomic-dual/console.system.sim_console b/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-atomic-dual/console.system.sim_console index 57a610390..27adebb82 100644 --- a/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-atomic-dual/console.system.sim_console +++ b/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-atomic-dual/console.system.sim_console @@ -3,7 +3,7 @@ M5 console: m5AlphaAccess @ 0xFFFFFD0200000000 memsize 8000000 pages 4000 First free page after ROM 0xFFFFFC0000018000 HWRPB 0xFFFFFC0000018000 l1pt 0xFFFFFC0000040000 l2pt 0xFFFFFC0000042000 l3pt_rpb 0xFFFFFC0000044000 l3pt_kernel 0xFFFFFC0000048000 l2reserv 0xFFFFFC0000046000 - kstart = 0xFFFFFC0000310000, kend = 0xFFFFFC0000855718, kentry = 0xFFFFFC0000310000, numCPUs = 0x2 + kstart = 0xFFFFFC0000310000, kend = 0xFFFFFC0000855898, kentry = 0xFFFFFC0000310000, numCPUs = 0x2 CPU Clock at 2000 MHz IntrClockFrequency=1024 Booting with 2 processor(s) KSP: 0x20043FE8 PTBR 0x20 @@ -19,7 +19,7 @@ M5 console: m5AlphaAccess @ 
0xFFFFFD0200000000 jumping to kernel at 0xFFFFFC0000310000, (PCBB 0xFFFFFC0000018180 pfn 1067) CallbackFixup 0 18000, t7=FFFFFC000070C000 Entering slaveloop for cpu 1 my_rpb=FFFFFC0000018400 - Linux version 2.6.13 (hsul@zed.eecs.umich.edu) (gcc version 3.4.3) #7 SMP Tue Aug 15 14:40:31 EDT 2006 + Linux version 2.6.13 (hsul@zed.eecs.umich.edu) (gcc version 3.4.3) #1 SMP Sun Oct 8 19:52:07 EDT 2006 Booting GENERIC on Tsunami variation DP264 using machine vector DP264 from SRM Major Options: SMP LEGACY_START VERBOSE_MCHECK Command line: root=/dev/hda1 console=ttyS0 @@ -35,7 +35,7 @@ M5 console: m5AlphaAccess @ 0xFFFFFD0200000000 Console: colour dummy device 80x25 Dentry cache hash table entries: 32768 (order: 5, 262144 bytes) Inode-cache hash table entries: 16384 (order: 4, 131072 bytes) - Memory: 118784k/131072k available (3316k kernel code, 8952k reserved, 983k data, 224k init) + Memory: 118784k/131072k available (3314k kernel code, 8952k reserved, 983k data, 224k init) Mount-cache hash table entries: 512 SMP starting up secondaries. Slave CPU 1 console command START @@ -59,9 +59,7 @@ SlaveCmd: restart FFFFFC0000310020 FFFFFC0000310020 vptb FFFFFFFE00000000 my_rpb io scheduler cfq registered loop: loaded (max 8 devices) nbd: registered device at major 43 - sinic.c: M5 Simple Integrated NIC driver ns83820.c: National Semiconductor DP83820 10/100/1000 driver. - ns83820: irq bound to CPU 1 eth0: ns83820.c: 0x22c: 00000000, subsystem: 0000:0000 eth0: enabling optical transceiver eth0: using 64 bit addressing. diff --git a/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-atomic-dual/m5stats.txt b/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-atomic-dual/m5stats.txt index 537721d92..e76c1d683 100644 --- a/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-atomic-dual/m5stats.txt +++ b/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-atomic-dual/m5stats.txt @@ -1,219 +1,218 @@ ---------- Begin Simulation Statistics ---------- -host_inst_rate 1292093 # Simulator instruction rate (inst/s) -host_mem_usage 197872 # Number of bytes of host memory used -host_seconds 51.53 # Real time elapsed on the host -host_tick_rate 72118724 # Simulator tick rate (ticks/s) +host_inst_rate 1270607 # Simulator instruction rate (inst/s) +host_mem_usage 197696 # Number of bytes of host memory used +host_seconds 51.09 # Real time elapsed on the host +host_tick_rate 72782461 # Simulator tick rate (ticks/s) sim_freq 2000000000 # Frequency of simulated ticks -sim_insts 66579941 # Number of instructions simulated -sim_seconds 1.858108 # Number of seconds simulated -sim_ticks 3716216351 # Number of ticks simulated -system.cpu0.dtb.accesses 604194 # DTB accesses -system.cpu0.dtb.acv 337 # DTB access violations -system.cpu0.dtb.hits 12597930 # DTB hits -system.cpu0.dtb.misses 7857 # DTB misses -system.cpu0.dtb.read_accesses 426113 # DTB read accesses +sim_insts 64909600 # Number of instructions simulated +sim_seconds 1.859078 # Number of seconds simulated +sim_ticks 3718155709 # Number of ticks simulated +system.cpu0.dtb.accesses 544556 # DTB accesses +system.cpu0.dtb.acv 335 # DTB access violations +system.cpu0.dtb.hits 14841931 # DTB hits +system.cpu0.dtb.misses 7356 # DTB misses +system.cpu0.dtb.read_accesses 377530 # DTB read accesses system.cpu0.dtb.read_acv 210 # DTB read access violations -system.cpu0.dtb.read_hits 7793080 # DTB read hits -system.cpu0.dtb.read_misses 7107 # DTB read misses -system.cpu0.dtb.write_accesses 178081 # DTB write accesses -system.cpu0.dtb.write_acv 127 # DTB 
write access violations
-system.cpu0.dtb.write_hits 4804850 # DTB write hits
-system.cpu0.dtb.write_misses 750 # DTB write misses
-system.cpu0.idle_fraction 0.986701 # Percentage of idle cycles
-system.cpu0.itb.accesses 1567177 # ITB accesses
+system.cpu0.dtb.read_hits 8970576 # DTB read hits
+system.cpu0.dtb.read_misses 6581 # DTB read misses
+system.cpu0.dtb.write_accesses 167026 # DTB write accesses
+system.cpu0.dtb.write_acv 125 # DTB write access violations
+system.cpu0.dtb.write_hits 5871355 # DTB write hits
+system.cpu0.dtb.write_misses 775 # DTB write misses
+system.cpu0.idle_fraction 0.984943 # Percentage of idle cycles
+system.cpu0.itb.accesses 1436270 # ITB accesses
 system.cpu0.itb.acv 184 # ITB acv
-system.cpu0.itb.hits 1563535 # ITB hits
-system.cpu0.itb.misses 3642 # ITB misses
-system.cpu0.kern.callpal 140535 # number of callpals executed
+system.cpu0.itb.hits 1432801 # ITB hits
+system.cpu0.itb.misses 3469 # ITB misses
+system.cpu0.kern.callpal 182754 # number of callpals executed
 system.cpu0.kern.callpal_cserve 1 0.00% 0.00% # number of callpals executed
-system.cpu0.kern.callpal_wripir 567 0.40% 0.40% # number of callpals executed
-system.cpu0.kern.callpal_wrmces 1 0.00% 0.40% # number of callpals executed
-system.cpu0.kern.callpal_wrfen 1 0.00% 0.41% # number of callpals executed
-system.cpu0.kern.callpal_wrvptptr 1 0.00% 0.41% # number of callpals executed
-system.cpu0.kern.callpal_swpctx 2926 2.08% 2.49% # number of callpals executed
-system.cpu0.kern.callpal_tbi 49 0.03% 2.52% # number of callpals executed
-system.cpu0.kern.callpal_wrent 7 0.00% 2.53% # number of callpals executed
-system.cpu0.kern.callpal_swpipl 126411 89.95% 92.48% # number of callpals executed
-system.cpu0.kern.callpal_rdps 5784 4.12% 96.59% # number of callpals executed
-system.cpu0.kern.callpal_wrkgp 1 0.00% 96.59% # number of callpals executed
-system.cpu0.kern.callpal_wrusp 2 0.00% 96.60% # number of callpals executed
-system.cpu0.kern.callpal_rdusp 9 0.01% 96.60% # number of callpals executed
-system.cpu0.kern.callpal_whami 2 0.00% 96.60% # number of callpals executed
-system.cpu0.kern.callpal_rti 4273 3.04% 99.64% # number of callpals executed
-system.cpu0.kern.callpal_callsys 366 0.26% 99.90% # number of callpals executed
-system.cpu0.kern.callpal_imb 134 0.10% 100.00% # number of callpals executed
+system.cpu0.kern.callpal_wripir 115 0.06% 0.06% # number of callpals executed
+system.cpu0.kern.callpal_wrmces 1 0.00% 0.06% # number of callpals executed
+system.cpu0.kern.callpal_wrfen 1 0.00% 0.06% # number of callpals executed
+system.cpu0.kern.callpal_wrvptptr 1 0.00% 0.07% # number of callpals executed
+system.cpu0.kern.callpal_swpctx 3791 2.07% 2.14% # number of callpals executed
+system.cpu0.kern.callpal_tbi 49 0.03% 2.17% # number of callpals executed
+system.cpu0.kern.callpal_wrent 7 0.00% 2.17% # number of callpals executed
+system.cpu0.kern.callpal_swpipl 167832 91.83% 94.01% # number of callpals executed
+system.cpu0.kern.callpal_rdps 5780 3.16% 97.17% # number of callpals executed
+system.cpu0.kern.callpal_wrkgp 1 0.00% 97.17% # number of callpals executed
+system.cpu0.kern.callpal_wrusp 2 0.00% 97.17% # number of callpals executed
+system.cpu0.kern.callpal_rdusp 9 0.00% 97.17% # number of callpals executed
+system.cpu0.kern.callpal_whami 2 0.00% 97.18% # number of callpals executed
+system.cpu0.kern.callpal_rti 4696 2.57% 99.75% # number of callpals executed
+system.cpu0.kern.callpal_callsys 344 0.19% 99.93% # number of callpals executed
+system.cpu0.kern.callpal_imb 122 0.07% 100.00% # number of callpals executed
 system.cpu0.kern.inst.arm 0 # number of arm instructions executed
-system.cpu0.kern.inst.hwrei 155157 # number of hwrei instructions executed
+system.cpu0.kern.inst.hwrei 196249 # number of hwrei instructions executed
 system.cpu0.kern.inst.ivlb 0 # number of ivlb instructions executed
 system.cpu0.kern.inst.ivle 0 # number of ivle instructions executed
-system.cpu0.kern.inst.quiesce 6712 # number of quiesce instructions executed
-system.cpu0.kern.ipl_count 133285 # number of times we switched to this ipl
-system.cpu0.kern.ipl_count_0 53228 39.94% 39.94% # number of times we switched to this ipl
-system.cpu0.kern.ipl_count_21 245 0.18% 40.12% # number of times we switched to this ipl
-system.cpu0.kern.ipl_count_22 1895 1.42% 41.54% # number of times we switched to this ipl
-system.cpu0.kern.ipl_count_30 460 0.35% 41.89% # number of times we switched to this ipl
-system.cpu0.kern.ipl_count_31 77457 58.11% 100.00% # number of times we switched to this ipl
-system.cpu0.kern.ipl_good 107676 # number of times we switched to this ipl from a different ipl
-system.cpu0.kern.ipl_good_0 52768 49.01% 49.01% # number of times we switched to this ipl from a different ipl
-system.cpu0.kern.ipl_good_21 245 0.23% 49.23% # number of times we switched to this ipl from a different ipl
-system.cpu0.kern.ipl_good_22 1895 1.76% 50.99% # number of times we switched to this ipl from a different ipl
-system.cpu0.kern.ipl_good_30 460 0.43% 51.42% # number of times we switched to this ipl from a different ipl
-system.cpu0.kern.ipl_good_31 52308 48.58% 100.00% # number of times we switched to this ipl from a different ipl
-system.cpu0.kern.ipl_ticks 3716215936 # number of cycles we spent at this ipl
-system.cpu0.kern.ipl_ticks_0 3683825506 99.13% 99.13% # number of cycles we spent at this ipl
-system.cpu0.kern.ipl_ticks_21 40474 0.00% 99.13% # number of cycles we spent at this ipl
-system.cpu0.kern.ipl_ticks_22 162970 0.00% 99.13% # number of cycles we spent at this ipl
-system.cpu0.kern.ipl_ticks_30 103364 0.00% 99.14% # number of cycles we spent at this ipl
-system.cpu0.kern.ipl_ticks_31 32083622 0.86% 100.00% # number of cycles we spent at this ipl
-system.cpu0.kern.ipl_used 0.807863 # fraction of swpipl calls that actually changed the ipl
-system.cpu0.kern.ipl_used_0 0.991358 # fraction of swpipl calls that actually changed the ipl
+system.cpu0.kern.inst.quiesce 6184 # number of quiesce instructions executed
+system.cpu0.kern.ipl_count 174678 # number of times we switched to this ipl
+system.cpu0.kern.ipl_count_0 70736 40.50% 40.50% # number of times we switched to this ipl
+system.cpu0.kern.ipl_count_21 245 0.14% 40.64% # number of times we switched to this ipl
+system.cpu0.kern.ipl_count_22 1896 1.09% 41.72% # number of times we switched to this ipl
+system.cpu0.kern.ipl_count_30 8 0.00% 41.73% # number of times we switched to this ipl
+system.cpu0.kern.ipl_count_31 101793 58.27% 100.00% # number of times we switched to this ipl
+system.cpu0.kern.ipl_good 140889 # number of times we switched to this ipl from a different ipl
+system.cpu0.kern.ipl_good_0 69374 49.24% 49.24% # number of times we switched to this ipl from a different ipl
+system.cpu0.kern.ipl_good_21 245 0.17% 49.41% # number of times we switched to this ipl from a different ipl
+system.cpu0.kern.ipl_good_22 1896 1.35% 50.76% # number of times we switched to this ipl from a different ipl
+system.cpu0.kern.ipl_good_30 8 0.01% 50.77% # number of times we switched to this ipl from a different ipl
+system.cpu0.kern.ipl_good_31 69366 49.23% 100.00% # number of times we switched to this ipl from a different ipl
+system.cpu0.kern.ipl_ticks 3718155294 # number of cycles we spent at this ipl
+system.cpu0.kern.ipl_ticks_0 3683661066 99.07% 99.07% # number of cycles we spent at this ipl
+system.cpu0.kern.ipl_ticks_21 40474 0.00% 99.07% # number of cycles we spent at this ipl
+system.cpu0.kern.ipl_ticks_22 163056 0.00% 99.08% # number of cycles we spent at this ipl
+system.cpu0.kern.ipl_ticks_30 2026 0.00% 99.08% # number of cycles we spent at this ipl
+system.cpu0.kern.ipl_ticks_31 34288672 0.92% 100.00% # number of cycles we spent at this ipl
+system.cpu0.kern.ipl_used 0.806564 # fraction of swpipl calls that actually changed the ipl
+system.cpu0.kern.ipl_used_0 0.980745 # fraction of swpipl calls that actually changed the ipl
 system.cpu0.kern.ipl_used_21 1 # fraction of swpipl calls that actually changed the ipl
 system.cpu0.kern.ipl_used_22 1 # fraction of swpipl calls that actually changed the ipl
 system.cpu0.kern.ipl_used_30 1 # fraction of swpipl calls that actually changed the ipl
-system.cpu0.kern.ipl_used_31 0.675317 # fraction of swpipl calls that actually changed the ipl
-system.cpu0.kern.mode_good_kernel 1221
-system.cpu0.kern.mode_good_user 1222
+system.cpu0.kern.ipl_used_31 0.681442 # fraction of swpipl calls that actually changed the ipl
+system.cpu0.kern.mode_good_kernel 1192
+system.cpu0.kern.mode_good_user 1193
 system.cpu0.kern.mode_good_idle 0
-system.cpu0.kern.mode_switch_kernel 6758 # number of protection mode switches
-system.cpu0.kern.mode_switch_user 1222 # number of protection mode switches
+system.cpu0.kern.mode_switch_kernel 7143 # number of protection mode switches
+system.cpu0.kern.mode_switch_user 1193 # number of protection mode switches
 system.cpu0.kern.mode_switch_idle 0 # number of protection mode switches
-system.cpu0.kern.mode_switch_good 0.306140 # fraction of useful protection mode switches
-system.cpu0.kern.mode_switch_good_kernel 0.180675 # fraction of useful protection mode switches
+system.cpu0.kern.mode_switch_good 0.286108 # fraction of useful protection mode switches
+system.cpu0.kern.mode_switch_good_kernel 0.166877 # fraction of useful protection mode switches
 system.cpu0.kern.mode_switch_good_user 1 # fraction of useful protection mode switches
 system.cpu0.kern.mode_switch_good_idle # fraction of useful protection mode switches
-system.cpu0.kern.mode_ticks_kernel 3714429703 99.95% 99.95% # number of ticks spent at the given mode
-system.cpu0.kern.mode_ticks_user 1786231 0.05% 100.00% # number of ticks spent at the given mode
+system.cpu0.kern.mode_ticks_kernel 3716512331 99.96% 99.96% # number of ticks spent at the given mode
+system.cpu0.kern.mode_ticks_user 1642961 0.04% 100.00% # number of ticks spent at the given mode
 system.cpu0.kern.mode_ticks_idle 0 0.00% 100.00% # number of ticks spent at the given mode
-system.cpu0.kern.swap_context 2927 # number of times the context was actually changed
-system.cpu0.kern.syscall 217 # number of syscalls executed
-system.cpu0.kern.syscall_fork 8 3.69% 3.69% # number of syscalls executed
-system.cpu0.kern.syscall_read 19 8.76% 12.44% # number of syscalls executed
-system.cpu0.kern.syscall_write 3 1.38% 13.82% # number of syscalls executed
-system.cpu0.kern.syscall_close 31 14.29% 28.11% # number of syscalls executed
-system.cpu0.kern.syscall_chdir 1 0.46% 28.57% # number of syscalls executed
-system.cpu0.kern.syscall_obreak 6 2.76% 31.34% # number of syscalls executed
-system.cpu0.kern.syscall_lseek 10 4.61% 35.94% # number of syscalls executed
-system.cpu0.kern.syscall_getpid 6 2.76% 38.71% # number of syscalls executed
-system.cpu0.kern.syscall_setuid 2 0.92% 39.63% # number of syscalls executed
-system.cpu0.kern.syscall_getuid 4 1.84% 41.47% # number of syscalls executed
-system.cpu0.kern.syscall_access 6 2.76% 44.24% # number of syscalls executed
-system.cpu0.kern.syscall_dup 2 0.92% 45.16% # number of syscalls executed
-system.cpu0.kern.syscall_open 33 15.21% 60.37% # number of syscalls executed
-system.cpu0.kern.syscall_getgid 4 1.84% 62.21% # number of syscalls executed
-system.cpu0.kern.syscall_sigprocmask 10 4.61% 66.82% # number of syscalls executed
-system.cpu0.kern.syscall_ioctl 9 4.15% 70.97% # number of syscalls executed
-system.cpu0.kern.syscall_execve 6 2.76% 73.73% # number of syscalls executed
-system.cpu0.kern.syscall_mmap 25 11.52% 85.25% # number of syscalls executed
-system.cpu0.kern.syscall_munmap 3 1.38% 86.64% # number of syscalls executed
-system.cpu0.kern.syscall_mprotect 7 3.23% 89.86% # number of syscalls executed
-system.cpu0.kern.syscall_gethostname 1 0.46% 90.32% # number of syscalls executed
-system.cpu0.kern.syscall_dup2 3 1.38% 91.71% # number of syscalls executed
-system.cpu0.kern.syscall_fcntl 8 3.69% 95.39% # number of syscalls executed
-system.cpu0.kern.syscall_socket 2 0.92% 96.31% # number of syscalls executed
-system.cpu0.kern.syscall_connect 2 0.92% 97.24% # number of syscalls executed
-system.cpu0.kern.syscall_setgid 2 0.92% 98.16% # number of syscalls executed
-system.cpu0.kern.syscall_getrlimit 2 0.92% 99.08% # number of syscalls executed
-system.cpu0.kern.syscall_setsid 2 0.92% 100.00% # number of syscalls executed
-system.cpu0.not_idle_fraction 0.013299 # Percentage of non-idle cycles
-system.cpu0.numCycles 49421041 # number of cpu cycles simulated
-system.cpu0.num_insts 49417215 # Number of instructions executed
-system.cpu0.num_refs 12829669 # Number of memory references
-system.cpu1.dtb.accesses 701326 # DTB accesses
-system.cpu1.dtb.acv 30 # DTB access violations
-system.cpu1.dtb.hits 5286923 # DTB hits
-system.cpu1.dtb.misses 3658 # DTB misses
-system.cpu1.dtb.read_accesses 474933 # DTB read accesses
+system.cpu0.kern.swap_context 3792 # number of times the context was actually changed
+system.cpu0.kern.syscall 199 # number of syscalls executed
+system.cpu0.kern.syscall_fork 8 4.02% 4.02% # number of syscalls executed
+system.cpu0.kern.syscall_read 17 8.54% 12.56% # number of syscalls executed
+system.cpu0.kern.syscall_write 4 2.01% 14.57% # number of syscalls executed
+system.cpu0.kern.syscall_close 29 14.57% 29.15% # number of syscalls executed
+system.cpu0.kern.syscall_chdir 1 0.50% 29.65% # number of syscalls executed
+system.cpu0.kern.syscall_obreak 4 2.01% 31.66% # number of syscalls executed
+system.cpu0.kern.syscall_lseek 10 5.03% 36.68% # number of syscalls executed
+system.cpu0.kern.syscall_getpid 6 3.02% 39.70% # number of syscalls executed
+system.cpu0.kern.syscall_setuid 1 0.50% 40.20% # number of syscalls executed
+system.cpu0.kern.syscall_getuid 3 1.51% 41.71% # number of syscalls executed
+system.cpu0.kern.syscall_access 6 3.02% 44.72% # number of syscalls executed
+system.cpu0.kern.syscall_dup 2 1.01% 45.73% # number of syscalls executed
+system.cpu0.kern.syscall_open 31 15.58% 61.31% # number of syscalls executed
+system.cpu0.kern.syscall_getgid 3 1.51% 62.81% # number of syscalls executed
+system.cpu0.kern.syscall_sigprocmask 10 5.03% 67.84% # number of syscalls executed
+system.cpu0.kern.syscall_ioctl 9 4.52% 72.36% # number of syscalls executed
+system.cpu0.kern.syscall_execve 6 3.02% 75.38% # number of syscalls executed
+system.cpu0.kern.syscall_mmap 20 10.05% 85.43% # number of syscalls executed
+system.cpu0.kern.syscall_munmap 3 1.51% 86.93% # number of syscalls executed
+system.cpu0.kern.syscall_mprotect 5 2.51% 89.45% # number of syscalls executed
+system.cpu0.kern.syscall_gethostname 1 0.50% 89.95% # number of syscalls executed
+system.cpu0.kern.syscall_dup2 3 1.51% 91.46% # number of syscalls executed
+system.cpu0.kern.syscall_fcntl 8 4.02% 95.48% # number of syscalls executed
+system.cpu0.kern.syscall_socket 2 1.01% 96.48% # number of syscalls executed
+system.cpu0.kern.syscall_connect 2 1.01% 97.49% # number of syscalls executed
+system.cpu0.kern.syscall_setgid 1 0.50% 97.99% # number of syscalls executed
+system.cpu0.kern.syscall_getrlimit 2 1.01% 98.99% # number of syscalls executed
+system.cpu0.kern.syscall_setsid 2 1.01% 100.00% # number of syscalls executed
+system.cpu0.not_idle_fraction 0.015057 # Percentage of non-idle cycles
+system.cpu0.numCycles 55984201 # number of cpu cycles simulated
+system.cpu0.num_insts 55980548 # Number of instructions executed
+system.cpu0.num_refs 15081320 # Number of memory references
+system.cpu1.dtb.accesses 761000 # DTB accesses
+system.cpu1.dtb.acv 32 # DTB access violations
+system.cpu1.dtb.hits 2653187 # DTB hits
+system.cpu1.dtb.misses 4173 # DTB misses
+system.cpu1.dtb.read_accesses 523552 # DTB read accesses
 system.cpu1.dtb.read_acv 0 # DTB read access violations
-system.cpu1.dtb.read_hits 3100008 # DTB read hits
-system.cpu1.dtb.read_misses 3260 # DTB read misses
-system.cpu1.dtb.write_accesses 226393 # DTB write accesses
-system.cpu1.dtb.write_acv 30 # DTB write access violations
-system.cpu1.dtb.write_hits 2186915 # DTB write hits
-system.cpu1.dtb.write_misses 398 # DTB write misses
-system.cpu1.idle_fraction 0.995381 # Percentage of idle cycles
-system.cpu1.itb.accesses 1714255 # ITB accesses
+system.cpu1.dtb.read_hits 1675663 # DTB read hits
+system.cpu1.dtb.read_misses 3798 # DTB read misses
+system.cpu1.dtb.write_accesses 237448 # DTB write accesses
+system.cpu1.dtb.write_acv 32 # DTB write access violations
+system.cpu1.dtb.write_hits 977524 # DTB write hits
+system.cpu1.dtb.write_misses 375 # DTB write misses
+system.cpu1.idle_fraction 0.997598 # Percentage of idle cycles
+system.cpu1.itb.accesses 1845187 # ITB accesses
 system.cpu1.itb.acv 0 # ITB acv
-system.cpu1.itb.hits 1712856 # ITB hits
-system.cpu1.itb.misses 1399 # ITB misses
-system.cpu1.kern.callpal 81795 # number of callpals executed
+system.cpu1.itb.hits 1843600 # ITB hits
+system.cpu1.itb.misses 1587 # ITB misses
+system.cpu1.kern.callpal 34405 # number of callpals executed
 system.cpu1.kern.callpal_cserve 1 0.00% 0.00% # number of callpals executed
-system.cpu1.kern.callpal_wripir 460 0.56% 0.56% # number of callpals executed
-system.cpu1.kern.callpal_wrmces 1 0.00% 0.56% # number of callpals executed
-system.cpu1.kern.callpal_wrfen 1 0.00% 0.57% # number of callpals executed
-system.cpu1.kern.callpal_swpctx 2245 2.74% 3.31% # number of callpals executed
-system.cpu1.kern.callpal_tbi 4 0.00% 3.32% # number of callpals executed
-system.cpu1.kern.callpal_wrent 7 0.01% 3.32% # number of callpals executed
-system.cpu1.kern.callpal_swpipl 71908 87.91% 91.24% # number of callpals executed
-system.cpu1.kern.callpal_rdps 3034 3.71% 94.95% # number of callpals executed
-system.cpu1.kern.callpal_wrkgp 1 0.00% 94.95% # number of callpals executed
-system.cpu1.kern.callpal_wrusp 5 0.01% 94.95% # number of callpals executed
-system.cpu1.kern.callpal_whami 3 0.00% 94.96% # number of callpals executed
-system.cpu1.kern.callpal_rti 3913 4.78% 99.74% # number of callpals executed
-system.cpu1.kern.callpal_callsys 165 0.20% 99.94% # number of callpals executed
-system.cpu1.kern.callpal_imb 46 0.06% 100.00% # number of callpals executed
+system.cpu1.kern.callpal_wripir 8 0.02% 0.03% # number of callpals executed
+system.cpu1.kern.callpal_wrmces 1 0.00% 0.03% # number of callpals executed
+system.cpu1.kern.callpal_wrfen 1 0.00% 0.03% # number of callpals executed
+system.cpu1.kern.callpal_swpctx 468 1.36% 1.39% # number of callpals executed
+system.cpu1.kern.callpal_tbi 5 0.01% 1.41% # number of callpals executed
+system.cpu1.kern.callpal_wrent 7 0.02% 1.43% # number of callpals executed
+system.cpu1.kern.callpal_swpipl 28030 81.47% 82.90% # number of callpals executed
+system.cpu1.kern.callpal_rdps 3042 8.84% 91.74% # number of callpals executed
+system.cpu1.kern.callpal_wrkgp 1 0.00% 91.74% # number of callpals executed
+system.cpu1.kern.callpal_wrusp 5 0.01% 91.76% # number of callpals executed
+system.cpu1.kern.callpal_whami 3 0.01% 91.77% # number of callpals executed
+system.cpu1.kern.callpal_rti 2586 7.52% 99.28% # number of callpals executed
+system.cpu1.kern.callpal_callsys 187 0.54% 99.83% # number of callpals executed
+system.cpu1.kern.callpal_imb 59 0.17% 100.00% # number of callpals executed
 system.cpu1.kern.callpal_rdunique 1 0.00% 100.00% # number of callpals executed
 system.cpu1.kern.inst.arm 0 # number of arm instructions executed
-system.cpu1.kern.inst.hwrei 89345 # number of hwrei instructions executed
+system.cpu1.kern.inst.hwrei 42209 # number of hwrei instructions executed
 system.cpu1.kern.inst.ivlb 0 # number of ivlb instructions executed
 system.cpu1.kern.inst.ivle 0 # number of ivle instructions executed
-system.cpu1.kern.inst.quiesce 2592 # number of quiesce instructions executed
-system.cpu1.kern.ipl_count 78283 # number of times we switched to this ipl
-system.cpu1.kern.ipl_count_0 30809 39.36% 39.36% # number of times we switched to this ipl
-system.cpu1.kern.ipl_count_22 1894 2.42% 41.78% # number of times we switched to this ipl
-system.cpu1.kern.ipl_count_30 567 0.72% 42.50% # number of times we switched to this ipl
-system.cpu1.kern.ipl_count_31 45013 57.50% 100.00% # number of times we switched to this ipl
-system.cpu1.kern.ipl_good 61674 # number of times we switched to this ipl from a different ipl
-system.cpu1.kern.ipl_good_0 29890 48.46% 48.46% # number of times we switched to this ipl from a different ipl
-system.cpu1.kern.ipl_good_22 1894 3.07% 51.54% # number of times we switched to this ipl from a different ipl
-system.cpu1.kern.ipl_good_30 567 0.92% 52.45% # number of times we switched to this ipl from a different ipl
-system.cpu1.kern.ipl_good_31 29323 47.55% 100.00% # number of times we switched to this ipl from a different ipl
-system.cpu1.kern.ipl_ticks 3715795413 # number of cycles we spent at this ipl
-system.cpu1.kern.ipl_ticks_0 3690163762 99.31% 99.31% # number of cycles we spent at this ipl
-system.cpu1.kern.ipl_ticks_22 162884 0.00% 99.31% # number of cycles we spent at this ipl
-system.cpu1.kern.ipl_ticks_30 130370 0.00% 99.32% # number of cycles we spent at this ipl
-system.cpu1.kern.ipl_ticks_31 25338397 0.68% 100.00% # number of cycles we spent at this ipl
-system.cpu1.kern.ipl_used 0.787834 # fraction of swpipl calls that actually changed the ipl
-system.cpu1.kern.ipl_used_0 0.970171 # fraction of swpipl calls that actually changed the ipl
+system.cpu1.kern.inst.quiesce 2146 # number of quiesce instructions executed
+system.cpu1.kern.ipl_count 32627 # number of times we switched to this ipl
+system.cpu1.kern.ipl_count_0 11165 34.22% 34.22% # number of times we switched to this ipl
+system.cpu1.kern.ipl_count_22 1895 5.81% 40.03% # number of times we switched to this ipl
+system.cpu1.kern.ipl_count_30 115 0.35% 40.38% # number of times we switched to this ipl
+system.cpu1.kern.ipl_count_31 19452 59.62% 100.00% # number of times we switched to this ipl
+system.cpu1.kern.ipl_good 24195 # number of times we switched to this ipl from a different ipl
+system.cpu1.kern.ipl_good_0 11150 46.08% 46.08% # number of times we switched to this ipl from a different ipl
+system.cpu1.kern.ipl_good_22 1895 7.83% 53.92% # number of times we switched to this ipl from a different ipl
+system.cpu1.kern.ipl_good_30 115 0.48% 54.39% # number of times we switched to this ipl from a different ipl
+system.cpu1.kern.ipl_good_31 11035 45.61% 100.00% # number of times we switched to this ipl from a different ipl
+system.cpu1.kern.ipl_ticks 3717733449 # number of cycles we spent at this ipl
+system.cpu1.kern.ipl_ticks_0 3695802393 99.41% 99.41% # number of cycles we spent at this ipl
+system.cpu1.kern.ipl_ticks_22 162970 0.00% 99.41% # number of cycles we spent at this ipl
+system.cpu1.kern.ipl_ticks_30 29122 0.00% 99.42% # number of cycles we spent at this ipl
+system.cpu1.kern.ipl_ticks_31 21738964 0.58% 100.00% # number of cycles we spent at this ipl
+system.cpu1.kern.ipl_used 0.741564 # fraction of swpipl calls that actually changed the ipl
+system.cpu1.kern.ipl_used_0 0.998657 # fraction of swpipl calls that actually changed the ipl
 system.cpu1.kern.ipl_used_22 1 # fraction of swpipl calls that actually changed the ipl
 system.cpu1.kern.ipl_used_30 1 # fraction of swpipl calls that actually changed the ipl
-system.cpu1.kern.ipl_used_31 0.651434 # fraction of swpipl calls that actually changed the ipl
-system.cpu1.kern.mode_good_kernel 1028
-system.cpu1.kern.mode_good_user 535
-system.cpu1.kern.mode_good_idle 493
-system.cpu1.kern.mode_switch_kernel 2307 # number of protection mode switches
-system.cpu1.kern.mode_switch_user 535 # number of protection mode switches
-system.cpu1.kern.mode_switch_idle 2948 # number of protection mode switches
-system.cpu1.kern.mode_switch_good 0.355095 # fraction of useful protection mode switches
-system.cpu1.kern.mode_switch_good_kernel 0.445600 # fraction of useful protection mode switches
+system.cpu1.kern.ipl_used_31 0.567294 # fraction of swpipl calls that actually changed the ipl
+system.cpu1.kern.mode_good_kernel 602
+system.cpu1.kern.mode_good_user 563
+system.cpu1.kern.mode_good_idle 39
+system.cpu1.kern.mode_switch_kernel 1011 # number of protection mode switches
+system.cpu1.kern.mode_switch_user 563 # number of protection mode switches
+system.cpu1.kern.mode_switch_idle 2045 # number of protection mode switches
+system.cpu1.kern.mode_switch_good 0.332689 # fraction of useful protection mode switches
+system.cpu1.kern.mode_switch_good_kernel 0.595450 # fraction of useful protection mode switches
 system.cpu1.kern.mode_switch_good_user 1 # fraction of useful protection mode switches
-system.cpu1.kern.mode_switch_good_idle 0.167232 # fraction of useful protection mode switches
-system.cpu1.kern.mode_ticks_kernel 12634755 0.34% 0.34% # number of ticks spent at the given mode
-system.cpu1.kern.mode_ticks_user 1807179 0.05% 0.39% # number of ticks spent at the given mode
-system.cpu1.kern.mode_ticks_idle 3700889452 99.61% 100.00% # number of ticks spent at the given mode
-system.cpu1.kern.swap_context 2246 # number of times the context was actually changed
-system.cpu1.kern.syscall 112 # number of syscalls executed
-system.cpu1.kern.syscall_read 11 9.82% 9.82% # number of syscalls executed
-system.cpu1.kern.syscall_write 1 0.89% 10.71% # number of syscalls executed
-system.cpu1.kern.syscall_close 12 10.71% 21.43% # number of syscalls executed
-system.cpu1.kern.syscall_chmod 1 0.89% 22.32% # number of syscalls executed
-system.cpu1.kern.syscall_obreak 9 8.04% 30.36% # number of syscalls executed
-system.cpu1.kern.syscall_setuid 2 1.79% 32.14% # number of syscalls executed
-system.cpu1.kern.syscall_getuid 2 1.79% 33.93% # number of syscalls executed
-system.cpu1.kern.syscall_access 5 4.46% 38.39% # number of syscalls executed
-system.cpu1.kern.syscall_open 22 19.64% 58.04% # number of syscalls executed
-system.cpu1.kern.syscall_getgid 2 1.79% 59.82% # number of syscalls executed
-system.cpu1.kern.syscall_ioctl 1 0.89% 60.71% # number of syscalls executed
-system.cpu1.kern.syscall_readlink 1 0.89% 61.61% # number of syscalls executed
-system.cpu1.kern.syscall_execve 1 0.89% 62.50% # number of syscalls executed
-system.cpu1.kern.syscall_mmap 29 25.89% 88.39% # number of syscalls executed
-system.cpu1.kern.syscall_mprotect 9 8.04% 96.43% # number of syscalls executed
-system.cpu1.kern.syscall_fcntl 2 1.79% 98.21% # number of syscalls executed
-system.cpu1.kern.syscall_setgid 2 1.79% 100.00% # number of syscalls executed
-system.cpu1.not_idle_fraction 0.004619 # Percentage of non-idle cycles
-system.cpu1.numCycles 17164125 # number of cpu cycles simulated
-system.cpu1.num_insts 17162726 # Number of instructions executed
-system.cpu1.num_refs 5316705 # Number of memory references
+system.cpu1.kern.mode_switch_good_idle 0.019071 # fraction of useful protection mode switches
+system.cpu1.kern.mode_ticks_kernel 4713507 0.13% 0.13% # number of ticks spent at the given mode
+system.cpu1.kern.mode_ticks_user 1950903 0.05% 0.18% # number of ticks spent at the given mode
+system.cpu1.kern.mode_ticks_idle 3710606044 99.82% 100.00% # number of ticks spent at the given mode
+system.cpu1.kern.swap_context 469 # number of times the context was actually changed
+system.cpu1.kern.syscall 130 # number of syscalls executed
+system.cpu1.kern.syscall_read 13 10.00% 10.00% # number of syscalls executed
+system.cpu1.kern.syscall_close 14 10.77% 20.77% # number of syscalls executed
+system.cpu1.kern.syscall_chmod 1 0.77% 21.54% # number of syscalls executed
+system.cpu1.kern.syscall_obreak 11 8.46% 30.00% # number of syscalls executed
+system.cpu1.kern.syscall_setuid 3 2.31% 32.31% # number of syscalls executed
+system.cpu1.kern.syscall_getuid 3 2.31% 34.62% # number of syscalls executed
+system.cpu1.kern.syscall_access 5 3.85% 38.46% # number of syscalls executed
+system.cpu1.kern.syscall_open 24 18.46% 56.92% # number of syscalls executed
+system.cpu1.kern.syscall_getgid 3 2.31% 59.23% # number of syscalls executed
+system.cpu1.kern.syscall_ioctl 1 0.77% 60.00% # number of syscalls executed
+system.cpu1.kern.syscall_readlink 1 0.77% 60.77% # number of syscalls executed
+system.cpu1.kern.syscall_execve 1 0.77% 61.54% # number of syscalls executed
+system.cpu1.kern.syscall_mmap 34 26.15% 87.69% # number of syscalls executed
+system.cpu1.kern.syscall_mprotect 11 8.46% 96.15% # number of syscalls executed
+system.cpu1.kern.syscall_fcntl 2 1.54% 97.69% # number of syscalls executed
+system.cpu1.kern.syscall_setgid 3 2.31% 100.00% # number of syscalls executed
+system.cpu1.not_idle_fraction 0.002402 # Percentage of non-idle cycles
+system.cpu1.numCycles 8930639 # number of cpu cycles simulated
+system.cpu1.num_insts 8929052 # Number of instructions executed
+system.cpu1.num_refs 2665347 # Number of memory references
 system.disk0.dma_read_bytes 1024 # Number of bytes transfered via DMA reads (not PRD).
 system.disk0.dma_read_full_pages 0 # Number of full page size DMA reads (not PRD).
 system.disk0.dma_read_txs 1 # Number of DMA read transactions (not PRD).
diff --git a/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-atomic-dual/stderr b/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-atomic-dual/stderr
index d55b33424..14aa2c9ff 100644
--- a/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-atomic-dual/stderr
+++ b/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-atomic-dual/stderr
@@ -1,6 +1,6 @@
 0: system.tsunami.io.rtc: Real-time clock set to Sun Jan 1 00:00:00 2006
-Listening for console connection on port 3457
-0: system.remote_gdb.listener: listening for remote gdb #0 on port 7001
-0: system.remote_gdb.listener: listening for remote gdb #1 on port 7002
+Listening for console connection on port 3456
+0: system.remote_gdb.listener: listening for remote gdb #0 on port 7000
+0: system.remote_gdb.listener: listening for remote gdb #1 on port 7001
 warn: Entering event queue @ 0. Starting simulation...
 warn: 195723: Trying to launch CPU number 1!
diff --git a/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-atomic-dual/stdout b/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-atomic-dual/stdout
index 76bd8d3c2..18365db1c 100644
--- a/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-atomic-dual/stdout
+++ b/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-atomic-dual/stdout
@@ -5,8 +5,8 @@ The Regents of The University of Michigan
 All Rights Reserved
-M5 compiled Oct 8 2006 14:07:02
-M5 started Sun Oct 8 14:07:57 2006
-M5 executing on zizzer.eecs.umich.edu
+M5 compiled Oct 8 2006 21:57:24
+M5 started Sun Oct 8 21:58:13 2006
+M5 executing on zed.eecs.umich.edu
 command line: build/ALPHA_FS/m5.opt -d build/ALPHA_FS/tests/opt/quick/10.linux-boot/alpha/linux/tsunami-simple-atomic-dual tests/run.py quick/10.linux-boot/alpha/linux/tsunami-simple-atomic-dual
-Exiting @ tick 3716216351 because m5_exit instruction encountered
+Exiting @ tick 3718155709 because m5_exit instruction encountered
diff --git a/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-atomic/console.system.sim_console b/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-atomic/console.system.sim_console
index 1d150a047..5461cc4ab 100644
--- a/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-atomic/console.system.sim_console
+++ b/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-atomic/console.system.sim_console
@@ -3,7 +3,7 @@ M5 console: m5AlphaAccess @ 0xFFFFFD0200000000
 memsize 8000000 pages 4000
 First free page after ROM 0xFFFFFC0000018000
 HWRPB 0xFFFFFC0000018000 l1pt 0xFFFFFC0000040000 l2pt 0xFFFFFC0000042000 l3pt_rpb 0xFFFFFC0000044000 l3pt_kernel 0xFFFFFC0000048000 l2reserv 0xFFFFFC0000046000
- kstart = 0xFFFFFC0000310000, kend = 0xFFFFFC0000855718, kentry = 0xFFFFFC0000310000, numCPUs = 0x1
+ kstart = 0xFFFFFC0000310000, kend = 0xFFFFFC0000855898, kentry = 0xFFFFFC0000310000, numCPUs = 0x1
 CPU Clock at 2000 MHz IntrClockFrequency=1024
 Booting with 1 processor(s)
 KSP: 0x20043FE8 PTBR 0x20
@@ -16,7 +16,7 @@ M5 console: m5AlphaAccess @ 0xFFFFFD0200000000
 k_argc = 0
 jumping to kernel at 0xFFFFFC0000310000, (PCBB 0xFFFFFC0000018180 pfn 1067)
 CallbackFixup 0 18000, t7=FFFFFC000070C000
- Linux version 2.6.13 (hsul@zed.eecs.umich.edu) (gcc version 3.4.3) #7 SMP Tue Aug 15 14:40:31 EDT 2006
+ Linux version 2.6.13 (hsul@zed.eecs.umich.edu) (gcc version 3.4.3) #1 SMP Sun Oct 8 19:52:07 EDT 2006
 Booting GENERIC on Tsunami variation DP264 using machine vector DP264 from SRM
 Major Options: SMP LEGACY_START VERBOSE_MCHECK
 Command line: root=/dev/hda1 console=ttyS0
@@ -32,7 +32,7 @@ M5 console: m5AlphaAccess @ 0xFFFFFD0200000000
 Console: colour dummy device 80x25
 Dentry cache hash table entries: 32768 (order: 5, 262144 bytes)
 Inode-cache hash table entries: 16384 (order: 4, 131072 bytes)
- Memory: 118784k/131072k available (3316k kernel code, 8952k reserved, 983k data, 224k init)
+ Memory: 118784k/131072k available (3314k kernel code, 8952k reserved, 983k data, 224k init)
 Mount-cache hash table entries: 512
 SMP mode deactivated.
 Brought up 1 CPUs
@@ -54,9 +54,7 @@ M5 console: m5AlphaAccess @ 0xFFFFFD0200000000
 io scheduler cfq registered
 loop: loaded (max 8 devices)
 nbd: registered device at major 43
- sinic.c: M5 Simple Integrated NIC driver
 ns83820.c: National Semiconductor DP83820 10/100/1000 driver.
- ns83820: irq bound to CPU 0
 eth0: ns83820.c: 0x22c: 00000000, subsystem: 0000:0000
 eth0: enabling optical transceiver
 eth0: using 64 bit addressing.
diff --git a/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-atomic/m5stats.txt b/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-atomic/m5stats.txt
index c9661f182..e276e91a7 100644
--- a/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-atomic/m5stats.txt
+++ b/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-atomic/m5stats.txt
@@ -1,86 +1,86 @@
 ---------- Begin Simulation Statistics ----------
-host_inst_rate 1269893 # Simulator instruction rate (inst/s)
-host_mem_usage 197712 # Number of bytes of host memory used
-host_seconds 48.70 # Real time elapsed on the host
-host_tick_rate 74667785 # Simulator tick rate (ticks/s)
+host_inst_rate 1389289 # Simulator instruction rate (inst/s)
+host_mem_usage 197652 # Number of bytes of host memory used
+host_seconds 44.48 # Real time elapsed on the host
+host_tick_rate 81712411 # Simulator tick rate (ticks/s)
 sim_freq 2000000000 # Frequency of simulated ticks
-sim_insts 61839827 # Number of instructions simulated
-sim_seconds 1.818060 # Number of seconds simulated
-sim_ticks 3636120569 # Number of ticks simulated
-system.cpu.dtb.accesses 1304498 # DTB accesses
+sim_insts 61788439 # Number of instructions simulated
+sim_seconds 1.817090 # Number of seconds simulated
+sim_ticks 3634179176 # Number of ticks simulated
+system.cpu.dtb.accesses 1304494 # DTB accesses
 system.cpu.dtb.acv 367 # DTB access violations
-system.cpu.dtb.hits 16565944 # DTB hits
+system.cpu.dtb.hits 16552094 # DTB hits
 system.cpu.dtb.misses 11425 # DTB misses
-system.cpu.dtb.read_accesses 900427 # DTB read accesses
+system.cpu.dtb.read_accesses 900425 # DTB read accesses
 system.cpu.dtb.read_acv 210 # DTB read access violations
-system.cpu.dtb.read_hits 10044011 # DTB read hits
+system.cpu.dtb.read_hits 10038384 # DTB read hits
 system.cpu.dtb.read_misses 10280 # DTB read misses
-system.cpu.dtb.write_accesses 404071 # DTB write accesses
+system.cpu.dtb.write_accesses 404069 # DTB write accesses
 system.cpu.dtb.write_acv 157 # DTB write access violations
-system.cpu.dtb.write_hits 6521933 # DTB write hits
+system.cpu.dtb.write_hits 6513710 # DTB write hits
 system.cpu.dtb.write_misses 1145 # DTB write misses
-system.cpu.idle_fraction 0.982991 # Percentage of idle cycles
+system.cpu.idle_fraction 0.982997 # Percentage of idle cycles
 system.cpu.itb.accesses 3281310 # ITB accesses
 system.cpu.itb.acv 184 # ITB acv
 system.cpu.itb.hits 3276320 # ITB hits
 system.cpu.itb.misses 4990 # ITB misses
-system.cpu.kern.callpal 193942 # number of callpals executed
+system.cpu.kern.callpal 193842 # number of callpals executed
 system.cpu.kern.callpal_cserve 1 0.00% 0.00% # number of callpals executed
 system.cpu.kern.callpal_wrmces 1 0.00% 0.00% # number of callpals executed
 system.cpu.kern.callpal_wrfen 1 0.00% 0.00% # number of callpals executed
 system.cpu.kern.callpal_wrvptptr 1 0.00% 0.00% # number of callpals executed
-system.cpu.kern.callpal_swpctx 4207 2.17% 2.17% # number of callpals executed
+system.cpu.kern.callpal_swpctx 4203 2.17% 2.17% # number of callpals executed
 system.cpu.kern.callpal_tbi 54 0.03% 2.20% # number of callpals executed
 system.cpu.kern.callpal_wrent 7 0.00% 2.20% # number of callpals executed
-system.cpu.kern.callpal_swpipl 176844 91.18% 93.39% # number of callpals executed
+system.cpu.kern.callpal_swpipl 176751 91.18% 93.38% # number of callpals executed
 system.cpu.kern.callpal_rdps 6881 3.55% 96.93% # number of callpals executed
 system.cpu.kern.callpal_wrkgp 1 0.00% 96.94% # number of callpals executed
 system.cpu.kern.callpal_wrusp 7 0.00% 96.94% # number of callpals executed
 system.cpu.kern.callpal_rdusp 9 0.00% 96.94% # number of callpals executed
 system.cpu.kern.callpal_whami 2 0.00% 96.94% # number of callpals executed
-system.cpu.kern.callpal_rti 5214 2.69% 99.63% # number of callpals executed
+system.cpu.kern.callpal_rti 5211 2.69% 99.63% # number of callpals executed
 system.cpu.kern.callpal_callsys 531 0.27% 99.91% # number of callpals executed
 system.cpu.kern.callpal_imb 181 0.09% 100.00% # number of callpals executed
 system.cpu.kern.inst.arm 0 # number of arm instructions executed
-system.cpu.kern.inst.hwrei 213009 # number of hwrei instructions executed
+system.cpu.kern.inst.hwrei 212908 # number of hwrei instructions executed
 system.cpu.kern.inst.ivlb 0 # number of ivlb instructions executed
 system.cpu.kern.inst.ivle 0 # number of ivle instructions executed
-system.cpu.kern.inst.quiesce 6282 # number of quiesce instructions executed
-system.cpu.kern.ipl_count 184158 # number of times we switched to this ipl
-system.cpu.kern.ipl_count_0 75390 40.94% 40.94% # number of times we switched to this ipl
+system.cpu.kern.inst.quiesce 6207 # number of quiesce instructions executed
+system.cpu.kern.ipl_count 184061 # number of times we switched to this ipl
+system.cpu.kern.ipl_count_0 75348 40.94% 40.94% # number of times we switched to this ipl
 system.cpu.kern.ipl_count_21 245 0.13% 41.07% # number of times we switched to this ipl
-system.cpu.kern.ipl_count_22 1854 1.01% 42.08% # number of times we switched to this ipl
-system.cpu.kern.ipl_count_31 106669 57.92% 100.00% # number of times we switched to this ipl
-system.cpu.kern.ipl_good 150141 # number of times we switched to this ipl from a different ipl
-system.cpu.kern.ipl_good_0 74021 49.30% 49.30% # number of times we switched to this ipl from a different ipl
+system.cpu.kern.ipl_count_22 1853 1.01% 42.08% # number of times we switched to this ipl
+system.cpu.kern.ipl_count_31 106615 57.92% 100.00% # number of times we switched to this ipl
+system.cpu.kern.ipl_good 150060 # number of times we switched to this ipl from a different ipl
+system.cpu.kern.ipl_good_0 73981 49.30% 49.30% # number of times we switched to this ipl from a different ipl
 system.cpu.kern.ipl_good_21 245 0.16% 49.46% # number of times we switched to this ipl from a different ipl
-system.cpu.kern.ipl_good_22 1854 1.23% 50.70% # number of times we switched to this ipl from a different ipl
-system.cpu.kern.ipl_good_31 74021 49.30% 100.00% # number of times we switched to this ipl from a different ipl
-system.cpu.kern.ipl_ticks 3636120154 # number of cycles we spent at this ipl
-system.cpu.kern.ipl_ticks_0 3601418096 99.05% 99.05% # number of cycles we spent at this ipl
+system.cpu.kern.ipl_good_22 1853 1.23% 50.70% # number of times we switched to this ipl from a different ipl
+system.cpu.kern.ipl_good_31 73981 49.30% 100.00% # number of times we switched to this ipl from a different ipl
+system.cpu.kern.ipl_ticks 3634178761 # number of cycles we spent at this ipl
+system.cpu.kern.ipl_ticks_0 3599646819 99.05% 99.05% # number of cycles we spent at this ipl
 system.cpu.kern.ipl_ticks_21 40474 0.00% 99.05% # number of cycles we spent at this ipl
-system.cpu.kern.ipl_ticks_22 159444 0.00% 99.05% # number of cycles we spent at this ipl
-system.cpu.kern.ipl_ticks_31 34502140 0.95% 100.00% # number of cycles we spent at this ipl
-system.cpu.kern.ipl_used 0.815284 # fraction of swpipl calls that actually changed the ipl
-system.cpu.kern.ipl_used_0 0.981841 # fraction of swpipl calls that actually changed the ipl
+system.cpu.kern.ipl_ticks_22 159358 0.00% 99.06% # number of cycles we spent at this ipl
+system.cpu.kern.ipl_ticks_31 34332110 0.94% 100.00% # number of cycles we spent at this ipl
+system.cpu.kern.ipl_used 0.815273 # fraction of swpipl calls that actually changed the ipl
+system.cpu.kern.ipl_used_0 0.981858 # fraction of swpipl calls that actually changed the ipl
 system.cpu.kern.ipl_used_21 1 # fraction of swpipl calls that actually changed the ipl
 system.cpu.kern.ipl_used_22 1 # fraction of swpipl calls that actually changed the ipl
-system.cpu.kern.ipl_used_31 0.693932 # fraction of swpipl calls that actually changed the ipl
-system.cpu.kern.mode_good_kernel 1937
-system.cpu.kern.mode_good_user 1757
+system.cpu.kern.ipl_used_31 0.693908 # fraction of swpipl calls that actually changed the ipl
+system.cpu.kern.mode_good_kernel 1938
+system.cpu.kern.mode_good_user 1758
 system.cpu.kern.mode_good_idle 180
-system.cpu.kern.mode_switch_kernel 5982 # number of protection mode switches
-system.cpu.kern.mode_switch_user 1757 # number of protection mode switches
-system.cpu.kern.mode_switch_idle 2103 # number of protection mode switches
-system.cpu.kern.mode_switch_good 0.393619 # fraction of useful protection mode switches
-system.cpu.kern.mode_switch_good_kernel 0.323805 # fraction of useful protection mode switches
+system.cpu.kern.mode_switch_kernel 5978 # number of protection mode switches
+system.cpu.kern.mode_switch_user 1758 # number of protection mode switches
+system.cpu.kern.mode_switch_idle 2102 # number of protection mode switches
+system.cpu.kern.mode_switch_good 0.393983 # fraction of useful protection mode switches
+system.cpu.kern.mode_switch_good_kernel 0.324189 # fraction of useful protection mode switches
 system.cpu.kern.mode_switch_good_user 1 # fraction of useful protection mode switches
-system.cpu.kern.mode_switch_good_idle 0.085592 # fraction of useful protection mode switches
-system.cpu.kern.mode_ticks_kernel 54647278 1.50% 1.50% # number of ticks spent at the given mode
-system.cpu.kern.mode_ticks_user 3591234 0.10% 1.60% # number of ticks spent at the given mode
-system.cpu.kern.mode_ticks_idle 3577881640 98.40% 100.00% # number of ticks spent at the given mode
-system.cpu.kern.swap_context 4208 # number of times the context was actually changed
+system.cpu.kern.mode_switch_good_idle 0.085633 # fraction of useful protection mode switches
+system.cpu.kern.mode_ticks_kernel 54682435 1.50% 1.50% # number of ticks spent at the given mode
+system.cpu.kern.mode_ticks_user 3591244 0.10% 1.60% # number of ticks spent at the given mode
+system.cpu.kern.mode_ticks_idle 3575905080 98.40% 100.00% # number of ticks spent at the given mode
+system.cpu.kern.swap_context 4204 # number of times the context was actually changed
 system.cpu.kern.syscall 329 # number of syscalls executed
 system.cpu.kern.syscall_fork 8 2.43% 2.43% # number of syscalls executed
 system.cpu.kern.syscall_read 30 9.12% 11.55% # number of syscalls executed
@@ -112,10 +112,10 @@ system.cpu.kern.syscall_connect 2 0.61% 97.57% # nu
 system.cpu.kern.syscall_setgid 4 1.22% 98.78% # number of syscalls executed
 system.cpu.kern.syscall_getrlimit 2 0.61% 99.39% # number of syscalls executed
 system.cpu.kern.syscall_setsid 2 0.61% 100.00% # number of syscalls executed
-system.cpu.not_idle_fraction 0.017009 # Percentage of non-idle cycles
-system.cpu.numCycles 61845001 # number of cpu cycles simulated
-system.cpu.num_insts 61839827 # Number of instructions executed
-system.cpu.num_refs 16814484 # Number of memory references
+system.cpu.not_idle_fraction 0.017003 # Percentage of non-idle cycles
+system.cpu.numCycles 61793613 # number of cpu cycles simulated
+system.cpu.num_insts 61788439 # Number of instructions executed
+system.cpu.num_refs 16800623 # Number of memory references
 system.disk0.dma_read_bytes 1024 # Number of bytes transfered via DMA reads (not PRD).
 system.disk0.dma_read_full_pages 0 # Number of full page size DMA reads (not PRD).
 system.disk0.dma_read_txs 1 # Number of DMA read transactions (not PRD).
diff --git a/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-atomic/stderr b/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-atomic/stderr
index 4741dd710..6204251a5 100644
--- a/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-atomic/stderr
+++ b/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-atomic/stderr
@@ -1,4 +1,4 @@
 0: system.tsunami.io.rtc: Real-time clock set to Sun Jan 1 00:00:00 2006
-Listening for console connection on port 3457
-0: system.remote_gdb.listener: listening for remote gdb #0 on port 7001
+Listening for console connection on port 3456
+0: system.remote_gdb.listener: listening for remote gdb #0 on port 7000
 warn: Entering event queue @ 0. Starting simulation...
diff --git a/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-atomic/stdout b/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-atomic/stdout
index f7fe15009..bb7f4ca1e 100644
--- a/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-atomic/stdout
+++ b/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-atomic/stdout
@@ -5,8 +5,8 @@ The Regents of The University of Michigan
 All Rights Reserved
-M5 compiled Oct 8 2006 14:07:02
-M5 started Sun Oct 8 14:07:07 2006
-M5 executing on zizzer.eecs.umich.edu
+M5 compiled Oct 8 2006 21:57:24
+M5 started Sun Oct 8 21:57:28 2006
+M5 executing on zed.eecs.umich.edu
 command line: build/ALPHA_FS/m5.opt -d build/ALPHA_FS/tests/opt/quick/10.linux-boot/alpha/linux/tsunami-simple-atomic tests/run.py quick/10.linux-boot/alpha/linux/tsunami-simple-atomic
-Exiting @ tick 3636120569 because m5_exit instruction encountered
+Exiting @ tick 3634179176 because m5_exit instruction encountered
diff --git a/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-timing-dual/console.system.sim_console b/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-timing-dual/console.system.sim_console
index 57a610390..27adebb82 100644
--- a/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-timing-dual/console.system.sim_console
+++ b/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-timing-dual/console.system.sim_console
@@ -3,7 +3,7 @@ M5 console: m5AlphaAccess @ 0xFFFFFD0200000000
 memsize 8000000 pages 4000
 First free page after ROM 0xFFFFFC0000018000
 HWRPB 0xFFFFFC0000018000 l1pt 0xFFFFFC0000040000 l2pt 0xFFFFFC0000042000 l3pt_rpb 0xFFFFFC0000044000 l3pt_kernel 0xFFFFFC0000048000 l2reserv 0xFFFFFC0000046000
- kstart = 0xFFFFFC0000310000, kend = 0xFFFFFC0000855718, kentry = 0xFFFFFC0000310000, numCPUs = 0x2
+ kstart = 0xFFFFFC0000310000, kend = 0xFFFFFC0000855898, kentry = 0xFFFFFC0000310000, numCPUs = 0x2
 CPU Clock at 2000 MHz IntrClockFrequency=1024
 Booting with 2 processor(s)
 KSP: 0x20043FE8 PTBR 0x20
@@ -19,7 +19,7 @@ M5 console: m5AlphaAccess @ 0xFFFFFD0200000000
 jumping to kernel at 0xFFFFFC0000310000, (PCBB 0xFFFFFC0000018180 pfn 1067)
 CallbackFixup 0 18000, t7=FFFFFC000070C000
 Entering slaveloop for cpu 1 my_rpb=FFFFFC0000018400
- Linux version 2.6.13 (hsul@zed.eecs.umich.edu) (gcc version 3.4.3) #7 SMP Tue Aug 15 14:40:31 EDT 2006
+ Linux version 2.6.13 (hsul@zed.eecs.umich.edu) (gcc version 3.4.3) #1 SMP Sun Oct 8 19:52:07 EDT 2006
 Booting GENERIC on Tsunami variation DP264 using machine vector DP264 from SRM
 Major Options: SMP LEGACY_START VERBOSE_MCHECK
 Command line: root=/dev/hda1 console=ttyS0
@@ -35,7 +35,7 @@ M5 console: m5AlphaAccess @ 0xFFFFFD0200000000
 Console: colour dummy device 80x25
 Dentry cache hash table entries: 32768 (order: 5, 262144 bytes)
 Inode-cache hash table entries: 16384 (order: 4, 131072 bytes)
- Memory: 118784k/131072k available (3316k kernel code, 8952k reserved, 983k data, 224k init)
+ Memory: 118784k/131072k available (3314k kernel code, 8952k reserved, 983k data, 224k init)
 Mount-cache hash table entries: 512
 SMP starting up secondaries.
 Slave CPU 1 console command START
@@ -59,9 +59,7 @@ SlaveCmd: restart FFFFFC0000310020 FFFFFC0000310020 vptb FFFFFFFE00000000 my_rpb
 io scheduler cfq registered
 loop: loaded (max 8 devices)
 nbd: registered device at major 43
- sinic.c: M5 Simple Integrated NIC driver
 ns83820.c: National Semiconductor DP83820 10/100/1000 driver.
- ns83820: irq bound to CPU 1
 eth0: ns83820.c: 0x22c: 00000000, subsystem: 0000:0000
 eth0: enabling optical transceiver
 eth0: using 64 bit addressing.
diff --git a/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-timing-dual/m5stats.txt b/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-timing-dual/m5stats.txt
index 4f8408501..ff9a06cc7 100644
--- a/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-timing-dual/m5stats.txt
+++ b/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-timing-dual/m5stats.txt
@@ -1,91 +1,91 @@
 ---------- Begin Simulation Statistics ----------
-host_inst_rate 779301 # Simulator instruction rate (inst/s)
-host_mem_usage 197344 # Number of bytes of host memory used
-host_seconds 85.22 # Real time elapsed on the host
-host_tick_rate 43826709 # Simulator tick rate (ticks/s)
+host_inst_rate 719379 # Simulator instruction rate (inst/s)
+host_mem_usage 197268 # Number of bytes of host memory used
+host_seconds 92.21 # Real time elapsed on the host
+host_tick_rate 40502079 # Simulator tick rate (ticks/s)
 sim_freq 2000000000 # Frequency of simulated ticks
-sim_insts 66411500 # Number of instructions simulated
-sim_seconds 1.867451 # Number of seconds simulated
-sim_ticks 3734901822 # Number of ticks simulated
+sim_insts 66337257 # Number of instructions simulated
+sim_seconds 1.867449 # Number of seconds simulated
+sim_ticks 3734898877 # Number of ticks simulated
 system.cpu0.dtb.accesses 828318 # DTB accesses
 system.cpu0.dtb.acv 315 # DTB access violations
-system.cpu0.dtb.hits 13279471 # DTB hits
+system.cpu0.dtb.hits 13264910 # DTB hits
 system.cpu0.dtb.misses 7094 # DTB misses
 system.cpu0.dtb.read_accesses 572336 # DTB read accesses
 system.cpu0.dtb.read_acv 200 # DTB read access violations
-system.cpu0.dtb.read_hits 8207004 # DTB read hits
+system.cpu0.dtb.read_hits 8201218 # DTB read hits
 system.cpu0.dtb.read_misses 6394 # DTB read misses
 system.cpu0.dtb.write_accesses 255982 # DTB write accesses
 system.cpu0.dtb.write_acv 115 # DTB write access violations
-system.cpu0.dtb.write_hits 5072467 # DTB write hits
+system.cpu0.dtb.write_hits 5063692 # DTB write hits
 system.cpu0.dtb.write_misses 700 # DTB write misses
-system.cpu0.idle_fraction 0.982495 # Percentage of idle cycles
+system.cpu0.idle_fraction 0.982517 # Percentage of idle cycles
 system.cpu0.itb.accesses 1888651 # ITB accesses
 system.cpu0.itb.acv 166 # ITB acv
 system.cpu0.itb.hits 1885318 # ITB hits
 system.cpu0.itb.misses 3333 # ITB misses
-system.cpu0.kern.callpal 146866 # number of callpals executed
+system.cpu0.kern.callpal 146863 # number of callpals executed
 system.cpu0.kern.callpal_cserve 1 0.00% 0.00% # number of callpals executed
-system.cpu0.kern.callpal_wripir 507 0.35% 0.35% # number of callpals executed
+system.cpu0.kern.callpal_wripir 506 0.34% 0.35% # number of callpals executed
 system.cpu0.kern.callpal_wrmces 1 0.00% 0.35% # number of callpals executed
 system.cpu0.kern.callpal_wrfen 1 0.00% 0.35% # number of callpals executed
 system.cpu0.kern.callpal_wrvptptr 1 0.00% 0.35% # number of callpals executed
-system.cpu0.kern.callpal_swpctx 2966 2.02% 2.37% # number of callpals executed
+system.cpu0.kern.callpal_swpctx 2962 2.02% 2.36% # number of callpals executed
 system.cpu0.kern.callpal_tbi 47 0.03% 2.40% # number of callpals executed
 system.cpu0.kern.callpal_wrent 7 0.00% 2.40% # number of callpals executed
-system.cpu0.kern.callpal_swpipl 132441 90.18% 92.58% # number of callpals executed
-system.cpu0.kern.callpal_rdps 6235 4.25% 96.83% # number of callpals executed
+system.cpu0.kern.callpal_swpipl 132443 90.18% 92.58% # number of callpals executed
+system.cpu0.kern.callpal_rdps 6236 4.25% 96.83% # number of callpals executed
 system.cpu0.kern.callpal_wrkgp 1 0.00% 96.83% # number of callpals executed
 system.cpu0.kern.callpal_wrusp 2 0.00% 96.83% # number of callpals executed
 system.cpu0.kern.callpal_rdusp 8 0.01% 96.84% # number of callpals executed
 system.cpu0.kern.callpal_whami 2 0.00% 96.84% # number of callpals executed
-system.cpu0.kern.callpal_rti 4201 2.86% 99.70% # number of callpals executed
+system.cpu0.kern.callpal_rti 4200 2.86% 99.70% # number of callpals executed
 system.cpu0.kern.callpal_callsys 317 0.22% 99.91% # number of callpals executed
 system.cpu0.kern.callpal_imb 128 0.09% 100.00% # number of callpals executed
 system.cpu0.kern.inst.arm 0 # number of arm instructions executed
-system.cpu0.kern.inst.hwrei 160336 # number of hwrei instructions executed
+system.cpu0.kern.inst.hwrei 160332 # number of hwrei instructions executed
 system.cpu0.kern.inst.ivlb 0 # number of ivlb instructions executed
 system.cpu0.kern.inst.ivle 0 # number of ivle instructions executed
-system.cpu0.kern.inst.quiesce 6712 # number of quiesce instructions executed
+system.cpu0.kern.inst.quiesce 6637 # number of quiesce instructions executed
 system.cpu0.kern.ipl_count 139203 # number of times we switched to this ipl
-system.cpu0.kern.ipl_count_0 55746 40.05% 40.05% # number of times we switched to this ipl
+system.cpu0.kern.ipl_count_0 55744 40.05% 40.05% # number of times we switched to this ipl
 system.cpu0.kern.ipl_count_21 245 0.18% 40.22% # number of times we switched to this ipl
 system.cpu0.kern.ipl_count_22 1904 1.37% 41.59% # number of times we switched to this ipl
-system.cpu0.kern.ipl_count_30 411 0.30% 41.89% # number of times we switched to this ipl
-system.cpu0.kern.ipl_count_31 80897 58.11% 100.00% # number of times we switched to this ipl
-system.cpu0.kern.ipl_good 112531 # number of times we switched to this ipl from a different ipl
-system.cpu0.kern.ipl_good_0 55191 49.05% 49.05% # number of times we switched to this ipl from a different ipl
+system.cpu0.kern.ipl_count_30 410 0.29% 41.88% # number of times we switched to this ipl
+system.cpu0.kern.ipl_count_31 80900 58.12% 100.00% # number of times we switched to this ipl
+system.cpu0.kern.ipl_good 112527 # number of times we switched to this ipl from a different ipl
+system.cpu0.kern.ipl_good_0 55189 49.05% 49.05% # number of times we switched to this ipl from a different ipl
 system.cpu0.kern.ipl_good_21 245 0.22% 49.26% # number of times we switched to this ipl from a different ipl
 system.cpu0.kern.ipl_good_22 1904 1.69% 50.95% # number of times we switched to this ipl from a different ipl
-system.cpu0.kern.ipl_good_30 411 0.37% 51.32% # number of times we switched to this ipl from a different ipl
-system.cpu0.kern.ipl_good_31 54780 48.68% 100.00% # number of times we switched to this ipl from a different ipl
+system.cpu0.kern.ipl_good_30 410 0.36% 51.32% # number of times we switched to this ipl from a different ipl
+system.cpu0.kern.ipl_good_31 54779 48.68% 100.00% # number of times we switched to this ipl from a different ipl
 system.cpu0.kern.ipl_ticks 3734378988 # number of cycles we spent at this ipl
-system.cpu0.kern.ipl_ticks_0 3696129107 98.98% 98.98% # number of cycles we spent at this ipl
+system.cpu0.kern.ipl_ticks_0 3696326531 98.98% 98.98% # number of cycles we spent at this ipl
 system.cpu0.kern.ipl_ticks_21 53683 0.00% 98.98% # number of cycles we spent at this ipl
-system.cpu0.kern.ipl_ticks_22 224672 0.01% 98.98% # number of cycles we spent at this ipl
-system.cpu0.kern.ipl_ticks_30 128598 0.00% 98.99% # number of cycles we spent at this ipl
-system.cpu0.kern.ipl_ticks_31 37842928 1.01% 100.00% # number of cycles we spent at this ipl
-system.cpu0.kern.ipl_used 0.808395 # fraction of swpipl calls that actually changed the ipl
+system.cpu0.kern.ipl_ticks_22 224672 0.01% 98.99% # number of cycles we spent at this ipl
+system.cpu0.kern.ipl_ticks_30 128286 0.00% 98.99% # number of cycles we spent at this ipl
+system.cpu0.kern.ipl_ticks_31 37645816 1.01% 100.00% # number of cycles we spent at this ipl
+system.cpu0.kern.ipl_used 0.808366 # fraction of swpipl calls that actually changed the ipl
 system.cpu0.kern.ipl_used_0 0.990044 # fraction of swpipl calls that actually changed the ipl
 system.cpu0.kern.ipl_used_21 1 # fraction of swpipl calls that actually changed the ipl
 system.cpu0.kern.ipl_used_22 1 # fraction of swpipl calls that actually changed the ipl
 system.cpu0.kern.ipl_used_30 1 # fraction of swpipl calls that actually changed the ipl
-system.cpu0.kern.ipl_used_31 0.677157 # fraction of swpipl calls that actually changed the ipl
+system.cpu0.kern.ipl_used_31 0.677120 # fraction of swpipl calls that actually changed the ipl
 system.cpu0.kern.mode_good_kernel 1095
 system.cpu0.kern.mode_good_user 1095
 system.cpu0.kern.mode_good_idle 0
-system.cpu0.kern.mode_switch_kernel 6633 # number of protection mode switches
+system.cpu0.kern.mode_switch_kernel 6628 # number of protection mode switches
 system.cpu0.kern.mode_switch_user 1095 # number of protection mode switches
 system.cpu0.kern.mode_switch_idle 0 # number of protection mode switches
-system.cpu0.kern.mode_switch_good 0.283385 # fraction of useful protection mode switches
-system.cpu0.kern.mode_switch_good_kernel 0.165084 # fraction of useful protection mode switches
+system.cpu0.kern.mode_switch_good 0.283569 # fraction of useful protection mode switches
+system.cpu0.kern.mode_switch_good_kernel 0.165208 # fraction of useful protection mode switches
 system.cpu0.kern.mode_switch_good_user 1 # fraction of useful protection mode switches
 system.cpu0.kern.mode_switch_good_idle # fraction of useful protection mode switches
-system.cpu0.kern.mode_ticks_kernel 3730045371 99.93% 99.93% # number of ticks spent at the given mode
+system.cpu0.kern.mode_ticks_kernel 3730042316 99.93% 99.93% # number of ticks spent at the given mode
 system.cpu0.kern.mode_ticks_user 2718822 0.07% 100.00% # number of ticks spent at the given mode
 system.cpu0.kern.mode_ticks_idle 0 0.00% 100.00% # number of ticks spent at the given mode
-system.cpu0.kern.swap_context 2967 # number of times the context was actually changed
+system.cpu0.kern.swap_context 2963 # number of times the context was actually changed
 system.cpu0.kern.syscall 179 # number of syscalls executed
 system.cpu0.kern.syscall_fork 7 3.91% 3.91% # number of syscalls executed
 system.cpu0.kern.syscall_read 14 7.82% 11.73% # number of syscalls executed
@@ -115,84 +115,84 @@ system.cpu0.kern.syscall_connect 2 1.12% 97.77% # nu
 system.cpu0.kern.syscall_setgid 1 0.56% 98.32% # number of syscalls executed
 system.cpu0.kern.syscall_getrlimit 1 0.56% 98.88% # number of syscalls executed
 system.cpu0.kern.syscall_setsid 2 1.12% 100.00% # number of syscalls executed
-system.cpu0.not_idle_fraction 0.017505 # Percentage of non-idle cycles
+system.cpu0.not_idle_fraction 0.017483 # Percentage of non-idle cycles
 system.cpu0.numCycles 0 # number of cpu cycles simulated
-system.cpu0.num_insts 52039310 # Number of instructions executed
-system.cpu0.num_refs 13510641 # Number of memory references
-system.cpu1.dtb.accesses 477045 # DTB accesses
+system.cpu0.num_insts 51973218 # Number of instructions executed
+system.cpu0.num_refs 13496062 # Number of memory references
+system.cpu1.dtb.accesses 477041 # DTB accesses
 system.cpu1.dtb.acv 52 # DTB access violations
-system.cpu1.dtb.hits 4567143 # DTB hits
+system.cpu1.dtb.hits 4561390 # DTB hits
 system.cpu1.dtb.misses 4359 # DTB misses
-system.cpu1.dtb.read_accesses 328553 # DTB read accesses
+system.cpu1.dtb.read_accesses 328551 # DTB read accesses
 system.cpu1.dtb.read_acv 10 # DTB read access violations
-system.cpu1.dtb.read_hits 2660612 # DTB read hits
+system.cpu1.dtb.read_hits 2657400 # DTB read hits
 system.cpu1.dtb.read_misses 3911 # DTB read misses
-system.cpu1.dtb.write_accesses 148492 # DTB write accesses
+system.cpu1.dtb.write_accesses 148490 # DTB write accesses
 system.cpu1.dtb.write_acv 42 # DTB write access violations
-system.cpu1.dtb.write_hits 1906531 # DTB write hits
+system.cpu1.dtb.write_hits 1903990 # DTB write hits
 system.cpu1.dtb.write_misses 448 # DTB write misses
-system.cpu1.idle_fraction 0.994923 # Percentage of idle cycles
+system.cpu1.idle_fraction 0.994927 # Percentage of idle cycles
 system.cpu1.itb.accesses 1392687 # ITB accesses
 system.cpu1.itb.acv 18 # ITB acv
 system.cpu1.itb.hits 1391015 # ITB hits
 system.cpu1.itb.misses 1672 # ITB misses
-system.cpu1.kern.callpal 74475 # number of callpals executed
+system.cpu1.kern.callpal 74370 # number of callpals executed
 system.cpu1.kern.callpal_cserve 1 0.00% 0.00% # number of callpals executed
-system.cpu1.kern.callpal_wripir 411 0.55% 0.55% # number of callpals executed
+system.cpu1.kern.callpal_wripir 410 0.55% 0.55% # number of callpals executed
 system.cpu1.kern.callpal_wrmces 1 0.00% 0.55% # number of callpals executed
 system.cpu1.kern.callpal_wrfen 1 0.00% 0.56% # number of callpals executed
-system.cpu1.kern.callpal_swpctx 2106 2.83% 3.38% # number of callpals executed
+system.cpu1.kern.callpal_swpctx 2102 2.83% 3.38% # number of callpals executed
 system.cpu1.kern.callpal_tbi 6 0.01% 3.39% # number of callpals executed
 system.cpu1.kern.callpal_wrent 7 0.01% 3.40% # number of callpals executed
-system.cpu1.kern.callpal_swpipl 65169 87.50% 90.91% # number of callpals executed
+system.cpu1.kern.callpal_swpipl 65072 87.50% 90.90% # number of callpals executed
 system.cpu1.kern.callpal_rdps 2603 3.50% 94.40% # number of callpals executed
 system.cpu1.kern.callpal_wrkgp 1 0.00% 94.40% # number of callpals executed
 system.cpu1.kern.callpal_wrusp 5 0.01% 94.41% # number of callpals executed
 system.cpu1.kern.callpal_rdusp 1 0.00% 94.41% # number of callpals executed
 system.cpu1.kern.callpal_whami 3 0.00% 94.41% # number of callpals executed
-system.cpu1.kern.callpal_rti 3893 5.23% 99.64% # number of callpals executed
+system.cpu1.kern.callpal_rti 3890 5.23% 99.64% # number of callpals executed
 system.cpu1.kern.callpal_callsys 214 0.29% 99.93% # number of callpals executed
 system.cpu1.kern.callpal_imb 52 0.07% 100.00% # number of callpals executed
 system.cpu1.kern.callpal_rdunique 1 0.00% 100.00% # number of callpals executed
 system.cpu1.kern.inst.arm 0 # number of arm instructions executed
-system.cpu1.kern.inst.hwrei 82987 # number of hwrei instructions executed
+system.cpu1.kern.inst.hwrei 82881 # number of hwrei instructions executed
 system.cpu1.kern.inst.ivlb 0 # number of ivlb instructions executed
 system.cpu1.kern.inst.ivle 0 # number of ivle instructions executed
-system.cpu1.kern.inst.quiesce 2512 # number of quiesce instructions executed
-system.cpu1.kern.ipl_count 71472 # number of times we switched to this ipl
-system.cpu1.kern.ipl_count_0 27792 38.89% 38.89% # number of times we switched to this ipl
+system.cpu1.kern.inst.quiesce 2511 # number of quiesce instructions executed
+system.cpu1.kern.ipl_count 71371 # number of times we switched to this ipl
+system.cpu1.kern.ipl_count_0 27750 38.88% 38.88% # number of times we switched to this ipl
 system.cpu1.kern.ipl_count_22 1902 2.66% 41.55% # number of times we switched to this ipl
-system.cpu1.kern.ipl_count_30 507 0.71% 42.26% # number of times we switched to this ipl
-system.cpu1.kern.ipl_count_31 41271 57.74% 100.00% # number of times we switched to this ipl
-system.cpu1.kern.ipl_good 55838 # number of times we switched to this ipl from a different ipl
-system.cpu1.kern.ipl_good_0 26968 48.30% 48.30% # number of times we switched to this ipl from a different ipl
-system.cpu1.kern.ipl_good_22 1902 3.41% 51.70% # number of times we switched to this ipl from a different ipl
-system.cpu1.kern.ipl_good_30 507 0.91% 52.61% # number of times we switched to this ipl from a different ipl
-system.cpu1.kern.ipl_good_31 26461 47.39% 100.00% # number of times we switched to this ipl from a different ipl
-system.cpu1.kern.ipl_ticks 3734901376 # number of cycles we spent at this ipl
-system.cpu1.kern.ipl_ticks_0 3704875983 99.20% 99.20% # number of cycles we spent at this ipl
+system.cpu1.kern.ipl_count_30 506 0.71% 42.26% # number of times we switched to this ipl
+system.cpu1.kern.ipl_count_31 41213 57.74% 100.00% # number of times we switched to this ipl
+system.cpu1.kern.ipl_good 55758 # number of times we switched to this ipl from a different ipl
+system.cpu1.kern.ipl_good_0 26928 48.29% 48.29% # number of times we switched to this ipl from a different ipl
+system.cpu1.kern.ipl_good_22 1902 3.41% 51.71% # number of times we switched to this ipl from a different ipl
+system.cpu1.kern.ipl_good_30 506 0.91% 52.61% # number of times we switched to this ipl from a different ipl
+system.cpu1.kern.ipl_good_31 26422 47.39% 100.00% # number of times we switched to this ipl from a different ipl
+system.cpu1.kern.ipl_ticks 3734898431 # number of cycles we spent at this ipl
+system.cpu1.kern.ipl_ticks_0 3704872588 99.20% 99.20% # number of cycles we spent at this ipl
 system.cpu1.kern.ipl_ticks_22 224436 0.01% 99.20% # number of cycles we spent at this ipl
-system.cpu1.kern.ipl_ticks_30 162794 0.00% 99.21% # number of cycles we spent at this ipl
-system.cpu1.kern.ipl_ticks_31 29638163 0.79% 100.00% # number of cycles we spent at this ipl
-system.cpu1.kern.ipl_used 0.781257 # fraction of swpipl calls that actually changed the ipl
-system.cpu1.kern.ipl_used_0 0.970351 # fraction of swpipl calls that actually changed the ipl
+system.cpu1.kern.ipl_ticks_30 162482 0.00% 99.21% # number of cycles we spent at this ipl
+system.cpu1.kern.ipl_ticks_31 29638925 0.79% 100.00% # number of cycles we spent at this ipl
+system.cpu1.kern.ipl_used 0.781242 # fraction of swpipl calls that actually changed the ipl
+system.cpu1.kern.ipl_used_0 0.970378 # fraction of swpipl calls that actually changed the ipl
 system.cpu1.kern.ipl_used_22 1 # fraction of swpipl calls that actually changed the ipl
 system.cpu1.kern.ipl_used_30 1 # fraction of swpipl calls that actually changed the ipl
-system.cpu1.kern.ipl_used_31 0.641152 # fraction of swpipl calls that actually changed the ipl
-system.cpu1.kern.mode_good_kernel 1094
+system.cpu1.kern.ipl_used_31 0.641108 # fraction of swpipl calls that actually changed the ipl
+system.cpu1.kern.mode_good_kernel 1093
 system.cpu1.kern.mode_good_user 662
-system.cpu1.kern.mode_good_idle 432
-system.cpu1.kern.mode_switch_kernel 2358 # number of protection mode switches
+system.cpu1.kern.mode_good_idle 431
+system.cpu1.kern.mode_switch_kernel 2354 # number of protection mode switches
 system.cpu1.kern.mode_switch_user 662 # number of protection mode switches
-system.cpu1.kern.mode_switch_idle 2831 # number of protection mode switches
-system.cpu1.kern.mode_switch_good 0.373953 # fraction of useful protection mode switches
-system.cpu1.kern.mode_switch_good_kernel 0.463953 # fraction of useful protection mode switches
+system.cpu1.kern.mode_switch_idle 2830 # number of protection mode switches
+system.cpu1.kern.mode_switch_good 0.373931 # fraction of useful protection mode switches
+system.cpu1.kern.mode_switch_good_kernel 0.464316 # fraction of useful protection mode switches
 system.cpu1.kern.mode_switch_good_user 1 # fraction of useful protection mode switches
-system.cpu1.kern.mode_switch_good_idle 0.152596 # fraction of useful protection mode switches
-system.cpu1.kern.mode_ticks_kernel 13374855 0.36% 0.36% # number of ticks spent at the given mode
+system.cpu1.kern.mode_switch_good_idle 0.152297 # fraction of useful protection mode switches
+system.cpu1.kern.mode_ticks_kernel 13359666 0.36% 0.36% # number of ticks spent at the given mode
 system.cpu1.kern.mode_ticks_user 1967356 0.05% 0.41% # number of ticks spent at the given mode
-system.cpu1.kern.mode_ticks_idle 3719559163 99.59% 100.00% # number of ticks spent at the given mode
-system.cpu1.kern.swap_context 2107 # number of times the context was actually changed
+system.cpu1.kern.mode_ticks_idle 3719571407 99.59% 100.00% # number of ticks spent at the given mode
+system.cpu1.kern.swap_context 2103 # number of times the context was actually changed
 system.cpu1.kern.syscall 150 # number of syscalls executed
 system.cpu1.kern.syscall_fork 1 0.67% 0.67% # number of syscalls executed
 system.cpu1.kern.syscall_read 16 10.67% 11.33% # number of syscalls executed
@@ -216,10 +216,10 @@ system.cpu1.kern.syscall_dup2 1 0.67% 96.00% # nu
 system.cpu1.kern.syscall_fcntl 2 1.33% 97.33% # number of syscalls executed
 system.cpu1.kern.syscall_setgid 3 2.00% 99.33% # number of syscalls executed
 system.cpu1.kern.syscall_getrlimit 1 0.67% 100.00% # number of syscalls executed
-system.cpu1.not_idle_fraction 0.005077 # Percentage of non-idle cycles
+system.cpu1.not_idle_fraction 0.005073 # Percentage of non-idle cycles
 system.cpu1.numCycles 0 # number of cpu cycles simulated
-system.cpu1.num_insts 14372190 # Number of instructions executed
-system.cpu1.num_refs 4596339 # Number of memory references
+system.cpu1.num_insts 14364039 # Number of instructions executed
+system.cpu1.num_refs 4590544 # Number of memory references
 system.disk0.dma_read_bytes 1024 # Number of bytes transfered via DMA reads (not PRD).
 system.disk0.dma_read_full_pages 0 # Number of full page size DMA reads (not PRD).
 system.disk0.dma_read_txs 1 # Number of DMA read transactions (not PRD).
@@ -234,7 +234,7 @@ system.disk2.dma_write_full_pages 1 # Nu
 system.disk2.dma_write_txs 1 # Number of DMA write transactions.
system.tsunami.ethernet.coalescedRxDesc # average number of RxDesc's coalesced into each post system.tsunami.ethernet.coalescedRxIdle # average number of RxIdle's coalesced into each post -system.tsunami.ethernet.coalescedRxOk # average number of RxOk's coalesced into each post +system.tsunami.ethernet.coalescedRxOk no value # average number of RxOk's coalesced into each post system.tsunami.ethernet.coalescedRxOrn # average number of RxOrn's coalesced into each post system.tsunami.ethernet.coalescedSwi # average number of Swi's coalesced into each post system.tsunami.ethernet.coalescedTotal # average number of interrupts coalesced into each post diff --git a/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-timing-dual/stderr b/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-timing-dual/stderr index 64d80c0d2..c8703fde1 100644 --- a/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-timing-dual/stderr +++ b/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-timing-dual/stderr @@ -1,6 +1,6 @@ 0: system.tsunami.io.rtc: Real-time clock set to Sun Jan 1 00:00:00 2006 -Listening for console connection on port 3457 -0: system.remote_gdb.listener: listening for remote gdb #0 on port 7001 -0: system.remote_gdb.listener: listening for remote gdb #1 on port 7002 +Listening for console connection on port 3456 +0: system.remote_gdb.listener: listening for remote gdb #0 on port 7000 +0: system.remote_gdb.listener: listening for remote gdb #1 on port 7001 warn: Entering event queue @ 0. Starting simulation... warn: 271343: Trying to launch CPU number 1! diff --git a/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-timing-dual/stdout b/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-timing-dual/stdout index 3b92a25f9..498a94b6f 100644 --- a/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-timing-dual/stdout +++ b/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-timing-dual/stdout @@ -5,8 +5,8 @@ The Regents of The University of Michigan All Rights Reserved -M5 compiled Oct 8 2006 14:07:02 -M5 started Sun Oct 8 14:10:09 2006 -M5 executing on zizzer.eecs.umich.edu +M5 compiled Oct 8 2006 21:57:24 +M5 started Sun Oct 8 22:00:29 2006 +M5 executing on zed.eecs.umich.edu command line: build/ALPHA_FS/m5.opt -d build/ALPHA_FS/tests/opt/quick/10.linux-boot/alpha/linux/tsunami-simple-timing-dual tests/run.py quick/10.linux-boot/alpha/linux/tsunami-simple-timing-dual -Exiting @ tick 3734901822 because m5_exit instruction encountered +Exiting @ tick 3734898877 because m5_exit instruction encountered diff --git a/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-timing/console.system.sim_console b/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-timing/console.system.sim_console index 1d150a047..5461cc4ab 100644 --- a/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-timing/console.system.sim_console +++ b/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-timing/console.system.sim_console @@ -3,7 +3,7 @@ M5 console: m5AlphaAccess @ 0xFFFFFD0200000000 memsize 8000000 pages 4000 First free page after ROM 0xFFFFFC0000018000 HWRPB 0xFFFFFC0000018000 l1pt 0xFFFFFC0000040000 l2pt 0xFFFFFC0000042000 l3pt_rpb 0xFFFFFC0000044000 l3pt_kernel 0xFFFFFC0000048000 l2reserv 0xFFFFFC0000046000 - kstart = 0xFFFFFC0000310000, kend = 0xFFFFFC0000855718, kentry = 0xFFFFFC0000310000, numCPUs = 0x1 + kstart = 0xFFFFFC0000310000, kend = 0xFFFFFC0000855898, kentry = 0xFFFFFC0000310000, numCPUs = 0x1 CPU Clock at 2000 MHz IntrClockFrequency=1024 Booting 
with 1 processor(s) KSP: 0x20043FE8 PTBR 0x20 @@ -16,7 +16,7 @@ M5 console: m5AlphaAccess @ 0xFFFFFD0200000000 k_argc = 0 jumping to kernel at 0xFFFFFC0000310000, (PCBB 0xFFFFFC0000018180 pfn 1067) CallbackFixup 0 18000, t7=FFFFFC000070C000 - Linux version 2.6.13 (hsul@zed.eecs.umich.edu) (gcc version 3.4.3) #7 SMP Tue Aug 15 14:40:31 EDT 2006 + Linux version 2.6.13 (hsul@zed.eecs.umich.edu) (gcc version 3.4.3) #1 SMP Sun Oct 8 19:52:07 EDT 2006 Booting GENERIC on Tsunami variation DP264 using machine vector DP264 from SRM Major Options: SMP LEGACY_START VERBOSE_MCHECK Command line: root=/dev/hda1 console=ttyS0 @@ -32,7 +32,7 @@ M5 console: m5AlphaAccess @ 0xFFFFFD0200000000 Console: colour dummy device 80x25 Dentry cache hash table entries: 32768 (order: 5, 262144 bytes) Inode-cache hash table entries: 16384 (order: 4, 131072 bytes) - Memory: 118784k/131072k available (3316k kernel code, 8952k reserved, 983k data, 224k init) + Memory: 118784k/131072k available (3314k kernel code, 8952k reserved, 983k data, 224k init) Mount-cache hash table entries: 512 SMP mode deactivated. Brought up 1 CPUs @@ -54,9 +54,7 @@ M5 console: m5AlphaAccess @ 0xFFFFFD0200000000 io scheduler cfq registered loop: loaded (max 8 devices) nbd: registered device at major 43 - sinic.c: M5 Simple Integrated NIC driver ns83820.c: National Semiconductor DP83820 10/100/1000 driver. - ns83820: irq bound to CPU 0 eth0: ns83820.c: 0x22c: 00000000, subsystem: 0000:0000 eth0: enabling optical transceiver eth0: using 64 bit addressing. diff --git a/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-timing/m5stats.txt b/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-timing/m5stats.txt index 8b1a2f192..ba645e5c7 100644 --- a/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-timing/m5stats.txt +++ b/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-timing/m5stats.txt @@ -1,86 +1,86 @@ ---------- Begin Simulation Statistics ---------- -host_inst_rate 778282 # Simulator instruction rate (inst/s) -host_mem_usage 196900 # Number of bytes of host memory used -host_seconds 79.42 # Real time elapsed on the host -host_tick_rate 45984556 # Simulator tick rate (ticks/s) +host_inst_rate 740935 # Simulator instruction rate (inst/s) +host_mem_usage 196820 # Number of bytes of host memory used +host_seconds 83.36 # Real time elapsed on the host +host_tick_rate 43810981 # Simulator tick rate (ticks/s) sim_freq 2000000000 # Frequency of simulated ticks -sim_insts 61806956 # Number of instructions simulated -sim_seconds 1.825933 # Number of seconds simulated -sim_ticks 3651865694 # Number of ticks simulated -system.cpu.dtb.accesses 1304498 # DTB accesses +sim_insts 61760478 # Number of instructions simulated +sim_seconds 1.825937 # Number of seconds simulated +sim_ticks 3651873858 # Number of ticks simulated +system.cpu.dtb.accesses 1304494 # DTB accesses system.cpu.dtb.acv 367 # DTB access violations -system.cpu.dtb.hits 16557993 # DTB hits +system.cpu.dtb.hits 16545335 # DTB hits system.cpu.dtb.misses 11425 # DTB misses -system.cpu.dtb.read_accesses 900427 # DTB read accesses +system.cpu.dtb.read_accesses 900425 # DTB read accesses system.cpu.dtb.read_acv 210 # DTB read access violations -system.cpu.dtb.read_hits 10039007 # DTB read hits +system.cpu.dtb.read_hits 10034117 # DTB read hits system.cpu.dtb.read_misses 10280 # DTB read misses -system.cpu.dtb.write_accesses 404071 # DTB write accesses +system.cpu.dtb.write_accesses 404069 # DTB write accesses system.cpu.dtb.write_acv 157 # DTB write access violations 
-system.cpu.dtb.write_hits 6518986 # DTB write hits +system.cpu.dtb.write_hits 6511218 # DTB write hits system.cpu.dtb.write_misses 1145 # DTB write misses -system.cpu.idle_fraction 0.978522 # Percentage of idle cycles +system.cpu.idle_fraction 0.978539 # Percentage of idle cycles system.cpu.itb.accesses 3281311 # ITB accesses system.cpu.itb.acv 184 # ITB acv system.cpu.itb.hits 3276321 # ITB hits system.cpu.itb.misses 4990 # ITB misses -system.cpu.kern.callpal 194059 # number of callpals executed +system.cpu.kern.callpal 193987 # number of callpals executed system.cpu.kern.callpal_cserve 1 0.00% 0.00% # number of callpals executed system.cpu.kern.callpal_wrmces 1 0.00% 0.00% # number of callpals executed system.cpu.kern.callpal_wrfen 1 0.00% 0.00% # number of callpals executed system.cpu.kern.callpal_wrvptptr 1 0.00% 0.00% # number of callpals executed -system.cpu.kern.callpal_swpctx 4207 2.17% 2.17% # number of callpals executed +system.cpu.kern.callpal_swpctx 4203 2.17% 2.17% # number of callpals executed system.cpu.kern.callpal_tbi 54 0.03% 2.20% # number of callpals executed system.cpu.kern.callpal_wrent 7 0.00% 2.20% # number of callpals executed -system.cpu.kern.callpal_swpipl 176948 91.18% 93.38% # number of callpals executed -system.cpu.kern.callpal_rdps 6887 3.55% 96.93% # number of callpals executed +system.cpu.kern.callpal_swpipl 176881 91.18% 93.38% # number of callpals executed +system.cpu.kern.callpal_rdps 6888 3.55% 96.93% # number of callpals executed system.cpu.kern.callpal_wrkgp 1 0.00% 96.93% # number of callpals executed system.cpu.kern.callpal_wrusp 7 0.00% 96.94% # number of callpals executed system.cpu.kern.callpal_rdusp 9 0.00% 96.94% # number of callpals executed system.cpu.kern.callpal_whami 2 0.00% 96.94% # number of callpals executed -system.cpu.kern.callpal_rti 5221 2.69% 99.63% # number of callpals executed +system.cpu.kern.callpal_rti 5219 2.69% 99.63% # number of callpals executed system.cpu.kern.callpal_callsys 531 0.27% 99.91% # number of callpals executed system.cpu.kern.callpal_imb 181 0.09% 100.00% # number of callpals executed system.cpu.kern.inst.arm 0 # number of arm instructions executed -system.cpu.kern.inst.hwrei 213133 # number of hwrei instructions executed +system.cpu.kern.inst.hwrei 213061 # number of hwrei instructions executed system.cpu.kern.inst.ivlb 0 # number of ivlb instructions executed system.cpu.kern.inst.ivle 0 # number of ivle instructions executed -system.cpu.kern.inst.quiesce 6280 # number of quiesce instructions executed -system.cpu.kern.ipl_count 184276 # number of times we switched to this ipl -system.cpu.kern.ipl_count_0 75422 40.93% 40.93% # number of times we switched to this ipl +system.cpu.kern.inst.quiesce 6207 # number of quiesce instructions executed +system.cpu.kern.ipl_count 184207 # number of times we switched to this ipl +system.cpu.kern.ipl_count_0 75390 40.93% 40.93% # number of times we switched to this ipl system.cpu.kern.ipl_count_21 245 0.13% 41.06% # number of times we switched to this ipl system.cpu.kern.ipl_count_22 1861 1.01% 42.07% # number of times we switched to this ipl -system.cpu.kern.ipl_count_31 106748 57.93% 100.00% # number of times we switched to this ipl -system.cpu.kern.ipl_good 150212 # number of times we switched to this ipl from a different ipl -system.cpu.kern.ipl_good_0 74053 49.30% 49.30% # number of times we switched to this ipl from a different ipl +system.cpu.kern.ipl_count_31 106711 57.93% 100.00% # number of times we switched to this ipl +system.cpu.kern.ipl_good 150152 # number 
of times we switched to this ipl from a different ipl +system.cpu.kern.ipl_good_0 74023 49.30% 49.30% # number of times we switched to this ipl from a different ipl system.cpu.kern.ipl_good_21 245 0.16% 49.46% # number of times we switched to this ipl from a different ipl system.cpu.kern.ipl_good_22 1861 1.24% 50.70% # number of times we switched to this ipl from a different ipl -system.cpu.kern.ipl_good_31 74053 49.30% 100.00% # number of times we switched to this ipl from a different ipl -system.cpu.kern.ipl_ticks 3651865248 # number of cycles we spent at this ipl -system.cpu.kern.ipl_ticks_0 3611061665 98.88% 98.88% # number of cycles we spent at this ipl -system.cpu.kern.ipl_ticks_21 53683 0.00% 98.88% # number of cycles we spent at this ipl +system.cpu.kern.ipl_good_31 74023 49.30% 100.00% # number of times we switched to this ipl from a different ipl +system.cpu.kern.ipl_ticks 3651873412 # number of cycles we spent at this ipl +system.cpu.kern.ipl_ticks_0 3611240657 98.89% 98.89% # number of cycles we spent at this ipl +system.cpu.kern.ipl_ticks_21 53683 0.00% 98.89% # number of cycles we spent at this ipl system.cpu.kern.ipl_ticks_22 219598 0.01% 98.89% # number of cycles we spent at this ipl -system.cpu.kern.ipl_ticks_31 40530302 1.11% 100.00% # number of cycles we spent at this ipl -system.cpu.kern.ipl_used 0.815147 # fraction of swpipl calls that actually changed the ipl -system.cpu.kern.ipl_used_0 0.981849 # fraction of swpipl calls that actually changed the ipl +system.cpu.kern.ipl_ticks_31 40359474 1.11% 100.00% # number of cycles we spent at this ipl +system.cpu.kern.ipl_used 0.815126 # fraction of swpipl calls that actually changed the ipl +system.cpu.kern.ipl_used_0 0.981868 # fraction of swpipl calls that actually changed the ipl system.cpu.kern.ipl_used_21 1 # fraction of swpipl calls that actually changed the ipl system.cpu.kern.ipl_used_22 1 # fraction of swpipl calls that actually changed the ipl -system.cpu.kern.ipl_used_31 0.693718 # fraction of swpipl calls that actually changed the ipl -system.cpu.kern.mode_good_kernel 1935 -system.cpu.kern.mode_good_user 1755 +system.cpu.kern.ipl_used_31 0.693677 # fraction of swpipl calls that actually changed the ipl +system.cpu.kern.mode_good_kernel 1934 +system.cpu.kern.mode_good_user 1754 system.cpu.kern.mode_good_idle 180 -system.cpu.kern.mode_switch_kernel 5988 # number of protection mode switches -system.cpu.kern.mode_switch_user 1755 # number of protection mode switches +system.cpu.kern.mode_switch_kernel 5984 # number of protection mode switches +system.cpu.kern.mode_switch_user 1754 # number of protection mode switches system.cpu.kern.mode_switch_idle 2104 # number of protection mode switches -system.cpu.kern.mode_switch_good 0.393013 # fraction of useful protection mode switches -system.cpu.kern.mode_switch_good_kernel 0.323146 # fraction of useful protection mode switches +system.cpu.kern.mode_switch_good 0.393010 # fraction of useful protection mode switches +system.cpu.kern.mode_switch_good_kernel 0.323195 # fraction of useful protection mode switches system.cpu.kern.mode_switch_good_user 1 # fraction of useful protection mode switches system.cpu.kern.mode_switch_good_idle 0.085551 # fraction of useful protection mode switches -system.cpu.kern.mode_ticks_kernel 58882589 1.61% 1.61% # number of ticks spent at the given mode -system.cpu.kern.mode_ticks_user 4685612 0.13% 1.74% # number of ticks spent at the given mode -system.cpu.kern.mode_ticks_idle 3588297045 98.26% 100.00% # number of ticks spent at the given mode 
-system.cpu.kern.swap_context 4208 # number of times the context was actually changed +system.cpu.kern.mode_ticks_kernel 58926919 1.61% 1.61% # number of ticks spent at the given mode +system.cpu.kern.mode_ticks_user 4685602 0.13% 1.74% # number of ticks spent at the given mode +system.cpu.kern.mode_ticks_idle 3588260889 98.26% 100.00% # number of ticks spent at the given mode +system.cpu.kern.swap_context 4204 # number of times the context was actually changed system.cpu.kern.syscall 329 # number of syscalls executed system.cpu.kern.syscall_fork 8 2.43% 2.43% # number of syscalls executed system.cpu.kern.syscall_read 30 9.12% 11.55% # number of syscalls executed @@ -112,10 +112,10 @@ system.cpu.kern.syscall_connect 2 0.61% 97.57% # nu system.cpu.kern.syscall_setgid 4 1.22% 98.78% # number of syscalls executed system.cpu.kern.syscall_getrlimit 2 0.61% 99.39% # number of syscalls executed system.cpu.kern.syscall_setsid 2 0.61% 100.00% # number of syscalls executed -system.cpu.not_idle_fraction 0.021478 # Percentage of non-idle cycles +system.cpu.not_idle_fraction 0.021461 # Percentage of non-idle cycles system.cpu.numCycles 0 # number of cpu cycles simulated -system.cpu.num_insts 61806956 # Number of instructions executed -system.cpu.num_refs 16806539 # Number of memory references +system.cpu.num_insts 61760478 # Number of instructions executed +system.cpu.num_refs 16793874 # Number of memory references system.disk0.dma_read_bytes 1024 # Number of bytes transfered via DMA reads (not PRD). system.disk0.dma_read_full_pages 0 # Number of full page size DMA reads (not PRD). system.disk0.dma_read_txs 1 # Number of DMA read transactions (not PRD). @@ -130,9 +130,9 @@ system.disk2.dma_write_full_pages 1 # Nu system.disk2.dma_write_txs 1 # Number of DMA write transactions. system.tsunami.ethernet.coalescedRxDesc # average number of RxDesc's coalesced into each post system.tsunami.ethernet.coalescedRxIdle # average number of RxIdle's coalesced into each post -system.tsunami.ethernet.coalescedRxOk no value # average number of RxOk's coalesced into each post +system.tsunami.ethernet.coalescedRxOk # average number of RxOk's coalesced into each post system.tsunami.ethernet.coalescedRxOrn # average number of RxOrn's coalesced into each post -system.tsunami.ethernet.coalescedSwi no value # average number of Swi's coalesced into each post +system.tsunami.ethernet.coalescedSwi # average number of Swi's coalesced into each post system.tsunami.ethernet.coalescedTotal # average number of interrupts coalesced into each post system.tsunami.ethernet.coalescedTxDesc # average number of TxDesc's coalesced into each post system.tsunami.ethernet.coalescedTxIdle # average number of TxIdle's coalesced into each post diff --git a/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-timing/stderr b/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-timing/stderr index 4741dd710..6204251a5 100644 --- a/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-timing/stderr +++ b/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-timing/stderr @@ -1,4 +1,4 @@ 0: system.tsunami.io.rtc: Real-time clock set to Sun Jan 1 00:00:00 2006 -Listening for console connection on port 3457 -0: system.remote_gdb.listener: listening for remote gdb #0 on port 7001 +Listening for console connection on port 3456 +0: system.remote_gdb.listener: listening for remote gdb #0 on port 7000 warn: Entering event queue @ 0. Starting simulation... 
diff --git a/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-timing/stdout b/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-timing/stdout index 8c667881d..b54e58e73 100644 --- a/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-timing/stdout +++ b/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-timing/stdout @@ -5,8 +5,8 @@ The Regents of The University of Michigan All Rights Reserved -M5 compiled Oct 8 2006 14:07:02 -M5 started Sun Oct 8 14:08:49 2006 -M5 executing on zizzer.eecs.umich.edu +M5 compiled Oct 8 2006 21:57:24 +M5 started Sun Oct 8 21:59:05 2006 +M5 executing on zed.eecs.umich.edu command line: build/ALPHA_FS/m5.opt -d build/ALPHA_FS/tests/opt/quick/10.linux-boot/alpha/linux/tsunami-simple-timing tests/run.py quick/10.linux-boot/alpha/linux/tsunami-simple-timing -Exiting @ tick 3651865694 because m5_exit instruction encountered +Exiting @ tick 3651873858 because m5_exit instruction encountered -- cgit v1.2.3 From 91c76278b95f74a7a237ba9d89ad2818c2e20a4d Mon Sep 17 00:00:00 2001 From: Steve Reinhardt Date: Sun, 8 Oct 2006 19:11:06 -0700 Subject: Set cpu_id params (required by ll/sc code now). --HG-- extra : convert_revision : e0f7ccbeccca191a8edb54494d2b4f9369e9914c --- configs/example/fs.py | 2 ++ configs/example/se.py | 1 + 2 files changed, 3 insertions(+) diff --git a/configs/example/fs.py b/configs/example/fs.py index 5edda6e5f..6db26a02a 100644 --- a/configs/example/fs.py +++ b/configs/example/fs.py @@ -77,6 +77,8 @@ else: cpu.clock = '2GHz' cpu2.clock = '2GHz' +cpu.cpu_id = 0 +cpu2.cpu_id = 0 if options.benchmark: if options.benchmark not in Benchmarks: diff --git a/configs/example/se.py b/configs/example/se.py index de8b6c890..d1d19eebc 100644 --- a/configs/example/se.py +++ b/configs/example/se.py @@ -91,6 +91,7 @@ else: cpu = AtomicSimpleCPU() cpu.workload = process +cpu.cpu_id = 0 system = System(cpu = cpu, physmem = PhysicalMemory(), -- cgit v1.2.3 From d52117d1e3b833727ce115c0d6fafeabd826bd90 Mon Sep 17 00:00:00 2001 From: Lisa Hsu Date: Sun, 8 Oct 2006 23:16:40 -0400 Subject: add in serialization of AtomicSimpleCPU _status. This is needed because right now unserializing breaks an assert, since CPU status is not saved. Kev says that this will break uniform serialization across CPUs, since each type of CPU has its own "status" enum set. So the repercussion is that if you serialize in this CPU, you must first unserialize in this same CPU before switching to a different CPU model. src/cpu/simple/atomic.cc: add in serialization of AtomicSimpleCPU _status. Kev says that this will break uniform serialization across CPUs, since each type of CPU has its own "status" enum set. So the repercussion is that if you serialize in this CPU, you must first unserialize in this same CPU before switching to a different CPU model.
--HG-- extra : convert_revision : 7000f660aecea6fef712bf81853d9a7b90d625ee --- src/cpu/simple/atomic.cc | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/cpu/simple/atomic.cc b/src/cpu/simple/atomic.cc index 42b0e9783..b0356b2bf 100644 --- a/src/cpu/simple/atomic.cc +++ b/src/cpu/simple/atomic.cc @@ -161,6 +161,8 @@ AtomicSimpleCPU::serialize(ostream &os) { SimObject::State so_state = SimObject::getState(); SERIALIZE_ENUM(so_state); + Status _status = status(); + SERIALIZE_ENUM(_status); BaseSimpleCPU::serialize(os); nameOut(os, csprintf("%s.tickEvent", name())); tickEvent.serialize(os); @@ -171,6 +173,7 @@ AtomicSimpleCPU::unserialize(Checkpoint *cp, const string &section) { SimObject::State so_state; UNSERIALIZE_ENUM(so_state); + UNSERIALIZE_ENUM(_status); BaseSimpleCPU::unserialize(cp, section); tickEvent.unserialize(cp, csprintf("%s.tickEvent", section)); } -- cgit v1.2.3 From 97c1f6eff75fb1698b04f0f681681cbf80ba58c8 Mon Sep 17 00:00:00 2001 From: Lisa Hsu Date: Sun, 8 Oct 2006 23:18:19 -0400 Subject: post checkpoint restoration, the bus ranges need to be re-initialized for ALL pci devs, not just ide. src/dev/ide_ctrl.cc: this range change needs to be done for all pio devices, not just the ide. src/dev/pcidev.cc: range change needs to be done here, not in the ide_ctrl file. --HG-- extra : convert_revision : 60c65c55e965b02d671dba7aa8793e5a81f40348 --- src/dev/ide_ctrl.cc | 1 - src/dev/pcidev.cc | 2 ++ 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/src/dev/ide_ctrl.cc b/src/dev/ide_ctrl.cc index e8d7f4817..8007fda5e 100644 --- a/src/dev/ide_ctrl.cc +++ b/src/dev/ide_ctrl.cc @@ -742,7 +742,6 @@ IdeController::unserialize(Checkpoint *cp, const std::string &section) UNSERIALIZE_SCALAR(bm_enabled); UNSERIALIZE_ARRAY(cmd_in_progress, sizeof(cmd_in_progress) / sizeof(cmd_in_progress[0])); - pioPort->sendStatusChange(Port::RangeChange); } #ifndef DOXYGEN_SHOULD_SKIP_THIS diff --git a/src/dev/pcidev.cc b/src/dev/pcidev.cc index c3b83f448..b16ddb31a 100644 --- a/src/dev/pcidev.cc +++ b/src/dev/pcidev.cc @@ -302,6 +302,8 @@ PciDev::unserialize(Checkpoint *cp, const std::string &section) UNSERIALIZE_ARRAY(BARAddrs, sizeof(BARAddrs) / sizeof(BARAddrs[0])); UNSERIALIZE_ARRAY(config.data, sizeof(config.data) / sizeof(config.data[0])); + pioPort->sendStatusChange(Port::RangeChange); + } #ifndef DOXYGEN_SHOULD_SKIP_THIS -- cgit v1.2.3 From 67a114fc29662d262a3d7ae867f6ee4c25c0ce8f Mon Sep 17 00:00:00 2001 From: Lisa Hsu Date: Mon, 9 Oct 2006 00:12:16 -0400 Subject: add in checkpoint restoration option. You can restore a checkpoint by giving a directory and then a checkpoint number; the earliest checkpoint is 1, the latest is N. The default checkpoint directory is the cwd, so you can restore with a command line like this: m5.opt fs.py --checkpoint_dir="/my/ckpt/dir" -c 3 configs/example/fs.py: add in checkpoint restoration option. You can restore a checkpoint by giving a directory and then a checkpoint number; the earliest checkpoint is 1, the latest is N.
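(For reference: a minimal sketch, not part of the patch, of the lookup this option performs. It mirrors the fs.py hunk in the commit below; m5.restoreCheckpoint is the call that hunk uses, while the numeric sort is an added assumption for determinism, since the hunk itself takes checkpoints in listdir() order.)

import os, re

def find_checkpoint(cptdir, index):
    # Checkpoint directories are named cpt.<tick>; index is 1-based, so
    # index 1 is the earliest checkpoint and index N is the latest.
    expr = re.compile(r'cpt\.([0-9]+)')
    ticks = []
    for d in os.listdir(cptdir):
        match = expr.match(d)
        if match:
            ticks.append(int(match.group(1)))
    ticks.sort()
    if not 1 <= index <= len(ticks):
        raise ValueError('Checkpoint %d not found' % index)
    return os.path.join(cptdir, 'cpt.%d' % ticks[index - 1])

# Usage, equivalent to: m5.opt fs.py --checkpoint_dir="/my/ckpt/dir" -c 3
# m5.restoreCheckpoint(root, find_checkpoint('/my/ckpt/dir', 3))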
--HG-- extra : convert_revision : bf9c8d3265a3875cdfb6a878005baa7ae29af90d --- configs/example/fs.py | 27 +++++++++++++++++++++++++++ 1 file changed, 27 insertions(+) diff --git a/configs/example/fs.py b/configs/example/fs.py index 6db26a02a..0dadcbe1b 100644 --- a/configs/example/fs.py +++ b/configs/example/fs.py @@ -55,6 +55,8 @@ parser.add_option("--etherdump", action="store", type="string", dest="etherdump" "ethernet traffic") parser.add_option("--checkpoint_dir", action="store", type="string", help="Place all checkpoints in this absolute directory") +parser.add_option("-c", "--checkpoint", action="store", type="int", + help="restore from checkpoint <N>") (options, args) = parser.parse_args() @@ -115,6 +117,31 @@ else: m5.instantiate(root) +if options.checkpoint: + from os.path import isdir + from os import listdir, getcwd + import re + if options.checkpoint_dir: + cptdir = options.checkpoint_dir + else: + cptdir = getcwd() + + if not isdir(cptdir): + m5.panic("checkpoint dir %s does not exist!" % cptdir) + + dirs = listdir(cptdir) + expr = re.compile('cpt.([0-9]*)') + cpts = [] + for dir in dirs: + match = expr.match(dir) + if match: + cpts.append(match.group(1)) + + if options.checkpoint > len(cpts): + m5.panic('Checkpoint %d not found' % options.checkpoint) + + m5.restoreCheckpoint(root, "/".join([cptdir, "cpt.%s" % cpts[options.checkpoint - 1]])) + if options.maxtick: maxtick = options.maxtick elif options.maxtime: -- cgit v1.2.3 From 6c7ab02682aba37c173962ec907b97483625d18b Mon Sep 17 00:00:00 2001 From: Ron Dreslinski Date: Mon, 9 Oct 2006 00:26:10 -0400 Subject: Update the Memtester, commit a config file/test for it. src/cpu/SConscript: Add memtester to the compilation environment. Someone who knows this better should make the MemTest a cpu model parameter. For now it is attached to the build of the o3 cpu.
src/cpu/memtest/memtest.cc: src/cpu/memtest/memtest.hh: Update Memtest for new mem system src/python/m5/objects/MemTest.py: Update memtest python description --HG-- extra : convert_revision : d6a63e08fda0975a7abfb23814a86a0caf53e482 --- src/cpu/SConscript | 1 + src/cpu/memtest/memtest.cc | 328 ++++++++++++++++++++++++--------------- src/cpu/memtest/memtest.hh | 102 ++++++++---- src/python/m5/objects/MemTest.py | 10 +- tests/configs/memtest.py | 88 +++++++++++ tests/quick/50.memtest/test.py | 28 ++++ 6 files changed, 392 insertions(+), 165 deletions(-) create mode 100644 tests/configs/memtest.py create mode 100644 tests/quick/50.memtest/test.py diff --git a/src/cpu/SConscript b/src/cpu/SConscript index 2bb9a2399..5771a7904 100644 --- a/src/cpu/SConscript +++ b/src/cpu/SConscript @@ -158,6 +158,7 @@ if 'O3CPU' in env['CPU_MODELS']: o3/scoreboard.cc o3/store_set.cc ''') + sources += Split('memtest/memtest.cc') if env['USE_CHECKER']: sources += Split('o3/checker_builder.cc') else: diff --git a/src/cpu/memtest/memtest.cc b/src/cpu/memtest/memtest.cc index 7ea9eaefc..186b6ba50 100644 --- a/src/cpu/memtest/memtest.cc +++ b/src/cpu/memtest/memtest.cc @@ -38,39 +38,80 @@ #include "base/misc.hh" #include "base/statistics.hh" -#include "cpu/simple_thread.hh" +//#include "cpu/simple_thread.hh" #include "cpu/memtest/memtest.hh" -#include "mem/cache/base_cache.hh" +//#include "mem/cache/base_cache.hh" +//#include "mem/physical.hh" #include "sim/builder.hh" #include "sim/sim_events.hh" #include "sim/stats.hh" +#include "mem/packet.hh" +#include "mem/request.hh" +#include "mem/port.hh" +#include "mem/mem_object.hh" using namespace std; -using namespace TheISA; int TESTER_ALLOCATOR=0; +bool +MemTest::CpuPort::recvTiming(Packet *pkt) +{ + memtest->completeRequest(pkt); + return true; +} + +Tick +MemTest::CpuPort::recvAtomic(Packet *pkt) +{ + panic("MemTest doesn't expect recvAtomic callback!"); + return curTick; +} + +void +MemTest::CpuPort::recvFunctional(Packet *pkt) +{ + memtest->completeRequest(pkt); +} + +void +MemTest::CpuPort::recvStatusChange(Status status) +{ + if (status == RangeChange) + return; + + panic("MemTest doesn't expect recvStatusChange callback!"); +} + +void +MemTest::CpuPort::recvRetry() +{ + memtest->doRetry(); +} + MemTest::MemTest(const string &name, - MemInterface *_cache_interface, - FunctionalMemory *main_mem, - FunctionalMemory *check_mem, +// MemInterface *_cache_interface, +// PhysicalMemory *main_mem, +// PhysicalMemory *check_mem, unsigned _memorySize, unsigned _percentReads, - unsigned _percentCopies, +// unsigned _percentCopies, unsigned _percentUncacheable, unsigned _progressInterval, unsigned _percentSourceUnaligned, unsigned _percentDestUnaligned, Addr _traceAddr, Counter _max_loads) - : SimObject(name), + : MemObject(name), tickEvent(this), - cacheInterface(_cache_interface), - mainMem(main_mem), - checkMem(check_mem), + cachePort("test", this), + funcPort("functional", this), + retryPkt(NULL), +// mainMem(main_mem), +// checkMem(check_mem), size(_memorySize), percentReads(_percentReads), - percentCopies(_percentCopies), +// percentCopies(_percentCopies), percentUncacheable(_percentUncacheable), progressInterval(_progressInterval), nextProgressMessage(_progressInterval), @@ -81,43 +122,52 @@ MemTest::MemTest(const string &name, vector cmd; cmd.push_back("/bin/ls"); vector null_vec; - thread = new SimpleThread(NULL, 0, mainMem, 0); - - blockSize = cacheInterface->getBlockSize(); - blockAddrMask = blockSize - 1; - traceBlockAddr = blockAddr(_traceAddr); - - //setup data 
storage with interesting values - uint8_t *data1 = new uint8_t[size]; - uint8_t *data2 = new uint8_t[size]; - uint8_t *data3 = new uint8_t[size]; - memset(data1, 1, size); - memset(data2, 2, size); - memset(data3, 3, size); + // thread = new SimpleThread(NULL, 0, NULL, 0, mainMem); curTick = 0; + // Needs to be masked off once we know the block size. + traceBlockAddr = _traceAddr; baseAddr1 = 0x100000; baseAddr2 = 0x400000; uncacheAddr = 0x800000; - // set up intial memory contents here - mainMem->prot_write(baseAddr1, data1, size); - checkMem->prot_write(baseAddr1, data1, size); - mainMem->prot_write(baseAddr2, data2, size); - checkMem->prot_write(baseAddr2, data2, size); - mainMem->prot_write(uncacheAddr, data3, size); - checkMem->prot_write(uncacheAddr, data3, size); - - delete [] data1; - delete [] data2; - delete [] data3; - // set up counters noResponseCycles = 0; numReads = 0; tickEvent.schedule(0); id = TESTER_ALLOCATOR++; + + accessRetry = false; +} + +Port * +MemTest::getPort(const std::string &if_name, int idx) +{ + if (if_name == "functional") + return &funcPort; + else if (if_name == "test") + return &cachePort; + else + panic("No Such Port\n"); +} + +void +MemTest::init() +{ + // By the time init() is called, the ports should be hooked up. + blockSize = cachePort.peerBlockSize(); + blockAddrMask = blockSize - 1; + traceBlockAddr = blockAddr(traceBlockAddr); + + // set up intial memory contents here + + cachePort.memsetBlob(baseAddr1, 1, size); + funcPort.memsetBlob(baseAddr1, 1, size); + cachePort.memsetBlob(baseAddr2, 2, size); + funcPort.memsetBlob(baseAddr2, 2, size); + cachePort.memsetBlob(uncacheAddr, 3, size); + funcPort.memsetBlob(uncacheAddr, 3, size); } static void @@ -132,23 +182,31 @@ printData(ostream &os, uint8_t *data, int nbytes) } void -MemTest::completeRequest(MemReqPtr &req, uint8_t *data) +MemTest::completeRequest(Packet *pkt) { + MemTestSenderState *state = + dynamic_cast(pkt->senderState); + + uint8_t *data = state->data; + uint8_t *pkt_data = pkt->getPtr(); + Request *req = pkt->req; + //Remove the address from the list of outstanding - std::set::iterator removeAddr = outstandingAddrs.find(req->paddr); + std::set::iterator removeAddr = outstandingAddrs.find(req->getPaddr()); assert(removeAddr != outstandingAddrs.end()); outstandingAddrs.erase(removeAddr); - switch (req->cmd) { - case Read: - if (memcmp(req->data, data, req->size) != 0) { - cerr << name() << ": on read of 0x" << hex << req->paddr - << " (0x" << hex << blockAddr(req->paddr) << ")" + switch (pkt->cmd) { + case Packet::ReadResp: + + if (memcmp(pkt_data, data, pkt->getSize()) != 0) { + cerr << name() << ": on read of 0x" << hex << req->getPaddr() + << " (0x" << hex << blockAddr(req->getPaddr()) << ")" << "@ cycle " << dec << curTick << ", cache returns 0x"; - printData(cerr, req->data, req->size); + printData(cerr, pkt_data, pkt->getSize()); cerr << ", expected 0x"; - printData(cerr, data, req->size); + printData(cerr, data, pkt->getSize()); cerr << endl; fatal(""); } @@ -163,13 +221,13 @@ MemTest::completeRequest(MemReqPtr &req, uint8_t *data) } if (numReads >= maxLoads) - SimExit(curTick, "Maximum number of loads reached!"); + exitSimLoop("Maximum number of loads reached!"); break; - case Write: + case Packet::WriteResp: numWritesStat++; break; - +/* case Copy: //Also remove dest from outstanding list removeAddr = outstandingAddrs.find(req->dest); @@ -177,36 +235,37 @@ MemTest::completeRequest(MemReqPtr &req, uint8_t *data) outstandingAddrs.erase(removeAddr); numCopiesStat++; break; - +*/ 
default: panic("invalid command"); } - if (blockAddr(req->paddr) == traceBlockAddr) { + if (blockAddr(req->getPaddr()) == traceBlockAddr) { cerr << name() << ": completed " - << (req->cmd.isWrite() ? "write" : "read") + << (pkt->isWrite() ? "write" : "read") << " access of " - << dec << req->size << " bytes at address 0x" - << hex << req->paddr - << " (0x" << hex << blockAddr(req->paddr) << ")" + << dec << pkt->getSize() << " bytes at address 0x" + << hex << req->getPaddr() + << " (0x" << hex << blockAddr(req->getPaddr()) << ")" << ", value = 0x"; - printData(cerr, req->data, req->size); + printData(cerr, pkt_data, pkt->getSize()); cerr << " @ cycle " << dec << curTick; cerr << endl; } noResponseCycles = 0; + delete state; delete [] data; + delete pkt->req; + delete pkt; } - void MemTest::regStats() { using namespace Stats; - numReadsStat .name(name() + ".num_reads") .desc("number of read accesses completed") @@ -234,7 +293,7 @@ MemTest::tick() fatal(""); } - if (cacheInterface->isBlocked()) { + if (accessRetry) { return; } @@ -248,30 +307,30 @@ MemTest::tick() //If we aren't doing copies, use id as offset, and do a false sharing //mem tester - if (percentCopies == 0) { - //We can eliminate the lower bits of the offset, and then use the id - //to offset within the blks - offset &= ~63; //Not the low order bits - offset += id; - access_size = 0; - } + //We can eliminate the lower bits of the offset, and then use the id + //to offset within the blks + offset &= ~63; //Not the low order bits + offset += id; + access_size = 0; - MemReqPtr req = new MemReq(); + Request *req = new Request(); + uint32_t flags = 0; + Addr paddr; if (cacheable < percentUncacheable) { - req->flags |= UNCACHEABLE; - req->paddr = uncacheAddr + offset; + flags |= UNCACHEABLE; + paddr = uncacheAddr + offset; } else { - req->paddr = ((base) ? baseAddr1 : baseAddr2) + offset; + paddr = ((base) ? baseAddr1 : baseAddr2) + offset; } // bool probe = (random() % 2 == 1) && !req->isUncacheable(); bool probe = false; - req->size = 1 << access_size; - req->data = new uint8_t[req->size]; - req->paddr &= ~(req->size - 1); - req->time = curTick; - req->xc = thread->getProxy(); + paddr &= ~((1 << access_size) - 1); + req->setPhys(paddr, 1 << access_size, flags); + req->setThreadContext(id,0); + + uint8_t *result = new uint8_t[8]; if (cmd < percentReads) { // read @@ -279,60 +338,81 @@ MemTest::tick() //For now we only allow one outstanding request per addreess per tester //This means we assume CPU does write forwarding to reads that alias something //in the cpu store buffer. - if (outstandingAddrs.find(req->paddr) != outstandingAddrs.end()) return; - else outstandingAddrs.insert(req->paddr); + if (outstandingAddrs.find(paddr) != outstandingAddrs.end()) return; + else outstandingAddrs.insert(paddr); - req->cmd = Read; - uint8_t *result = new uint8_t[8]; - checkMem->access(Read, req->paddr, result, req->size); - if (blockAddr(req->paddr) == traceBlockAddr) { + // ***** NOTE FOR RON: I'm not sure how to access checkMem. - Kevin + funcPort.readBlob(req->getPaddr(), result, req->getSize()); + + if (blockAddr(paddr) == traceBlockAddr) { cerr << name() << ": initiating read " << ((probe) ? 
"probe of " : "access of ") - << dec << req->size << " bytes from addr 0x" - << hex << req->paddr - << " (0x" << hex << blockAddr(req->paddr) << ")" + << dec << req->getSize() << " bytes from addr 0x" + << hex << paddr + << " (0x" << hex << blockAddr(paddr) << ")" << " at cycle " << dec << curTick << endl; } + + Packet *pkt = new Packet(req, Packet::ReadReq, Packet::Broadcast); + pkt->dataDynamicArray(new uint8_t[req->getSize()]); + MemTestSenderState *state = new MemTestSenderState(result); + pkt->senderState = state; + if (probe) { - cacheInterface->probeAndUpdate(req); - completeRequest(req, result); + cachePort.sendFunctional(pkt); +// completeRequest(pkt, result); } else { - req->completionEvent = new MemCompleteEvent(req, result, this); - cacheInterface->access(req); +// req->completionEvent = new MemCompleteEvent(req, result, this); + if (!cachePort.sendTiming(pkt)) { + accessRetry = true; + retryPkt = pkt; + } } - } else if (cmd < (100 - percentCopies)){ + } else { // write //For now we only allow one outstanding request per addreess per tester //This means we assume CPU does write forwarding to reads that alias something //in the cpu store buffer. - if (outstandingAddrs.find(req->paddr) != outstandingAddrs.end()) return; - else outstandingAddrs.insert(req->paddr); + if (outstandingAddrs.find(paddr) != outstandingAddrs.end()) return; + else outstandingAddrs.insert(paddr); - req->cmd = Write; - memcpy(req->data, &data, req->size); - checkMem->access(Write, req->paddr, req->data, req->size); - if (blockAddr(req->paddr) == traceBlockAddr) { +/* + if (blockAddr(req->getPaddr()) == traceBlockAddr) { cerr << name() << ": initiating write " << ((probe)?"probe of ":"access of ") - << dec << req->size << " bytes (value = 0x"; - printData(cerr, req->data, req->size); + << dec << req->getSize() << " bytes (value = 0x"; + printData(cerr, data_pkt->getPtr(), req->getSize()); cerr << ") to addr 0x" - << hex << req->paddr - << " (0x" << hex << blockAddr(req->paddr) << ")" + << hex << req->getPaddr() + << " (0x" << hex << blockAddr(req->getPaddr()) << ")" << " at cycle " << dec << curTick << endl; } +*/ + Packet *pkt = new Packet(req, Packet::WriteReq, Packet::Broadcast); + uint8_t *pkt_data = new uint8_t[req->getSize()]; + pkt->dataDynamicArray(pkt_data); + memcpy(pkt_data, &data, req->getSize()); + MemTestSenderState *state = new MemTestSenderState(result); + pkt->senderState = state; + + funcPort.writeBlob(req->getPaddr(), pkt_data, req->getSize()); + if (probe) { - cacheInterface->probeAndUpdate(req); - completeRequest(req, NULL); + cachePort.sendFunctional(pkt); +// completeRequest(req, NULL); } else { - req->completionEvent = new MemCompleteEvent(req, NULL, this); - cacheInterface->access(req); +// req->completionEvent = new MemCompleteEvent(req, NULL, this); + if (!cachePort.sendTiming(pkt)) { + accessRetry = true; + retryPkt = pkt; + } } - } else { + } +/* else { // copy unsigned source_align = random() % 100; unsigned dest_align = random() % 100; @@ -369,38 +449,32 @@ MemTest::tick() << " (0x" << hex << blockAddr(dest) << ")" << " at cycle " << dec << curTick << endl; - } + }* cacheInterface->access(req); uint8_t result[blockSize]; checkMem->access(Read, source, &result, blockSize); checkMem->access(Write, dest, &result, blockSize); } +*/ } - void -MemCompleteEvent::process() -{ - tester->completeRequest(req, data); - delete this; -} - - -const char * -MemCompleteEvent::description() +MemTest::doRetry() { - return "memory access completion"; + if (cachePort.sendTiming(retryPkt)) { + 
accessRetry = false; + retryPkt = NULL; + } } - BEGIN_DECLARE_SIM_OBJECT_PARAMS(MemTest) - SimObjectParam cache; - SimObjectParam main_mem; - SimObjectParam check_mem; +// SimObjectParam cache; +// SimObjectParam main_mem; +// SimObjectParam check_mem; Param memory_size; Param percent_reads; - Param percent_copies; +// Param percent_copies; Param percent_uncacheable; Param progress_interval; Param percent_source_unaligned; @@ -413,12 +487,12 @@ END_DECLARE_SIM_OBJECT_PARAMS(MemTest) BEGIN_INIT_SIM_OBJECT_PARAMS(MemTest) - INIT_PARAM(cache, "L1 cache"), - INIT_PARAM(main_mem, "hierarchical memory"), - INIT_PARAM(check_mem, "check memory"), +// INIT_PARAM(cache, "L1 cache"), +// INIT_PARAM(main_mem, "hierarchical memory"), +// INIT_PARAM(check_mem, "check memory"), INIT_PARAM(memory_size, "memory size"), INIT_PARAM(percent_reads, "target read percentage"), - INIT_PARAM(percent_copies, "target copy percentage"), +// INIT_PARAM(percent_copies, "target copy percentage"), INIT_PARAM(percent_uncacheable, "target uncacheable percentage"), INIT_PARAM(progress_interval, "progress report interval (in accesses)"), INIT_PARAM(percent_source_unaligned, @@ -433,8 +507,8 @@ END_INIT_SIM_OBJECT_PARAMS(MemTest) CREATE_SIM_OBJECT(MemTest) { - return new MemTest(getInstanceName(), cache->getInterface(), main_mem, - check_mem, memory_size, percent_reads, percent_copies, + return new MemTest(getInstanceName(), /*cache->getInterface(),*/ /*main_mem,*/ + /*check_mem,*/ memory_size, percent_reads, /*percent_copies,*/ percent_uncacheable, progress_interval, percent_source_unaligned, percent_dest_unaligned, trace_addr, max_loads); diff --git a/src/cpu/memtest/memtest.hh b/src/cpu/memtest/memtest.hh index 42fb235db..278012eba 100644 --- a/src/cpu/memtest/memtest.hh +++ b/src/cpu/memtest/memtest.hh @@ -35,25 +35,27 @@ #include #include "base/statistics.hh" -#include "mem/functional/functional.hh" -#include "mem/mem_interface.hh" +//#include "mem/functional/functional.hh" +//#include "mem/mem_interface.hh" #include "sim/eventq.hh" #include "sim/sim_exit.hh" #include "sim/sim_object.hh" #include "sim/stats.hh" +#include "mem/mem_object.hh" +#include "mem/port.hh" -class ThreadContext; -class MemTest : public SimObject +class Packet; +class MemTest : public MemObject { public: MemTest(const std::string &name, - MemInterface *_cache_interface, - FunctionalMemory *main_mem, - FunctionalMemory *check_mem, +// MemInterface *_cache_interface, +// PhysicalMemory *main_mem, +// PhysicalMemory *check_mem, unsigned _memorySize, unsigned _percentReads, - unsigned _percentCopies, +// unsigned _percentCopies, unsigned _percentUncacheable, unsigned _progressInterval, unsigned _percentSourceUnaligned, @@ -61,6 +63,8 @@ class MemTest : public SimObject Addr _traceAddr, Counter _max_loads); + virtual void init(); + // register statistics virtual void regStats(); @@ -69,6 +73,8 @@ class MemTest : public SimObject // main simulation loop (one cycle) void tick(); + virtual Port *getPort(const std::string &if_name, int idx = -1); + protected: class TickEvent : public Event { @@ -82,16 +88,62 @@ class MemTest : public SimObject }; TickEvent tickEvent; + class CpuPort : public Port + { + + MemTest *memtest; + + public: + + CpuPort(const std::string &_name, MemTest *_memtest) + : Port(_name), memtest(_memtest) + { } + + protected: + + virtual bool recvTiming(Packet *pkt); + + virtual Tick recvAtomic(Packet *pkt); + + virtual void recvFunctional(Packet *pkt); + + virtual void recvStatusChange(Status status); + + virtual void recvRetry(); + + 
virtual void getDeviceAddressRanges(AddrRangeList &resp, + AddrRangeList &snoop) + { resp.clear(); snoop.clear(); } + }; + + CpuPort cachePort; + CpuPort funcPort; + + class MemTestSenderState : public Packet::SenderState + { + public: + /** Constructor. */ + MemTestSenderState(uint8_t *_data) + : data(_data) + { } + + // Hold onto data pointer + uint8_t *data; + }; + +// Request *dataReq; + Packet *retryPkt; +// MemInterface *cacheInterface; +// PhysicalMemory *mainMem; +// PhysicalMemory *checkMem; +// SimpleThread *thread; - MemInterface *cacheInterface; - FunctionalMemory *mainMem; - FunctionalMemory *checkMem; - SimpleThread *thread; + bool accessRetry; unsigned size; // size of testing memory region unsigned percentReads; // target percentage of read accesses - unsigned percentCopies; // target percentage of copy accesses +// unsigned percentCopies; // target percentage of copy accesses unsigned percentUncacheable; int id; @@ -128,29 +180,11 @@ class MemTest : public SimObject Stats::Scalar<> numCopiesStat; // called by MemCompleteEvent::process() - void completeRequest(MemReqPtr &req, uint8_t *data); + void completeRequest(Packet *pkt); - friend class MemCompleteEvent; -}; + void doRetry(); - -class MemCompleteEvent : public Event -{ - MemReqPtr req; - uint8_t *data; - MemTest *tester; - - public: - - MemCompleteEvent(MemReqPtr &_req, uint8_t *_data, MemTest *_tester) - : Event(&mainEventQueue), - req(_req), data(_data), tester(_tester) - { - } - - void process(); - - virtual const char *description(); + friend class MemCompleteEvent; }; #endif // __CPU_MEMTEST_MEMTEST_HH__ diff --git a/src/python/m5/objects/MemTest.py b/src/python/m5/objects/MemTest.py index 97600768f..18aff03f4 100644 --- a/src/python/m5/objects/MemTest.py +++ b/src/python/m5/objects/MemTest.py @@ -1,13 +1,12 @@ from m5.SimObject import SimObject from m5.params import * +from m5.proxy import * +from m5 import build_env + class MemTest(SimObject): type = 'MemTest' - cache = Param.BaseCache("L1 cache") - check_mem = Param.FunctionalMemory("check memory") - main_mem = Param.FunctionalMemory("hierarchical memory") max_loads = Param.Counter("number of loads to execute") memory_size = Param.Int(65536, "memory size") - percent_copies = Param.Percent(0, "target copy percentage") percent_dest_unaligned = Param.Percent(50, "percent of copy dest address that are unaligned") percent_reads = Param.Percent(65, "target read percentage") @@ -18,3 +17,6 @@ class MemTest(SimObject): progress_interval = Param.Counter(1000000, "progress report interval (in accesses)") trace_addr = Param.Addr(0, "address to trace") + + test = Port("Port to the memory system to test") + functional = Port("Port to the functional memory used for verification") diff --git a/tests/configs/memtest.py b/tests/configs/memtest.py new file mode 100644 index 000000000..cfcefbcb9 --- /dev/null +++ b/tests/configs/memtest.py @@ -0,0 +1,88 @@ +# Copyright (c) 2006 The Regents of The University of Michigan +# All rights reserved. 
+# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer; +# redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution; +# neither the name of the copyright holders nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# +# Authors: Ron Dreslinski + +import m5 +from m5.objects import * + +# -------------------- +# Base L1 Cache +# ==================== + +class L1(BaseCache): + latency = 1 + block_size = 64 + mshrs = 4 + tgts_per_mshr = 8 + protocol = CoherenceProtocol(protocol='moesi') + +# ---------------------- +# Base L2 Cache +# ---------------------- + +class L2(BaseCache): + block_size = 64 + latency = 100 + mshrs = 92 + tgts_per_mshr = 16 + write_buffers = 8 + +nb_cores = 1 +cpus = [ MemTest(max_loads=1e12) for i in xrange(nb_cores) ] + +# system simulated +system = System(cpu = cpus, funcmem = PhysicalMemory(), + physmem = PhysicalMemory(), membus = Bus()) + +# l2cache & bus +system.toL2Bus = Bus() +system.l2c = L2(size='4MB', assoc=8) +system.l2c.cpu_side = system.toL2Bus.port + +# connect l2c to membus +system.l2c.mem_side = system.membus.port + +# add L1 caches +for cpu in cpus: + cpu.l1c = L1(size = '32kB', assoc = 4) + cpu.l1c.cpu_side = cpu.test + cpu.l1c.mem_side = system.toL2Bus.port + system.funcmem.port = cpu.functional + + +# connect memory to membus +system.physmem.port = system.membus.port + + +# ----------------------- +# run simulation +# ----------------------- + +root = Root( system = system ) +root.system.mem_mode = 'timing' +#root.trace.flags="InstExec" +root.trace.flags="Bus" diff --git a/tests/quick/50.memtest/test.py b/tests/quick/50.memtest/test.py new file mode 100644 index 000000000..e894b8fb8 --- /dev/null +++ b/tests/quick/50.memtest/test.py @@ -0,0 +1,28 @@ +# Copyright (c) 2006 The Regents of The University of Michigan +# All rights reserved. 
+# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer; +# redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution; +# neither the name of the copyright holders nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# +# Authors: Ron Dreslinski + -- cgit v1.2.3 From 4f93c43d34f66b164cc67f87e7a75fc500a79fa6 Mon Sep 17 00:00:00 2001 From: Ron Dreslinski Date: Mon, 9 Oct 2006 00:27:03 -0400 Subject: Don't block responses even if the cache is blocked. --HG-- extra : convert_revision : a1558eb55806b2a3e7e63249601df2c143e2235d --- src/mem/cache/base_cache.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/mem/cache/base_cache.cc b/src/mem/cache/base_cache.cc index d7ccca8c0..6b035bc16 100644 --- a/src/mem/cache/base_cache.cc +++ b/src/mem/cache/base_cache.cc @@ -71,7 +71,7 @@ BaseCache::CachePort::deviceBlockSize() bool BaseCache::CachePort::recvTiming(Packet *pkt) { - if (blocked) + if (pkt->isRequest() && blocked) { DPRINTF(Cache,"Scheduling a retry while blocked\n"); mustSendRetry = true; -- cgit v1.2.3 From 0087061681869c9aaab81c3797020b083a83d46a Mon Sep 17 00:00:00 2001 From: Ron Dreslinski Date: Mon, 9 Oct 2006 00:27:41 -0400 Subject: Don't create a response if one isn't needed. --HG-- extra : convert_revision : 37bd230f527f64eb12779157869aae9dcfdde7fd --- src/mem/cache/cache_impl.hh | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/mem/cache/cache_impl.hh b/src/mem/cache/cache_impl.hh index 9ce8f515d..ac2d7af8b 100644 --- a/src/mem/cache/cache_impl.hh +++ b/src/mem/cache/cache_impl.hh @@ -620,7 +620,9 @@ Cache::probe(Packet * &pkt, bool update, CachePort lat = memSidePort->sendAtomic(busPkt); //Be sure to flip the response to a request for coherence - busPkt->makeAtomicResponse(); + if (busPkt->needsResponse()) { + busPkt->makeAtomicResponse(); + } /* if (!(busPkt->flags & SATISFIED)) { // blocked at a higher level, just return -- cgit v1.2.3 From 095d5991f50aaccd2a25792cb7ce44b43a98b29c Mon Sep 17 00:00:00 2001 From: Ron Dreslinski Date: Mon, 9 Oct 2006 00:31:24 -0400 Subject: Put a check in so people know not to create more than 8 memtesters. 
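(Context for the commit below: each tester encodes its id into the low-order bits of the block offset, via the "offset &= ~63; offset += id" logic in MemTest::tick() above, so only a handful of testers fit within one 64-byte block. The following is a hypothetical sketch of scaling the tests/configs/memtest.py config added above to several testers, reusing its L1/L2 classes and port wiring; the one funcmem port connection per tester is carried over from that config's own loop.)

# Scaled-up variant of tests/configs/memtest.py: four false-sharing
# testers behind private L1s and a shared L2. nb_cores must stay at 8
# or below, or the panic added in the commit below fires.
nb_cores = 4
cpus = [ MemTest(max_loads=1e12) for i in xrange(nb_cores) ]

system = System(cpu = cpus, funcmem = PhysicalMemory(),
                physmem = PhysicalMemory(), membus = Bus())

# shared L2 behind a common bus
system.toL2Bus = Bus()
system.l2c = L2(size='4MB', assoc=8)
system.l2c.cpu_side = system.toL2Bus.port
system.l2c.mem_side = system.membus.port

# one private L1 per tester; each tester also verifies its results
# against the functional memory through its "functional" port
for cpu in cpus:
    cpu.l1c = L1(size = '32kB', assoc = 4)
    cpu.l1c.cpu_side = cpu.test
    cpu.l1c.mem_side = system.toL2Bus.port
    system.funcmem.port = cpu.functional

system.physmem.port = system.membus.port

root = Root( system = system )
root.system.mem_mode = 'timing'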
--HG-- extra : convert_revision : 41ab297dc681b2601be1df33aba30c39f49466d8 --- src/cpu/memtest/memtest.cc | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/cpu/memtest/memtest.cc b/src/cpu/memtest/memtest.cc index 186b6ba50..609a07a8e 100644 --- a/src/cpu/memtest/memtest.cc +++ b/src/cpu/memtest/memtest.cc @@ -137,6 +137,8 @@ MemTest::MemTest(const string &name, tickEvent.schedule(0); id = TESTER_ALLOCATOR++; + if (TESTER_ALLOCATOR > 8) + panic("False sharing memtester only allows up to 8 testers"); accessRetry = false; } -- cgit v1.2.3 From bc732b59fd82689490306090974f1f4c06741b0a Mon Sep 17 00:00:00 2001 From: Ron Dreslinski Date: Mon, 9 Oct 2006 01:04:37 -0400 Subject: Have cpus send snoop ranges --HG-- extra : convert_revision : 2a1fba141e409ee1d7a0b69b5b21d236e3d4ce68 --- src/cpu/o3/fetch.hh | 2 +- src/cpu/o3/lsq.hh | 2 +- src/cpu/ozone/front_end.hh | 2 +- src/cpu/ozone/lw_lsq.hh | 2 +- src/cpu/simple/atomic.hh | 4 ++-- src/cpu/simple/timing.hh | 2 +- src/mem/cache/base_cache.hh | 16 ++++++++-------- 7 files changed, 15 insertions(+), 15 deletions(-) diff --git a/src/cpu/o3/fetch.hh b/src/cpu/o3/fetch.hh index 1a2ca32a4..280bf0e71 100644 --- a/src/cpu/o3/fetch.hh +++ b/src/cpu/o3/fetch.hh @@ -96,7 +96,7 @@ class DefaultFetch /** Returns the address ranges of this device. */ virtual void getDeviceAddressRanges(AddrRangeList &resp, AddrRangeList &snoop) - { resp.clear(); snoop.clear(); } + { resp.clear(); snoop.clear(); snoop.push_back(RangeSize(0,-1)); } /** Timing version of receive. Handles setting fetch to the * proper status to start fetching. */ diff --git a/src/cpu/o3/lsq.hh b/src/cpu/o3/lsq.hh index 190734dc2..6b12d75b4 100644 --- a/src/cpu/o3/lsq.hh +++ b/src/cpu/o3/lsq.hh @@ -311,7 +311,7 @@ class LSQ { /** Returns the address ranges of this device. */ virtual void getDeviceAddressRanges(AddrRangeList &resp, AddrRangeList &snoop) - { resp.clear(); snoop.clear(); } + { resp.clear(); snoop.clear(); snoop.push_back(RangeSize(0,-1)); } /** Timing version of receive. Handles writing back and * completing the load or store that has returned from diff --git a/src/cpu/ozone/front_end.hh b/src/cpu/ozone/front_end.hh index 5ffd3666e..59cf9785c 100644 --- a/src/cpu/ozone/front_end.hh +++ b/src/cpu/ozone/front_end.hh @@ -92,7 +92,7 @@ class FrontEnd /** Returns the address ranges of this device. */ virtual void getDeviceAddressRanges(AddrRangeList &resp, AddrRangeList &snoop) - { resp.clear(); snoop.clear(); } + { resp.clear(); snoop.clear(); snoop.push_back(RangeSize(0,-1)); } /** Timing version of receive. Handles setting fetch to the * proper status to start fetching. 
From bc732b59fd82689490306090974f1f4c06741b0a Mon Sep 17 00:00:00 2001
From: Ron Dreslinski
Date: Mon, 9 Oct 2006 01:04:37 -0400
Subject: Have CPUs send snoop ranges

--HG--
extra : convert_revision : 2a1fba141e409ee1d7a0b69b5b21d236e3d4ce68
---
 src/cpu/o3/fetch.hh         |  2 +-
 src/cpu/o3/lsq.hh           |  2 +-
 src/cpu/ozone/front_end.hh  |  2 +-
 src/cpu/ozone/lw_lsq.hh     |  2 +-
 src/cpu/simple/atomic.hh    |  4 ++--
 src/cpu/simple/timing.hh    |  2 +-
 src/mem/cache/base_cache.hh | 16 ++++++++--------
 7 files changed, 15 insertions(+), 15 deletions(-)

diff --git a/src/cpu/o3/fetch.hh b/src/cpu/o3/fetch.hh
index 1a2ca32a4..280bf0e71 100644
--- a/src/cpu/o3/fetch.hh
+++ b/src/cpu/o3/fetch.hh
@@ -96,7 +96,7 @@ class DefaultFetch
         /** Returns the address ranges of this device. */
         virtual void getDeviceAddressRanges(AddrRangeList &resp,
                                             AddrRangeList &snoop)
-        { resp.clear(); snoop.clear(); }
+        { resp.clear(); snoop.clear(); snoop.push_back(RangeSize(0,-1)); }

         /** Timing version of receive.  Handles setting fetch to the
          * proper status to start fetching. */

diff --git a/src/cpu/o3/lsq.hh b/src/cpu/o3/lsq.hh
index 190734dc2..6b12d75b4 100644
--- a/src/cpu/o3/lsq.hh
+++ b/src/cpu/o3/lsq.hh
@@ -311,7 +311,7 @@ class LSQ {
         /** Returns the address ranges of this device. */
         virtual void getDeviceAddressRanges(AddrRangeList &resp,
                                             AddrRangeList &snoop)
-        { resp.clear(); snoop.clear(); }
+        { resp.clear(); snoop.clear(); snoop.push_back(RangeSize(0,-1)); }

         /** Timing version of receive.  Handles writing back and
          * completing the load or store that has returned from

diff --git a/src/cpu/ozone/front_end.hh b/src/cpu/ozone/front_end.hh
index 5ffd3666e..59cf9785c 100644
--- a/src/cpu/ozone/front_end.hh
+++ b/src/cpu/ozone/front_end.hh
@@ -92,7 +92,7 @@ class FrontEnd
         /** Returns the address ranges of this device. */
         virtual void getDeviceAddressRanges(AddrRangeList &resp,
                                             AddrRangeList &snoop)
-        { resp.clear(); snoop.clear(); }
+        { resp.clear(); snoop.clear(); snoop.push_back(RangeSize(0,-1)); }

         /** Timing version of receive.  Handles setting fetch to the
          * proper status to start fetching. */

diff --git a/src/cpu/ozone/lw_lsq.hh b/src/cpu/ozone/lw_lsq.hh
index 347f4569b..9b93ce74f 100644
--- a/src/cpu/ozone/lw_lsq.hh
+++ b/src/cpu/ozone/lw_lsq.hh
@@ -260,7 +260,7 @@ class OzoneLWLSQ {
         virtual void getDeviceAddressRanges(AddrRangeList &resp,
                                             AddrRangeList &snoop)
-        { resp.clear(); snoop.clear(); }
+        { resp.clear(); snoop.clear(); snoop.push_back(RangeSize(0,-1)); }

         virtual bool recvTiming(PacketPtr pkt);

diff --git a/src/cpu/simple/atomic.hh b/src/cpu/simple/atomic.hh
index b602af558..52afd76ef 100644
--- a/src/cpu/simple/atomic.hh
+++ b/src/cpu/simple/atomic.hh
@@ -104,9 +104,9 @@ class AtomicSimpleCPU : public BaseSimpleCPU
         virtual void getDeviceAddressRanges(AddrRangeList &resp,
                                             AddrRangeList &snoop)
-        { resp.clear(); snoop.clear(); }
-    };
+        { resp.clear(); snoop.clear(); snoop.push_back(RangeSize(0,-1)); }
+    };

     CpuPort icachePort;
     CpuPort dcachePort;

diff --git a/src/cpu/simple/timing.hh b/src/cpu/simple/timing.hh
index b65eebd99..18e13aeb2 100644
--- a/src/cpu/simple/timing.hh
+++ b/src/cpu/simple/timing.hh
@@ -92,7 +92,7 @@ class TimingSimpleCPU : public BaseSimpleCPU
         virtual void getDeviceAddressRanges(AddrRangeList &resp,
                                             AddrRangeList &snoop)
-        { resp.clear(); snoop.clear(); }
+        { resp.clear(); snoop.clear(); snoop.push_back(RangeSize(0,-1)); }

         struct TickEvent : public Event
         {

diff --git a/src/mem/cache/base_cache.hh b/src/mem/cache/base_cache.hh
index 2e92e7730..de8a19cac 100644
--- a/src/mem/cache/base_cache.hh
+++ b/src/mem/cache/base_cache.hh
@@ -156,7 +156,7 @@ class BaseCache : public MemObject
             if (status == Port::RangeChange){
                 if (!isCpuSide) {
                     cpuSidePort->sendStatusChange(Port::RangeChange);
-                    if (topLevelCache && !snoopRangesSent) {
+                    if (!snoopRangesSent) {
                         snoopRangesSent = true;
                         memSidePort->sendStatusChange(Port::RangeChange);
                     }
@@ -568,14 +568,14 @@ class BaseCache : public MemObject
     {
         //This is where snoops get updated
         AddrRangeList dummy;
-        if (!topLevelCache)
-        {
+//        if (!topLevelCache)
+//        {
             cpuSidePort->getPeerAddressRanges(dummy, snoop);
-        }
-        else
-        {
-            snoop.push_back(RangeSize(0,-1));
-        }
+//        }
+//        else
+//        {
+//            snoop.push_back(RangeSize(0,-1));
+//        }
         return;
     }
--
cgit v1.2.3
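The recurring one-line change above makes every CPU port advertise a snoop range covering the entire address space. A simplified sketch of what RangeSize(0,-1) amounts to; Range and RangeSize here are stand-ins for the m5 helpers, assuming the size argument is taken as an unsigned 64-bit value:

    #include <cstdint>
    #include <list>

    typedef uint64_t Addr;
    struct Range { Addr start; Addr size; };
    typedef std::list<Range> AddrRangeList;

    // Stand-in for m5's RangeSize() helper: a range of `size` bytes
    // starting at `start`. Passing -1 wraps to 2^64 - 1, i.e. the
    // whole address space.
    static Range RangeSize(Addr start, Addr size) {
        Range r; r.start = start; r.size = size; return r;
    }

    // A CPU port owns no address ranges of its own, but it must snoop
    // everything: state cached inside the CPU (LSQ entries, fetch
    // buffers) may alias any block in memory.
    void getDeviceAddressRanges(AddrRangeList &resp, AddrRangeList &snoop) {
        resp.clear();
        snoop.clear();
        snoop.push_back(RangeSize(0, static_cast<Addr>(-1)));
    }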
From afce51d10ad1441247b39edb2a61b85d3cd7af04 Mon Sep 17 00:00:00 2001
From: Ron Dreslinski
Date: Mon, 9 Oct 2006 16:37:02 -0400
Subject: Set size properly on uncached accesses

Don't use the senderState after you get a successful sendTiming; it is
not guaranteed to be correct.

src/mem/cache/base_cache.cc:
src/mem/cache/base_cache.hh:
src/mem/cache/cache.hh:
src/mem/cache/cache_impl.hh:
src/mem/cache/miss/blocking_buffer.cc:
src/mem/cache/miss/blocking_buffer.hh:
src/mem/cache/miss/miss_queue.hh:
    Don't use the senderState after you get a successful sendTiming;
    it is not guaranteed to be correct.

--HG--
extra : convert_revision : 2e8e812bf7fd3ba2b4cba7f7173cb41862f761af
---
 src/mem/cache/base_cache.cc           | 16 ++++++++++++----
 src/mem/cache/base_cache.hh           |  5 ++++-
 src/mem/cache/cache.hh                |  2 +-
 src/mem/cache/cache_impl.hh           |  6 +++---
 src/mem/cache/miss/blocking_buffer.cc |  6 +++---
 src/mem/cache/miss/blocking_buffer.hh |  2 +-
 src/mem/cache/miss/miss_queue.cc      | 13 ++++++-------
 src/mem/cache/miss/miss_queue.hh      |  2 +-
 8 files changed, 31 insertions(+), 21 deletions(-)

diff --git a/src/mem/cache/base_cache.cc b/src/mem/cache/base_cache.cc
index 6b035bc16..1a0f63d17 100644
--- a/src/mem/cache/base_cache.cc
+++ b/src/mem/cache/base_cache.cc
@@ -109,10 +109,11 @@ BaseCache::CachePort::recvRetry()
     if (!isCpuSide)
     {
         pkt = cache->getPacket();
+        MSHR* mshr = (MSHR*)pkt->senderState;
         bool success = sendTiming(pkt);
         DPRINTF(Cache, "Address %x was %s in sending the timing request\n",
                 pkt->getAddr(), success ? "successful" : "unsuccessful");
-        cache->sendResult(pkt, success);
+        cache->sendResult(pkt, mshr, success);
         if (success && cache->doMasterRequest())
         {
             //Still more to issue, rerequest in 1 cycle
@@ -123,7 +124,9 @@ BaseCache::CachePort::recvRetry()
     }
     else
     {
-        pkt = cache->getCoherencePacket();
+        //pkt = cache->getCoherencePacket();
+        //We save the packet, no reordering on CSHRS
+        pkt = cshrRetry;
         bool success = sendTiming(pkt);
         if (success && cache->doSlaveRequest())
         {
@@ -182,10 +185,11 @@ BaseCache::CacheEvent::process()
     {
         //MSHR
         pkt = cachePort->cache->getPacket();
+        MSHR* mshr = (MSHR*) pkt->senderState;
         bool success = cachePort->sendTiming(pkt);
         DPRINTF(Cache, "Address %x was %s in sending the timing request\n",
                 pkt->getAddr(), success ? "successful" : "unsuccessful");
-        cachePort->cache->sendResult(pkt, success);
+        cachePort->cache->sendResult(pkt, mshr, success);
         if (success && cachePort->cache->doMasterRequest())
         {
             //Still more to issue, rerequest in 1 cycle
@@ -198,7 +202,11 @@ BaseCache::CacheEvent::process()
     //CSHR
     pkt = cachePort->cache->getCoherencePacket();
     bool success = cachePort->sendTiming(pkt);
-    if (success && cachePort->cache->doSlaveRequest())
+    if (!success) {
+        //Need to send on a retry
+        cachePort->cshrRetry = pkt;
+    }
+    else if (cachePort->cache->doSlaveRequest())
     {
         //Still more to issue, rerequest in 1 cycle
         pkt = NULL;

diff --git a/src/mem/cache/base_cache.hh b/src/mem/cache/base_cache.hh
index de8a19cac..c45f3b71b 100644
--- a/src/mem/cache/base_cache.hh
+++ b/src/mem/cache/base_cache.hh
@@ -72,6 +72,7 @@ enum RequestCause{
     Request_PF
 };

+class MSHR;
 /**
  * A basic cache interface. Implements some common functions for speed.
  */
@@ -112,6 +113,8 @@ class BaseCache : public MemObject
         bool isCpuSide;

         std::list<Packet *> drainList;
+
+        Packet *cshrRetry;
     };

     struct CacheEvent : public Event
@@ -177,7 +180,7 @@ class BaseCache : public MemObject
         fatal("No implementation");
     }

-    virtual void sendResult(Packet* &pkt, bool success)
+    virtual void sendResult(Packet* &pkt, MSHR* mshr, bool success)
     {

         fatal("No implementation");

diff --git a/src/mem/cache/cache.hh b/src/mem/cache/cache.hh
index 4b8870c95..923bf8255 100644
--- a/src/mem/cache/cache.hh
+++ b/src/mem/cache/cache.hh
@@ -175,7 +175,7 @@ class Cache : public BaseCache
      * @param pkt The request.
      * @param success True if the request was sent successfully.
      */
-    virtual void sendResult(Packet * &pkt, bool success);
+    virtual void sendResult(Packet * &pkt, MSHR* mshr, bool success);

     /**
      * Handles a response (cache line fill/write ack) from the bus.
diff --git a/src/mem/cache/cache_impl.hh b/src/mem/cache/cache_impl.hh
index ac2d7af8b..32f561d71 100644
--- a/src/mem/cache/cache_impl.hh
+++ b/src/mem/cache/cache_impl.hh
@@ -287,10 +287,10 @@ Cache::getPacket()

 template<class TagStore, class Buffering, class Coherence>
 void
-Cache<TagStore,Buffering,Coherence>::sendResult(PacketPtr &pkt, bool success)
+Cache<TagStore,Buffering,Coherence>::sendResult(PacketPtr &pkt, MSHR* mshr, bool success)
 {
     if (success) {
-        missQueue->markInService(pkt);
+        missQueue->markInService(pkt, mshr);
         //Temp Hack for UPGRADES
         if (pkt->cmd == Packet::UpgradeReq) {
             handleResponse(pkt);
@@ -444,7 +444,7 @@ Cache::snoop(Packet * &pkt)
             if (pkt->isInvalidate()) {
                 //This must be an upgrade or other cache will take ownership
-                missQueue->markInService(mshr->pkt);
+                missQueue->markInService(mshr->pkt, mshr);
             }
             return;
         }

diff --git a/src/mem/cache/miss/blocking_buffer.cc b/src/mem/cache/miss/blocking_buffer.cc
index 7a6ea9133..f7aacff89 100644
--- a/src/mem/cache/miss/blocking_buffer.cc
+++ b/src/mem/cache/miss/blocking_buffer.cc
@@ -123,12 +123,12 @@ BlockingBuffer::restoreOrigCmd(Packet * &pkt)
 }

 void
-BlockingBuffer::markInService(Packet * &pkt)
+BlockingBuffer::markInService(Packet * &pkt, MSHR* mshr)
 {
     if (!pkt->isCacheFill() && pkt->isWrite()) {
         // Forwarding a write/ writeback, don't need to change
         // the command
-        assert((MSHR*)pkt->senderState == &wb);
+        assert(mshr == &wb);
         cache->clearMasterRequest(Request_WB);
         if (!pkt->needsResponse()) {
             assert(wb.getNumTargets() == 0);
@@ -138,7 +138,7 @@ BlockingBuffer::markInService(Packet * &pkt, MSHR* mshr)
             wb.inService = true;
         }
     } else {
-        assert((MSHR*)pkt->senderState == &miss);
+        assert(mshr == &miss);
         cache->clearMasterRequest(Request_MSHR);
         if (!pkt->needsResponse()) {
             assert(miss.getNumTargets() == 0);

diff --git a/src/mem/cache/miss/blocking_buffer.hh b/src/mem/cache/miss/blocking_buffer.hh
index 641d5a798..f7069696c 100644
--- a/src/mem/cache/miss/blocking_buffer.hh
+++ b/src/mem/cache/miss/blocking_buffer.hh
@@ -152,7 +152,7 @@ public:
      * are successfully sent.
      * @param pkt The request that was sent on the bus.
      */
-    void markInService(Packet * &pkt);
+    void markInService(Packet * &pkt, MSHR* mshr);

     /**
      * Frees the resources of the request and unblock the cache.

diff --git a/src/mem/cache/miss/miss_queue.cc b/src/mem/cache/miss/miss_queue.cc
index 273b6587f..bdb7a39c8 100644
--- a/src/mem/cache/miss/miss_queue.cc
+++ b/src/mem/cache/miss/miss_queue.cc
@@ -372,7 +372,7 @@ MissQueue::allocateMiss(Packet * &pkt, int size, Tick time)
 MSHR*
 MissQueue::allocateWrite(Packet * &pkt, int size, Tick time)
 {
-    MSHR* mshr = wb.allocate(pkt,blkSize);
+    MSHR* mshr = wb.allocate(pkt,size);
     mshr->order = order++;

 //REMOVING COMPRESSION FOR NOW
@@ -446,7 +446,7 @@ MissQueue::handleMiss(Packet * &pkt, int blkSize, Tick time)
         /**
         * @todo Add write merging here.
         */
-        mshr = allocateWrite(pkt, blkSize, time);
+        mshr = allocateWrite(pkt, pkt->getSize(), time);
         return;
     }
@@ -526,9 +526,8 @@ MissQueue::restoreOrigCmd(Packet * &pkt)
 }

 void
-MissQueue::markInService(Packet * &pkt)
+MissQueue::markInService(Packet * &pkt, MSHR* mshr)
 {
-    assert(pkt->senderState != 0);
     bool unblock = false;
     BlockedCause cause = NUM_BLOCKED_CAUSES;

@@ -540,7 +539,7 @@ MissQueue::markInService(Packet * &pkt, MSHR* mshr)
         // Forwarding a write/ writeback, don't need to change
         // the command
         unblock = wb.isFull();
-        wb.markInService((MSHR*)pkt->senderState);
+        wb.markInService(mshr);
         if (!wb.havePending()){
             cache->clearMasterRequest(Request_WB);
         }
@@ -551,11 +550,11 @@ MissQueue::markInService(Packet * &pkt, MSHR* mshr)
         }
     } else {
         unblock = mq.isFull();
-        mq.markInService((MSHR*)pkt->senderState);
+        mq.markInService(mshr);
         if (!mq.havePending()){
             cache->clearMasterRequest(Request_MSHR);
         }
-        if (((MSHR*)(pkt->senderState))->originalCmd == Packet::HardPFReq) {
+        if (mshr->originalCmd == Packet::HardPFReq) {
             DPRINTF(HWPrefetch, "%s:Marking a HW_PF in service\n",
                     cache->name());
             //Also clear pending if need be

diff --git a/src/mem/cache/miss/miss_queue.hh b/src/mem/cache/miss/miss_queue.hh
index 505d1f90c..179638d2b 100644
--- a/src/mem/cache/miss/miss_queue.hh
+++ b/src/mem/cache/miss/miss_queue.hh
@@ -256,7 +256,7 @@ class MissQueue
      * are successfully sent.
      * @param pkt The request that was sent on the bus.
      */
-    void markInService(Packet * &pkt);
+    void markInService(Packet * &pkt, MSHR* mshr);

     /**
      * Collect statistics and free resources of a satisfied request.
--
cgit v1.2.3
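The thread running through this patch: the MSHR pointer must be pulled out of pkt->senderState *before* sendTiming(), because on success the packet is handed to the peer and its senderState may no longer be valid. A condensed, compilable sketch with stub types (not the real gem5 classes):

    struct MSHR { };

    struct Packet {
        void *senderState;
        Packet() : senderState(0) {}
    };

    struct Port {
        // On success the packet belongs to the peer and may be modified
        // or even freed; senderState must not be read afterwards.
        bool sendTiming(Packet *) { return true; }
    };

    struct Cache {
        Packet pending;
        Packet *getPacket() { return &pending; }
        void sendResult(Packet *, MSHR *, bool) { }
    };

    void issueMiss(Cache &cache, Port &port) {
        Packet *pkt = cache.getPacket();
        MSHR *mshr = static_cast<MSHR *>(pkt->senderState); // read BEFORE sending
        bool success = port.sendTiming(pkt);                // pkt may be gone now
        cache.sendResult(pkt, mshr, success);               // MSHR passed explicitly
    }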
From 45732376f6f781cf3671b830321e96478a01dd3d Mon Sep 17 00:00:00 2001
From: Ron Dreslinski
Date: Mon, 9 Oct 2006 16:47:55 -0400
Subject: Add more DPRINTFs; fix a supply condition.

src/mem/cache/cache_impl.hh:
    Add more useful DPRINTFs.
    Remove the PC to get rid of asserts.

--HG--
extra : convert_revision : 3f6d832b138d058dbe79bb5f42bd2db9c50b35b5
---
 src/mem/cache/cache_impl.hh | 24 ++++++++++++++++++++----
 1 file changed, 20 insertions(+), 4 deletions(-)

diff --git a/src/mem/cache/cache_impl.hh b/src/mem/cache/cache_impl.hh
index ac2d7af8b..b6556f252 100644
--- a/src/mem/cache/cache_impl.hh
+++ b/src/mem/cache/cache_impl.hh
@@ -236,9 +236,9 @@ Cache::access(PacketPtr &pkt)
         missQueue->doWriteback(writebacks.front());
         writebacks.pop_front();
     }
-    DPRINTF(Cache, "%s %x %s blk_addr: %x pc %x\n", pkt->cmdString(),
+    DPRINTF(Cache, "%s %x %s blk_addr: %x\n", pkt->cmdString(),
             pkt->getAddr() & (((ULL(1))<<48)-1), (blk) ? "hit" : "miss",
-            pkt->getAddr() & ~((Addr)blkSize - 1), pkt->req->getPC());
+            pkt->getAddr() & ~((Addr)blkSize - 1));
     if (blk) {
         // Hit
         hits[pkt->cmdToIndex()][0/*pkt->req->getThreadNum()*/]++;
@@ -314,9 +314,11 @@ Cache::handleResponse(Packet * &pkt)
         blk = tags->findBlock(pkt);
         CacheBlk::State old_state = (blk) ? blk->status : 0;
         PacketList writebacks;
+        CacheBlk::State new_state = coherence->getNewState(pkt,old_state);
+        DPRINTF(Cache, "Block for blk addr %x moving from state %i to %i\n",
+                pkt->getAddr() & (((ULL(1))<<48)-1), old_state, new_state);
         blk = tags->handleFill(blk, (MSHR*)pkt->senderState,
-                               coherence->getNewState(pkt,old_state),
-                               writebacks, pkt);
+                               new_state, writebacks, pkt);
         while (!writebacks.empty()) {
             missQueue->doWriteback(writebacks.front());
             writebacks.pop_front();
@@ -387,6 +389,7 @@ Cache::snoop(Packet * &pkt)
             //If the outstanding request was an invalidate (upgrade,readex,..)
             //Then we need to ACK the request until we get the data
             //Also NACK if the outstanding request is not a cachefill (writeback)
+            assert(!(pkt->flags & SATISFIED));
             pkt->flags |= SATISFIED;
             pkt->flags |= NACKED_LINE;
             assert("Don't detect these on the other side yet\n");
@@ -426,6 +429,7 @@ Cache::snoop(Packet * &pkt)
             if (pkt->isRead()) {
                 //Only Upgrades don't get here
                 //Supply the data
+                assert(!(pkt->flags & SATISFIED));
                 pkt->flags |= SATISFIED;

                 //If we are in an exclusive protocol, make it ask again
@@ -454,10 +458,16 @@ Cache::snoop(Packet * &pkt)
     CacheBlk::State new_state;
     bool satisfy = coherence->handleBusRequest(pkt,blk,mshr, new_state);
     if (satisfy) {
+        DPRINTF(Cache, "Cache snooped a %c request and now supplying data,"
+                "new state is %i\n",
+                pkt->cmdString(), new_state);
+
         tags->handleSnoop(blk, new_state, pkt);
         respondToSnoop(pkt, curTick + hitLatency);
         return;
     }
+    if (blk) DPRINTF(Cache, "Cache snooped a %c request, new state is %i\n",
+                     pkt->cmdString(), new_state);
     tags->handleSnoop(blk, new_state);
 }
@@ -675,9 +685,15 @@ Cache::snoopProbe(PacketPtr &pkt)
     CacheBlk::State new_state = 0;
     bool satisfy = coherence->handleBusRequest(pkt,blk,mshr, new_state);
     if (satisfy) {
+        DPRINTF(Cache, "Cache snooped a %c request and now supplying data,"
+                "new state is %i\n",
+                pkt->cmdString(), new_state);
+
         tags->handleSnoop(blk, new_state, pkt);
         return hitLatency;
     }
+    if (blk) DPRINTF(Cache, "Cache snooped a %c request, new state is %i\n",
+                     pkt->cmdString(), new_state);
     tags->handleSnoop(blk, new_state);
     return 0;
 }
--
cgit v1.2.3
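The new asserts encode an invariant of the snoop protocol: a packet may be marked SATISFIED at most once, by at most one supplier. A small sketch of the idiom (the bit values are illustrative, not gem5's):

    #include <cassert>

    typedef unsigned Flags;
    const Flags SATISFIED   = 0x1; // illustrative bit assignments
    const Flags NACKED_LINE = 0x2;

    // Asserting the flag is clear before setting it catches
    // double-supply bugs at the point of the error, not downstream.
    void nackOutstandingLine(Flags &flags) {
        assert(!(flags & SATISFIED));
        flags |= SATISFIED;
        flags |= NACKED_LINE;
    }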
From b9fb4d4870dd45c552fd4cd5e531e9626754f19f Mon Sep 17 00:00:00 2001
From: Ron Dreslinski
Date: Mon, 9 Oct 2006 17:13:50 -0400
Subject: Make memtest work with 8 memtesters

src/mem/physical.cc:
    Update comment to match memtest use
src/python/m5/objects/PhysicalMemory.py:
    Make memtester have a way to connect functionally
tests/configs/memtest.py:
    Properly create 8 memtesters and connect them to the memory system

--HG--
extra : convert_revision : e5a2dd9c8918d58051b553b5c6a14785d48b34ca
---
 src/mem/physical.cc                     |  2 +-
 src/python/m5/objects/PhysicalMemory.py |  1 +
 tests/configs/memtest.py                | 10 ++++++++--
 3 files changed, 10 insertions(+), 3 deletions(-)

diff --git a/src/mem/physical.cc b/src/mem/physical.cc
index 070693442..96d78bd99 100644
--- a/src/mem/physical.cc
+++ b/src/mem/physical.cc
@@ -231,7 +231,7 @@ PhysicalMemory::getPort(const std::string &if_name, int idx)
         port = new MemoryPort(name() + "-port", this);
         return port;
     } else if (if_name == "functional") {
-        /* special port for functional writes at startup. */
+        /* special port for functional writes at startup. And for memtester */
         return new MemoryPort(name() + "-funcport", this);
     } else {
         panic("PhysicalMemory::getPort: unknown port %s requested", if_name);

diff --git a/src/python/m5/objects/PhysicalMemory.py b/src/python/m5/objects/PhysicalMemory.py
index dd3ffd651..4e097543d 100644
--- a/src/python/m5/objects/PhysicalMemory.py
+++ b/src/python/m5/objects/PhysicalMemory.py
@@ -5,6 +5,7 @@ from MemObject import *
 class PhysicalMemory(MemObject):
     type = 'PhysicalMemory'
     port = Port("the access port")
+    functional = Port("Functional Access Port")
     range = Param.AddrRange(AddrRange('128MB'), "Device Address")
     file = Param.String('', "memory mapped file")
     latency = Param.Latency(Parent.clock, "latency of an access")

diff --git a/tests/configs/memtest.py b/tests/configs/memtest.py
index cfcefbcb9..c5cd0246d 100644
--- a/tests/configs/memtest.py
+++ b/tests/configs/memtest.py
@@ -51,7 +51,8 @@ class L2(BaseCache):
     tgts_per_mshr = 16
     write_buffers = 8

-nb_cores = 1
+# MAX CORES IS 8 with the false sharing method
+nb_cores = 8
 cpus = [ MemTest(max_loads=1e12) for i in xrange(nb_cores) ]

 # system simulated
@@ -66,12 +67,17 @@ system.l2c.cpu_side = system.toL2Bus.port
 # connect l2c to membus
 system.l2c.mem_side = system.membus.port

+which_port = 0
 # add L1 caches
 for cpu in cpus:
     cpu.l1c = L1(size = '32kB', assoc = 4)
     cpu.l1c.cpu_side = cpu.test
     cpu.l1c.mem_side = system.toL2Bus.port
-    system.funcmem.port = cpu.functional
+    if which_port == 0:
+        system.funcmem.port = cpu.functional
+        which_port = 1
+    else:
+        system.funcmem.functional = cpu.functional

 # connect memory to membus
--
cgit v1.2.3
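The getPort() dispatch this config relies on can be sketched as follows; the names are simplified stand-ins, and the point is that the "functional" interface hands out a fresh port per call, so several memtesters can each get their own functional connection to physical memory:

    #include <stdexcept>
    #include <string>

    struct MemoryPort {
        std::string portName;
        explicit MemoryPort(const std::string &n) : portName(n) {}
    };

    // Simplified shape of PhysicalMemory::getPort(): "port" is the
    // normal access port, while "functional" returns a new port on
    // every request -- one per connecting tester.
    MemoryPort *getPort(const std::string &if_name) {
        if (if_name == "port")
            return new MemoryPort("physmem-port");
        else if (if_name == "functional")
            return new MemoryPort("physmem-funcport"); // one per requester
        throw std::runtime_error("unknown port " + if_name);
    }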
From fd27c229b6bae09098864361dd6e51065fbaec3c Mon Sep 17 00:00:00 2001
From: Ron Dreslinski
Date: Mon, 9 Oct 2006 17:18:34 -0400
Subject: Fix a bitwise operation that was accidentally a logical operation.

--HG--
extra : convert_revision : 30f64bcb6bea47fd8cd6d77b0df17eff04dbbad0
---
 src/mem/cache/cache_impl.hh | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/mem/cache/cache_impl.hh b/src/mem/cache/cache_impl.hh
index 8d028e97f..7764b97c1 100644
--- a/src/mem/cache/cache_impl.hh
+++ b/src/mem/cache/cache_impl.hh
@@ -73,7 +73,7 @@ doTimingAccess(Packet *pkt, CachePort *cachePort, bool isCpuSide)
             handleResponse(pkt);
         else {
             //Check if we should do the snoop
-            if (pkt->flags && SNOOP_COMMIT)
+            if (pkt->flags & SNOOP_COMMIT)
                 snoop(pkt);
         }
     }
--
cgit v1.2.3
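The one-character bug above is easy to reproduce in isolation: && is a logical AND over whole values, so any set flag makes the test true, while & tests the one intended bit. A self-contained demonstration (SNOOP_COMMIT's value is illustrative):

    #include <cassert>

    int main() {
        const unsigned SNOOP_COMMIT = 0x40; // illustrative bit value
        unsigned flags = 0x1;               // some unrelated flag is set

        bool wrong = flags && SNOOP_COMMIT;        // true: both nonzero
        bool right = (flags & SNOOP_COMMIT) != 0;  // false: bit not set
        assert(wrong && !right);
        return 0;
    }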
From 094c6de663c1203d3bc64f8d973daba1690e2d33 Mon Sep 17 00:00:00 2001
From: Ron Dreslinski
Date: Mon, 9 Oct 2006 17:25:43 -0400
Subject: Multiprogrammed workload; still need to generate refs for it. But
 Nate wanted the config. Not sure on the naming convention for tests.

--HG--
extra : convert_revision : 052c2fc95dc7e2bbd78d4a177600d7ec2a530a4c
---
 tests/quick/00.hello.mp/test.py | 44 +++++++++++++++++++++++++++++++++++++++++
 1 file changed, 44 insertions(+)
 create mode 100644 tests/quick/00.hello.mp/test.py

diff --git a/tests/quick/00.hello.mp/test.py b/tests/quick/00.hello.mp/test.py
new file mode 100644
index 000000000..91fbfb7ed
--- /dev/null
+++ b/tests/quick/00.hello.mp/test.py
@@ -0,0 +1,44 @@
+# Copyright (c) 2006 The Regents of The University of Michigan
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met: redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer;
+# redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution;
+# neither the name of the copyright holders nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+# Authors: Ron Dreslinski
+
+# workload
+benchmarks = [
+    "tests/test-progs/hello/bin/alpha/linux/hello", "'hello'",
+    "tests/test-progs/hello/bin/alpha/linux/hello", "'hello'",
+    "tests/test-progs/hello/bin/alpha/linux/hello", "'hello'",
+    "tests/test-progs/hello/bin/alpha/linux/hello", "'hello'",
+    ]
+
+for i, cpu in zip(range(len(cpus)), root.system.cpu):
+    p = LiveProcess()
+    p.executable = benchmarks[i*2]
+    p.cmd = benchmarks[(i*2)+1]
+    root.system.cpu[i].workload = p
+    root.system.cpu[i].max_insts_all_threads = 10000000
+#root.system.cpu.workload = LiveProcess(cmd = 'hello',
+    #                                   executable = binpath('hello'))
--
cgit v1.2.3
DPRINTF(Cache, "Cache snooped a %s request and now supplying data," "new state is %i\n", pkt->cmdString(), new_state); tags->handleSnoop(blk, new_state, pkt); return hitLatency; } - if (blk) DPRINTF(Cache, "Cache snooped a %c request, new state is %i\n", + if (blk) DPRINTF(Cache, "Cache snooped a %s request, new state is %i\n", pkt->cmdString(), new_state); tags->handleSnoop(blk, new_state); return 0; -- cgit v1.2.3 From 727dea78c4b603a63d6c8bee10d317cb2905ffd4 Mon Sep 17 00:00:00 2001 From: Ron Dreslinski Date: Mon, 9 Oct 2006 17:31:58 -0400 Subject: Update configs for cpu_id tests/configs/o3-timing-mp.py: tests/configs/simple-atomic-mp.py: tests/configs/simple-timing-mp.py: Update config for cpu_id --HG-- extra : convert_revision : 32a1971997920473164ba12f2b121cb640bad7ac --- tests/configs/o3-timing-mp.py | 4 ++-- tests/configs/simple-atomic-mp.py | 4 ++-- tests/configs/simple-timing-mp.py | 2 +- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/tests/configs/o3-timing-mp.py b/tests/configs/o3-timing-mp.py index 881c23156..55af8be0d 100644 --- a/tests/configs/o3-timing-mp.py +++ b/tests/configs/o3-timing-mp.py @@ -54,7 +54,7 @@ class L2(BaseCache): write_buffers = 8 nb_cores = 4 -cpus = [ DetailedO3CPU() for i in xrange(nb_cores) ] +cpus = [ DetailedO3CPU(cpu_id=i) for i in xrange(nb_cores) ] # system simulated system = System(cpu = cpus, physmem = PhysicalMemory(), membus = @@ -86,5 +86,5 @@ system.physmem.port = system.membus.port root = Root( system = system ) root.system.mem_mode = 'timing' -root.trace.flags="Bus Cache" +#root.trace.flags="Bus Cache" #root.trace.flags = "BusAddrRanges" diff --git a/tests/configs/simple-atomic-mp.py b/tests/configs/simple-atomic-mp.py index cc1a36dda..eaa6ec66e 100644 --- a/tests/configs/simple-atomic-mp.py +++ b/tests/configs/simple-atomic-mp.py @@ -52,10 +52,10 @@ class L2(BaseCache): write_buffers = 8 nb_cores = 4 -cpus = [ AtomicSimpleCPU() for i in xrange(nb_cores) ] +cpus = [ AtomicSimpleCPU(cpu_id=i) for i in xrange(nb_cores) ] # system simulated -system = System(cpu = cpus, physmem = PhysicalMemory(), membus = +system = System(cpu = cpus, physmem = PhysicalMemory(range = AddrRange('1024MB')), membus = Bus()) # l2cache & bus diff --git a/tests/configs/simple-timing-mp.py b/tests/configs/simple-timing-mp.py index 9fc5f3874..8f9ab0dde 100644 --- a/tests/configs/simple-timing-mp.py +++ b/tests/configs/simple-timing-mp.py @@ -52,7 +52,7 @@ class L2(BaseCache): write_buffers = 8 nb_cores = 4 -cpus = [ TimingSimpleCPU() for i in xrange(nb_cores) ] +cpus = [ TimingSimpleCPU(cpu_id=i) for i in xrange(nb_cores) ] # system simulated system = System(cpu = cpus, physmem = PhysicalMemory(), membus = -- cgit v1.2.3