author     Gabe Black <gblack@eecs.umich.edu>  2006-11-06 19:52:32 -0500
committer  Gabe Black <gblack@eecs.umich.edu>  2006-11-06 19:52:32 -0500
commit     02abca6b9e4e21d8d89eb83eabab3be8ac10c9d8 (patch)
tree       6c923510076654885f31d2328c4853c5974699a8
parent     dd14c86ec8afb3a98d55a58eaafd8b85dd651bd6 (diff)
parent     bf3223d7ce681db8ca59dac49c6b44b672012e5d (diff)
download   gem5-02abca6b9e4e21d8d89eb83eabab3be8ac10c9d8.tar.xz
Merge zizzer.eecs.umich.edu:/bk/newmem/
into zeep.eecs.umich.edu:/home/gblack/m5/newmemmemops

src/SConscript:
    SCCS merged

--HG--
extra : convert_revision : f130c8a2d33f58d857e5d5a02bb9698c1bceb23b
-rw-r--r--SConstruct6
-rw-r--r--src/arch/sparc/floatregfile.cc4
-rw-r--r--src/arch/sparc/intregfile.cc4
-rw-r--r--src/base/pollevent.cc3
-rw-r--r--src/base/random.cc24
-rw-r--r--src/base/random.hh1
-rw-r--r--src/base/stats/flags.hh2
-rw-r--r--src/base/time.hh44
-rw-r--r--src/cpu/base.cc20
-rw-r--r--src/cpu/base.hh14
-rw-r--r--src/cpu/inst_seq.hh2
-rw-r--r--src/cpu/o3/lsq_unit_impl.hh1
-rw-r--r--src/cpu/simple/atomic.cc12
-rw-r--r--src/cpu/simple/timing.cc18
-rw-r--r--src/dev/isa_fake.cc137
-rw-r--r--src/dev/isa_fake.hh32
-rw-r--r--src/python/m5/objects/Pci.py3
-rw-r--r--src/python/m5/objects/Tsunami.py6
-rw-r--r--src/sim/byteswap.hh6
-rw-r--r--tests/quick/00.hello/ref/alpha/linux/o3-timing/config.ini6
-rw-r--r--tests/quick/00.hello/ref/alpha/linux/o3-timing/config.out82
-rw-r--r--tests/quick/00.hello/ref/alpha/linux/o3-timing/m5stats.txt42
-rw-r--r--tests/quick/00.hello/ref/alpha/linux/o3-timing/stderr1
-rw-r--r--tests/quick/00.hello/ref/alpha/linux/o3-timing/stdout6
-rw-r--r--tests/quick/00.hello/ref/alpha/linux/simple-atomic/config.ini2
-rw-r--r--tests/quick/00.hello/ref/alpha/linux/simple-atomic/config.out2
-rw-r--r--tests/quick/00.hello/ref/alpha/linux/simple-atomic/m5stats.txt8
-rw-r--r--tests/quick/00.hello/ref/alpha/linux/simple-atomic/stderr1
-rw-r--r--tests/quick/00.hello/ref/alpha/linux/simple-atomic/stdout4
-rw-r--r--tests/quick/00.hello/ref/alpha/linux/simple-timing/config.ini6
-rw-r--r--tests/quick/00.hello/ref/alpha/linux/simple-timing/config.out82
-rw-r--r--tests/quick/00.hello/ref/alpha/linux/simple-timing/m5stats.txt14
-rw-r--r--tests/quick/00.hello/ref/alpha/linux/simple-timing/stderr1
-rw-r--r--tests/quick/00.hello/ref/alpha/linux/simple-timing/stdout6
-rw-r--r--tests/quick/00.hello/ref/alpha/tru64/o3-timing/config.ini6
-rw-r--r--tests/quick/00.hello/ref/alpha/tru64/o3-timing/config.out82
-rw-r--r--tests/quick/00.hello/ref/alpha/tru64/o3-timing/m5stats.txt42
-rw-r--r--tests/quick/00.hello/ref/alpha/tru64/o3-timing/stderr1
-rw-r--r--tests/quick/00.hello/ref/alpha/tru64/o3-timing/stdout6
-rw-r--r--tests/quick/00.hello/ref/alpha/tru64/simple-atomic/config.ini2
-rw-r--r--tests/quick/00.hello/ref/alpha/tru64/simple-atomic/config.out2
-rw-r--r--tests/quick/00.hello/ref/alpha/tru64/simple-atomic/m5stats.txt8
-rw-r--r--tests/quick/00.hello/ref/alpha/tru64/simple-atomic/stderr1
-rw-r--r--tests/quick/00.hello/ref/alpha/tru64/simple-atomic/stdout4
-rw-r--r--tests/quick/00.hello/ref/alpha/tru64/simple-timing/config.ini6
-rw-r--r--tests/quick/00.hello/ref/alpha/tru64/simple-timing/config.out82
-rw-r--r--tests/quick/00.hello/ref/alpha/tru64/simple-timing/m5stats.txt14
-rw-r--r--tests/quick/00.hello/ref/alpha/tru64/simple-timing/stderr1
-rw-r--r--tests/quick/00.hello/ref/alpha/tru64/simple-timing/stdout6
-rw-r--r--tests/quick/01.hello-2T-smt/ref/alpha/linux/o3-timing/config.ini6
-rw-r--r--tests/quick/01.hello-2T-smt/ref/alpha/linux/o3-timing/config.out82
-rw-r--r--tests/quick/01.hello-2T-smt/ref/alpha/linux/o3-timing/m5stats.txt724
-rw-r--r--tests/quick/01.hello-2T-smt/ref/alpha/linux/o3-timing/stderr3
-rw-r--r--tests/quick/01.hello-2T-smt/ref/alpha/linux/o3-timing/stdout8
-rw-r--r--tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-atomic-dual/config.ini22
-rw-r--r--tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-atomic-dual/config.out90
-rw-r--r--tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-atomic-dual/m5stats.txt8
-rw-r--r--tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-atomic-dual/stderr10
-rw-r--r--tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-atomic-dual/stdout6
-rw-r--r--tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-atomic/config.ini21
-rw-r--r--tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-atomic/config.out89
-rw-r--r--tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-atomic/m5stats.txt8
-rw-r--r--tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-atomic/stderr4
-rw-r--r--tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-atomic/stdout6
-rw-r--r--tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-timing-dual/config.ini22
-rw-r--r--tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-timing-dual/config.out90
-rw-r--r--tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-timing-dual/console.system.sim_console2
-rw-r--r--tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-timing-dual/m5stats.txt397
-rw-r--r--tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-timing-dual/stderr6
-rw-r--r--tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-timing-dual/stdout8
-rw-r--r--tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-timing/config.ini21
-rw-r--r--tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-timing/config.out89
-rw-r--r--tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-timing/m5stats.txt136
-rw-r--r--tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-timing/stderr8
-rw-r--r--tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-timing/stdout8
-rw-r--r--tests/quick/20.eio-short/ref/alpha/eio/simple-atomic/config.ini4
-rw-r--r--tests/quick/20.eio-short/ref/alpha/eio/simple-atomic/config.out4
-rw-r--r--tests/quick/20.eio-short/ref/alpha/eio/simple-atomic/m5stats.txt8
-rw-r--r--tests/quick/20.eio-short/ref/alpha/eio/simple-atomic/stdout4
-rw-r--r--tests/quick/20.eio-short/ref/alpha/eio/simple-timing/config.ini6
-rw-r--r--tests/quick/20.eio-short/ref/alpha/eio/simple-timing/config.out74
-rw-r--r--tests/quick/20.eio-short/ref/alpha/eio/simple-timing/m5stats.txt14
-rw-r--r--tests/quick/20.eio-short/ref/alpha/eio/simple-timing/stdout6
83 files changed, 1517 insertions, 1342 deletions
diff --git a/SConstruct b/SConstruct
index dac4d137c..59d40d5cc 100644
--- a/SConstruct
+++ b/SConstruct
@@ -270,6 +270,12 @@ if not conf.CheckLib(py_version_name):
print "Error: can't find Python library", py_version_name
Exit(1)
+# On Solaris you need to use libsocket for socket ops
+if not conf.CheckLibWithHeader(None, 'sys/socket.h', 'C++', 'accept(0,0,0);'):
+ if not conf.CheckLibWithHeader('socket', 'sys/socket.h', 'C++', 'accept(0,0,0);'):
+ print "Can't find library with socket calls (e.g. accept())"
+ Exit(1)
+
# Check for zlib. If the check passes, libz will be automatically
# added to the LIBS environment variable.
if not conf.CheckLibWithHeader('z', 'zlib.h', 'C++'):
diff --git a/src/arch/sparc/floatregfile.cc b/src/arch/sparc/floatregfile.cc
index 3afe6ef54..7f3d5a758 100644
--- a/src/arch/sparc/floatregfile.cc
+++ b/src/arch/sparc/floatregfile.cc
@@ -34,6 +34,8 @@
#include "sim/byteswap.hh"
#include "sim/serialize.hh"
+#include <string.h>
+
using namespace SparcISA;
using namespace std;
@@ -55,7 +57,7 @@ string SparcISA::getFloatRegName(RegIndex index)
void FloatRegFile::clear()
{
- bzero(regSpace, sizeof(regSpace));
+ memset(regSpace, 0, sizeof(regSpace));
}
FloatReg FloatRegFile::readReg(int floatReg, int width)
diff --git a/src/arch/sparc/intregfile.cc b/src/arch/sparc/intregfile.cc
index 164f194dd..0e313dc94 100644
--- a/src/arch/sparc/intregfile.cc
+++ b/src/arch/sparc/intregfile.cc
@@ -33,6 +33,8 @@
#include "base/trace.hh"
#include "sim/serialize.hh"
+#include <string.h>
+
using namespace SparcISA;
using namespace std;
@@ -62,7 +64,7 @@ void IntRegFile::clear()
for (x = 0; x < MaxGL; x++)
memset(regGlobals[x], 0, sizeof(IntReg) * RegsPerFrame);
for(int x = 0; x < 2 * NWindows; x++)
- bzero(regSegments[x], sizeof(IntReg) * RegsPerFrame);
+ memset(regSegments[x], 0, sizeof(IntReg) * RegsPerFrame);
}
IntRegFile::IntRegFile()
diff --git a/src/base/pollevent.cc b/src/base/pollevent.cc
index 2743cd95d..fd5b09d28 100644
--- a/src/base/pollevent.cc
+++ b/src/base/pollevent.cc
@@ -30,6 +30,9 @@
#include <sys/ioctl.h>
#include <sys/types.h>
+#if defined(__sun__)
+#include <sys/file.h>
+#endif
#include <fcntl.h>
#include <signal.h>
diff --git a/src/base/random.cc b/src/base/random.cc
index e135b55f5..82c9e3566 100644
--- a/src/base/random.cc
+++ b/src/base/random.cc
@@ -32,6 +32,10 @@
#include <cstdlib>
#include <cmath>
+#if defined(__sun__)
+#include <ieeefp.h>
+#endif
+
#include "sim/param.hh"
#include "base/random.hh"
#include "base/trace.hh"
@@ -65,12 +69,27 @@ getLong()
return mrand48();
}
+double
+m5round(double r)
+{
+#if defined(__sun__)
+ double val;
+ fp_rnd oldrnd = fpsetround(FP_RN);
+ val = rint(r);
+ fpsetround(oldrnd);
+ return val;
+#else
+ return round(r);
+#endif
+}
+
int64_t
getUniform(int64_t min, int64_t max)
{
double r;
r = drand48() * (max-min) + min;
- return (int64_t)round(r);
+
+ return (int64_t)m5round(r);
}
uint64_t
@@ -78,7 +97,8 @@ getUniformPos(uint64_t min, uint64_t max)
{
double r;
r = drand48() * (max-min) + min;
- return (uint64_t)round(r);
+
+ return (uint64_t)m5round(r);
}
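For context on the hunk above: m5round() exists presumably because round(3), a C99 addition, was not available in the Solaris environment being targeted, so the fallback uses rint() with the FPU rounding mode pinned to round-to-nearest via fpsetround(). A minimal standalone sketch of the same fallback pattern, assuming only <cmath> and, on Solaris, <ieeefp.h> (the helper name and the sample value are illustrative, not part of the commit):

    #include <cmath>
    #include <cstdio>
    #if defined(__sun__)
    #include <ieeefp.h>
    #endif

    // Prefer C99 round(); on Solaris pin the rounding mode to round-to-nearest,
    // use rint() instead, and restore the caller's mode afterwards.
    static double round_nearest(double r)
    {
    #if defined(__sun__)
        fp_rnd oldrnd = fpsetround(FP_RN);
        double val = rint(r);
        fpsetround(oldrnd);
        return val;
    #else
        return std::round(r);
    #endif
    }

    int main()
    {
        std::printf("%f\n", round_nearest(2.6));   // prints 3.000000
        return 0;
    }

One small difference the fallback accepts: rint() under round-to-nearest breaks exact .5 ties to the even neighbour, while round() breaks them away from zero.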
diff --git a/src/base/random.hh b/src/base/random.hh
index b5eb39f94..40d62da7f 100644
--- a/src/base/random.hh
+++ b/src/base/random.hh
@@ -36,6 +36,7 @@
long getLong();
double getDouble();
+double m5round(double r);
uint64_t getUniformPos(uint64_t min, uint64_t max);
int64_t getUniform(int64_t min, int64_t max);
diff --git a/src/base/stats/flags.hh b/src/base/stats/flags.hh
index ada1a4a87..69f73f66a 100644
--- a/src/base/stats/flags.hh
+++ b/src/base/stats/flags.hh
@@ -36,7 +36,7 @@ namespace Stats {
* Define the storage for format flags.
* @todo Can probably shrink this.
*/
-typedef u_int32_t StatFlags;
+typedef uint32_t StatFlags;
/** Nothing extra to print. */
const StatFlags none = 0x00000000;
diff --git a/src/base/time.hh b/src/base/time.hh
index 24e8a8a53..7aa4c50db 100644
--- a/src/base/time.hh
+++ b/src/base/time.hh
@@ -65,4 +65,48 @@ Time operator-(const Time &l, const Time &r);
std::ostream &operator<<(std::ostream &out, const Time &time);
+
+/*
+ * Copyright (c) 1982, 1986, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)time.h 8.2 (Berkeley) 7/10/94
+ */
+
+#if defined(__sun__)
+#define timersub(tvp, uvp, vvp) \
+ do { \
+ (vvp)->tv_sec = (tvp)->tv_sec - (uvp)->tv_sec; \
+ (vvp)->tv_usec = (tvp)->tv_usec - (uvp)->tv_usec; \
+ if ((vvp)->tv_usec < 0) { \
+ (vvp)->tv_sec--; \
+ (vvp)->tv_usec += 1000000; \
+ } \
+ } while (0)
+#endif
+
#endif // __SIM_TIME_HH__
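The timersub() fallback added above is the standard 4.4BSD macro, which glibc and the BSDs ship in <sys/time.h> but Solaris does not: it subtracts one struct timeval from another, borrowing a second whenever the microsecond difference goes negative. A small usage sketch, assuming a POSIX gettimeofday() environment where timersub() comes either from the system header or from this fallback (variable names are illustrative):

    #include <sys/time.h>
    #include <cstdio>

    int main()
    {
        struct timeval start, stop, elapsed;
        gettimeofday(&start, NULL);
        // ... work being timed ...
        gettimeofday(&stop, NULL);

        timersub(&stop, &start, &elapsed);   // elapsed = stop - start
        std::printf("%ld.%06ld s\n", (long)elapsed.tv_sec, (long)elapsed.tv_usec);
        return 0;
    }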
diff --git a/src/cpu/base.cc b/src/cpu/base.cc
index 66c5d3e11..4c243a2e9 100644
--- a/src/cpu/base.cc
+++ b/src/cpu/base.cc
@@ -254,6 +254,26 @@ BaseCPU::regStats()
#endif
}
+Tick
+BaseCPU::nextCycle()
+{
+ Tick next_tick = curTick + clock - 1;
+ next_tick -= (next_tick % clock);
+ return next_tick;
+}
+
+Tick
+BaseCPU::nextCycle(Tick begin_tick)
+{
+ Tick next_tick = begin_tick;
+
+ while (next_tick < curTick)
+ next_tick += clock;
+
+ next_tick -= (next_tick % clock);
+ assert(next_tick >= curTick);
+ return next_tick;
+}
void
BaseCPU::registerThreadContexts()
diff --git a/src/cpu/base.hh b/src/cpu/base.hh
index 79d22c992..9257778ef 100644
--- a/src/cpu/base.hh
+++ b/src/cpu/base.hh
@@ -77,6 +77,20 @@ class BaseCPU : public MemObject
inline Tick cycles(int numCycles) const { return clock * numCycles; }
inline Tick curCycle() const { return curTick / clock; }
+ /** The next cycle the CPU should be scheduled, given a cache
+ * access or quiesce event returning on this cycle. This function
+ * may return curTick if the CPU should run on the current cycle.
+ */
+ Tick nextCycle();
+
+ /** The next cycle the CPU should be scheduled, given a cache
+ * access or quiesce event returning on the given Tick. This
+ * function may return curTick if the CPU should run on the
+ * current cycle.
+ * @param begin_tick The tick that the event is completing on.
+ */
+ Tick nextCycle(Tick begin_tick);
+
#if FULL_SYSTEM
protected:
// uint64_t interrupts[TheISA::NumInterruptLevels];
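To make the rounding in the new nextCycle() concrete: with an illustrative clock of 500 ticks and curTick = 1200, next_tick starts at 1200 + 500 - 1 = 1699, and subtracting 1699 % 500 = 199 lands on 1500, the next clock edge; starting exactly on an edge (curTick = 1500) gives 1999 - 499 = 1500, i.e. curTick itself, which is what the "may return curTick" note in base.hh promises. A self-contained sketch of the same arithmetic (the function name and sample values are made up for illustration):

    #include <cassert>
    #include <cstdint>

    typedef uint64_t Tick;

    // Round 'now' up to the next multiple of 'clock', or return 'now' itself
    // if it already sits on a clock edge -- the BaseCPU::nextCycle() formula.
    static Tick next_cycle(Tick now, Tick clock)
    {
        Tick next_tick = now + clock - 1;
        next_tick -= next_tick % clock;
        return next_tick;
    }

    int main()
    {
        assert(next_cycle(1200, 500) == 1500);  // mid-cycle: rounds up to the edge
        assert(next_cycle(1500, 500) == 1500);  // already on an edge: unchanged
        return 0;
    }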
diff --git a/src/cpu/inst_seq.hh b/src/cpu/inst_seq.hh
index e7acd215f..21e04ed25 100644
--- a/src/cpu/inst_seq.hh
+++ b/src/cpu/inst_seq.hh
@@ -32,8 +32,6 @@
#ifndef __STD_TYPES_HH__
#define __STD_TYPES_HH__
-#include <stdint.h>
-
// inst sequence type, used to order instructions in the ready list,
// if this rolls over the ready list order temporarily will get messed
// up, but execution will continue and complete correctly
diff --git a/src/cpu/o3/lsq_unit_impl.hh b/src/cpu/o3/lsq_unit_impl.hh
index d940d7cb3..9a0e48819 100644
--- a/src/cpu/o3/lsq_unit_impl.hh
+++ b/src/cpu/o3/lsq_unit_impl.hh
@@ -131,6 +131,7 @@ LSQUnit<Impl>::init(Params *params, LSQ *lsq_ptr, unsigned maxLQEntries,
usedPorts = 0;
cachePorts = params->cachePorts;
+ retryPkt = NULL;
memDepViolator = NULL;
blockedLoadSeqNum = 0;
diff --git a/src/cpu/simple/atomic.cc b/src/cpu/simple/atomic.cc
index 72249be41..4f68cfd6f 100644
--- a/src/cpu/simple/atomic.cc
+++ b/src/cpu/simple/atomic.cc
@@ -180,9 +180,7 @@ AtomicSimpleCPU::resume()
changeState(SimObject::Running);
if (thread->status() == ThreadContext::Active) {
if (!tickEvent.scheduled()) {
- Tick nextTick = curTick + cycles(1) - 1;
- nextTick -= (nextTick % (cycles(1)));
- tickEvent.schedule(nextTick);
+ tickEvent.schedule(nextCycle());
}
}
}
@@ -211,9 +209,7 @@ AtomicSimpleCPU::takeOverFrom(BaseCPU *oldCPU)
ThreadContext *tc = threadContexts[i];
if (tc->status() == ThreadContext::Active && _status != Running) {
_status = Running;
- Tick nextTick = curTick + cycles(1) - 1;
- nextTick -= (nextTick % (cycles(1)));
- tickEvent.schedule(nextTick);
+ tickEvent.schedule(nextCycle());
break;
}
}
@@ -231,9 +227,7 @@ AtomicSimpleCPU::activateContext(int thread_num, int delay)
notIdleFraction++;
//Make sure ticks are still on multiples of cycles
- Tick nextTick = curTick + cycles(delay + 1) - 1;
- nextTick -= (nextTick % (cycles(1)));
- tickEvent.schedule(nextTick);
+ tickEvent.schedule(nextCycle(curTick + cycles(delay)));
_status = Running;
}
diff --git a/src/cpu/simple/timing.cc b/src/cpu/simple/timing.cc
index 4d57bf6d5..abf316095 100644
--- a/src/cpu/simple/timing.cc
+++ b/src/cpu/simple/timing.cc
@@ -532,14 +532,13 @@ TimingSimpleCPU::IcachePort::recvTiming(PacketPtr pkt)
{
if (pkt->isResponse()) {
// delay processing of returned data until next CPU clock edge
- Tick time = pkt->req->getTime();
- while (time < curTick)
- time += lat;
+ Tick mem_time = pkt->req->getTime();
+ Tick next_tick = cpu->nextCycle(mem_time);
- if (time == curTick)
+ if (next_tick == curTick)
cpu->completeIfetch(pkt);
else
- tickEvent.schedule(pkt, time);
+ tickEvent.schedule(pkt, next_tick);
return true;
}
@@ -610,14 +609,13 @@ TimingSimpleCPU::DcachePort::recvTiming(PacketPtr pkt)
{
if (pkt->isResponse()) {
// delay processing of returned data until next CPU clock edge
- Tick time = pkt->req->getTime();
- while (time < curTick)
- time += lat;
+ Tick mem_time = pkt->req->getTime();
+ Tick next_tick = cpu->nextCycle(mem_time);
- if (time == curTick)
+ if (next_tick == curTick)
cpu->completeDataAccess(pkt);
else
- tickEvent.schedule(pkt, time);
+ tickEvent.schedule(pkt, next_tick);
return true;
}
diff --git a/src/dev/isa_fake.cc b/src/dev/isa_fake.cc
index ccc9a1f7c..103fdd8ce 100644
--- a/src/dev/isa_fake.cc
+++ b/src/dev/isa_fake.cc
@@ -25,18 +25,13 @@
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
- * Authors: Miguel Serrano
- * Ali Saidi
+ * Authors: Ali Saidi
*/
/** @file
* Isa Fake Device implementation
*/
-#include <deque>
-#include <string>
-#include <vector>
-
#include "base/trace.hh"
#include "dev/isa_fake.hh"
#include "mem/packet.hh"
@@ -49,74 +44,67 @@ using namespace std;
IsaFake::IsaFake(Params *p)
: BasicPioDevice(p)
{
- pioSize = p->pio_size;
-}
-
-Tick
-IsaFake::read(PacketPtr pkt)
-{
- assert(pkt->result == Packet::Unknown);
- assert(pkt->getAddr() >= pioAddr && pkt->getAddr() < pioAddr + pioSize);
-
- DPRINTF(Tsunami, "read va=%#x size=%d\n", pkt->getAddr(), pkt->getSize());
-
- switch (pkt->getSize()) {
- case sizeof(uint64_t):
- pkt->set(0xFFFFFFFFFFFFFFFFULL);
- break;
- case sizeof(uint32_t):
- pkt->set((uint32_t)0xFFFFFFFF);
- break;
- case sizeof(uint16_t):
- pkt->set((uint16_t)0xFFFF);
- break;
- case sizeof(uint8_t):
- pkt->set((uint8_t)0xFF);
- break;
- default:
- panic("invalid access size(?) for PCI configspace!\n");
- }
- pkt->result = Packet::Success;
- return pioDelay;
-}
-
-Tick
-IsaFake::write(PacketPtr pkt)
-{
- DPRINTF(Tsunami, "write - va=%#x size=%d \n", pkt->getAddr(), pkt->getSize());
- pkt->result = Packet::Success;
- return pioDelay;
-}
+ if (!params()->retBadAddr)
+ pioSize = p->pio_size;
-BadAddr::BadAddr(Params *p)
- : BasicPioDevice(p)
-{
+ memset(&retData, p->retData, sizeof(retData));
}
void
-BadAddr::init()
+IsaFake::init()
{
// Only init this device if it's connected to anything.
if (pioPort)
PioDevice::init();
}
+
Tick
-BadAddr::read(PacketPtr pkt)
+IsaFake::read(PacketPtr pkt)
{
assert(pkt->result == Packet::Unknown);
- DPRINTF(Tsunami, "read to bad address va=%#x size=%d\n",
- pkt->getAddr(), pkt->getSize());
- pkt->result = Packet::BadAddress;
+
+ if (params()->retBadAddr) {
+ DPRINTF(Tsunami, "read to bad address va=%#x size=%d\n",
+ pkt->getAddr(), pkt->getSize());
+ pkt->result = Packet::BadAddress;
+ } else {
+ assert(pkt->getAddr() >= pioAddr && pkt->getAddr() < pioAddr + pioSize);
+ DPRINTF(Tsunami, "read va=%#x size=%d\n",
+ pkt->getAddr(), pkt->getSize());
+ switch (pkt->getSize()) {
+ case sizeof(uint64_t):
+ pkt->set(retData);
+ break;
+ case sizeof(uint32_t):
+ pkt->set((uint32_t)retData);
+ break;
+ case sizeof(uint16_t):
+ pkt->set((uint16_t)retData);
+ break;
+ case sizeof(uint8_t):
+ pkt->set((uint8_t)retData);
+ break;
+ default:
+ panic("invalid access size!\n");
+ }
+ pkt->result = Packet::Success;
+ }
return pioDelay;
}
Tick
-BadAddr::write(PacketPtr pkt)
+IsaFake::write(PacketPtr pkt)
{
- DPRINTF(Tsunami, "write to bad address va=%#x size=%d \n",
- pkt->getAddr(), pkt->getSize());
- pkt->result = Packet::BadAddress;
+ if (params()->retBadAddr) {
+ DPRINTF(Tsunami, "write to bad address va=%#x size=%d \n",
+ pkt->getAddr(), pkt->getSize());
+ pkt->result = Packet::BadAddress;
+ } else {
+ DPRINTF(Tsunami, "write - va=%#x size=%d \n",
+ pkt->getAddr(), pkt->getSize());
+ pkt->result = Packet::Success;
+ }
return pioDelay;
}
@@ -125,6 +113,8 @@ BEGIN_DECLARE_SIM_OBJECT_PARAMS(IsaFake)
Param<Addr> pio_addr;
Param<Tick> pio_latency;
Param<Addr> pio_size;
+ Param<bool> ret_bad_addr;
+ Param<uint8_t> ret_data;
SimObjectParam<Platform *> platform;
SimObjectParam<System *> system;
@@ -135,6 +125,8 @@ BEGIN_INIT_SIM_OBJECT_PARAMS(IsaFake)
INIT_PARAM(pio_addr, "Device Address"),
INIT_PARAM(pio_latency, "Programmed IO latency"),
INIT_PARAM(pio_size, "Size of address range"),
+ INIT_PARAM(ret_bad_addr, "Return pkt status BadAddr"),
+ INIT_PARAM(ret_data, "Data to return if not bad addr"),
INIT_PARAM(platform, "platform"),
INIT_PARAM(system, "system object")
@@ -147,40 +139,11 @@ CREATE_SIM_OBJECT(IsaFake)
p->pio_addr = pio_addr;
p->pio_delay = pio_latency;
p->pio_size = pio_size;
+ p->retBadAddr = ret_bad_addr;
+ p->retData = ret_data;
p->platform = platform;
p->system = system;
return new IsaFake(p);
}
REGISTER_SIM_OBJECT("IsaFake", IsaFake)
-
-BEGIN_DECLARE_SIM_OBJECT_PARAMS(BadAddr)
-
- Param<Addr> pio_addr;
- Param<Tick> pio_latency;
- SimObjectParam<Platform *> platform;
- SimObjectParam<System *> system;
-
-END_DECLARE_SIM_OBJECT_PARAMS(BadAddr)
-
-BEGIN_INIT_SIM_OBJECT_PARAMS(BadAddr)
-
- INIT_PARAM(pio_addr, "Device Address"),
- INIT_PARAM(pio_latency, "Programmed IO latency"),
- INIT_PARAM(platform, "platform"),
- INIT_PARAM(system, "system object")
-
-END_INIT_SIM_OBJECT_PARAMS(BadAddr)
-
-CREATE_SIM_OBJECT(BadAddr)
-{
- BadAddr::Params *p = new BadAddr::Params;
- p->name = getInstanceName();
- p->pio_addr = pio_addr;
- p->pio_delay = pio_latency;
- p->platform = platform;
- p->system = system;
- return new BadAddr(p);
-}
-
-REGISTER_SIM_OBJECT("BadAddr", BadAddr)
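One detail of the consolidated IsaFake worth spelling out: the constructor's memset(&retData, p->retData, sizeof(retData)) replicates the one-byte ret_data parameter into every byte of the 64-bit retData member, so the default of 0xFF expands to 0xFFFFFFFFFFFFFFFF and the size switch in read() simply truncates it for narrower accesses -- the same "return -1" behaviour the old IsaFake hard-coded. A standalone sketch of that byte-replication trick (names and printed values are illustrative):

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    int main()
    {
        uint8_t  ret_data = 0xFF;   // one-byte fill pattern, as in the default param
        uint64_t ret_word;

        // memset() writes the byte into every position of the 64-bit word.
        std::memset(&ret_word, ret_data, sizeof(ret_word));

        std::printf("%#llx\n", (unsigned long long)ret_word);   // 0xffffffffffffffff
        std::printf("%#x\n", (uint32_t)ret_word);                // 0xffffffff
        return 0;
    }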
diff --git a/src/dev/isa_fake.hh b/src/dev/isa_fake.hh
index c781d1ba6..4c195a97f 100644
--- a/src/dev/isa_fake.hh
+++ b/src/dev/isa_fake.hh
@@ -25,8 +25,7 @@
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
- * Authors: Miguel Serrano
- * Ali Saidi
+ * Authors: Ali Saidi
*/
/** @file
@@ -42,10 +41,11 @@
#include "mem/packet.hh"
/**
- * IsaFake is a device that returns -1 on all reads and
- * accepts all writes. It is meant to be placed at an address range
+ * IsaFake is a device that returns BadAddr, 1 or 0 on all reads and
+ * writes. It is meant to be placed at an address range
* so that an mcheck doesn't occur when an os probes a piece of hw
- * that doesn't exist (e.g. UARTs1-3).
+ * that doesn't exist (e.g. UARTs1-3), or catch requests in the memory system
+ * that have no responders.
*/
class IsaFake : public BasicPioDevice
{
@@ -53,9 +53,12 @@ class IsaFake : public BasicPioDevice
struct Params : public BasicPioDevice::Params
{
Addr pio_size;
+ bool retBadAddr;
+ uint8_t retData;
};
protected:
const Params *params() const { return (const Params*)_params; }
+ uint64_t retData;
public:
/**
@@ -77,23 +80,8 @@ class IsaFake : public BasicPioDevice
* @param data the data to not write.
*/
virtual Tick write(PacketPtr pkt);
-};
-/**
- * BadAddr is a device that fills the packet's result field with "BadAddress".
- * @todo: Consider consolidating with IsaFake and similar classes.
- */
-class BadAddr : public BasicPioDevice
-{
- public:
- struct Params : public BasicPioDevice::Params
- {
- };
-
- BadAddr(Params *p);
- virtual void init();
- virtual Tick read(PacketPtr pkt);
- virtual Tick write(PacketPtr pkt);
+ void init();
};
-#endif // __TSUNAMI_FAKE_HH__
+#endif // __ISA_FAKE_HH__
diff --git a/src/python/m5/objects/Pci.py b/src/python/m5/objects/Pci.py
index 55bf23534..9d40adbfe 100644
--- a/src/python/m5/objects/Pci.py
+++ b/src/python/m5/objects/Pci.py
@@ -57,6 +57,3 @@ class PciDevice(DmaDevice):
pio_latency = Param.Latency('1ns', "Programmed IO latency in simticks")
configdata = Param.PciConfigData(Parent.any, "PCI Config data")
config_latency = Param.Latency('20ns', "Config read or write latency")
-
-class PciFake(PciDevice):
- type = 'PciFake'
diff --git a/src/python/m5/objects/Tsunami.py b/src/python/m5/objects/Tsunami.py
index 42bcab089..78ab65b31 100644
--- a/src/python/m5/objects/Tsunami.py
+++ b/src/python/m5/objects/Tsunami.py
@@ -14,9 +14,11 @@ class TsunamiCChip(BasicPioDevice):
class IsaFake(BasicPioDevice):
type = 'IsaFake'
pio_size = Param.Addr(0x8, "Size of address range")
+ ret_data = Param.UInt8(0xFF, "Default data to return")
+ ret_bad_addr = Param.Bool(False, "Return pkt status bad address on access")
-class BadAddr(BasicPioDevice):
- type = 'BadAddr'
+class BadAddr(IsaFake):
+ ret_bad_addr = Param.Bool(True, "Return pkt status bad address on access")
class TsunamiIO(BasicPioDevice):
type = 'TsunamiIO'
diff --git a/src/sim/byteswap.hh b/src/sim/byteswap.hh
index 7648b8fcd..7b1ae701e 100644
--- a/src/sim/byteswap.hh
+++ b/src/sim/byteswap.hh
@@ -47,6 +47,8 @@
// If one doesn't exist, we pretty much get what is listed below, so it all
// works out
#include <byteswap.h>
+#elif defined (__sun__)
+#include <sys/isa_defs.h>
#else
#include <machine/endian.h>
#endif
@@ -128,12 +130,12 @@ template <typename T> static inline T letobe(T value) {return swap_byte(value);}
//For conversions not involving the guest system, we can define the functions
//conditionally based on the BYTE_ORDER macro and outside of the namespaces
-#if BYTE_ORDER == BIG_ENDIAN
+#if defined(_BIG_ENDIAN) || BYTE_ORDER == BIG_ENDIAN
template <typename T> static inline T htole(T value) {return swap_byte(value);}
template <typename T> static inline T letoh(T value) {return swap_byte(value);}
template <typename T> static inline T htobe(T value) {return value;}
template <typename T> static inline T betoh(T value) {return value;}
-#elif BYTE_ORDER == LITTLE_ENDIAN
+#elif defined(_LITTLE_ENDIAN) || BYTE_ORDER == LITTLE_ENDIAN
template <typename T> static inline T htole(T value) {return value;}
template <typename T> static inline T letoh(T value) {return value;}
template <typename T> static inline T htobe(T value) {return swap_byte(value);}
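The extra defined(_BIG_ENDIAN)/defined(_LITTLE_ENDIAN) tests exist because Solaris has no BYTE_ORDER macro; its <sys/isa_defs.h> defines exactly one of _LITTLE_ENDIAN or _BIG_ENDIAN instead, so checking those first keeps the same htole/letoh/htobe/betoh definitions working there. For illustration only, the same host-endianness question answered at run time rather than by the preprocessor (not how byteswap.hh does it):

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    int main()
    {
        // Store a known 32-bit pattern and inspect its first byte in memory:
        // 0x44 means the low-order byte comes first (little-endian host),
        // 0x11 means the high-order byte comes first (big-endian host).
        uint32_t probe = 0x11223344;
        uint8_t first;
        std::memcpy(&first, &probe, 1);
        std::printf("host is %s-endian\n", first == 0x44 ? "little" : "big");
        return 0;
    }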
diff --git a/tests/quick/00.hello/ref/alpha/linux/o3-timing/config.ini b/tests/quick/00.hello/ref/alpha/linux/o3-timing/config.ini
index 86e688c3d..0e351f9aa 100644
--- a/tests/quick/00.hello/ref/alpha/linux/o3-timing/config.ini
+++ b/tests/quick/00.hello/ref/alpha/linux/o3-timing/config.ini
@@ -102,7 +102,6 @@ max_insts_all_threads=0
max_insts_any_thread=0
max_loads_all_threads=0
max_loads_any_thread=0
-mem=system.cpu.dcache
numIQEntries=64
numPhysFloatRegs=256
numPhysIntRegs=256
@@ -132,7 +131,6 @@ assoc=2
block_size=64
compressed_bus=false
compression_latency=0
-do_copy=false
hash_delay=1
hit_latency=1
latency=1
@@ -309,7 +307,6 @@ assoc=2
block_size=64
compressed_bus=false
compression_latency=0
-do_copy=false
hash_delay=1
hit_latency=1
latency=1
@@ -349,7 +346,6 @@ assoc=2
block_size=64
compressed_bus=false
compression_latency=0
-do_copy=false
hash_delay=1
hit_latency=1
latency=1
@@ -386,6 +382,7 @@ mem_side=system.membus.port[1]
type=Bus
bus_id=0
clock=1000
+responder_set=false
width=64
port=system.cpu.icache.mem_side system.cpu.dcache.mem_side system.cpu.l2cache.cpu_side
@@ -408,6 +405,7 @@ uid=100
type=Bus
bus_id=0
clock=1000
+responder_set=false
width=64
port=system.physmem.port system.cpu.l2cache.mem_side
diff --git a/tests/quick/00.hello/ref/alpha/linux/o3-timing/config.out b/tests/quick/00.hello/ref/alpha/linux/o3-timing/config.out
index 1b8e6d980..e1b4ace7b 100644
--- a/tests/quick/00.hello/ref/alpha/linux/o3-timing/config.out
+++ b/tests/quick/00.hello/ref/alpha/linux/o3-timing/config.out
@@ -21,6 +21,7 @@ type=Bus
bus_id=0
clock=1000
width=64
+responder_set=false
[system.cpu.workload]
type=LiveProcess
@@ -37,45 +38,6 @@ egid=100
pid=100
ppid=99
-[system.cpu.dcache]
-type=BaseCache
-size=262144
-assoc=2
-block_size=64
-latency=1
-mshrs=10
-tgts_per_mshr=5
-write_buffers=8
-prioritizeRequests=false
-do_copy=false
-protocol=null
-trace_addr=0
-hash_delay=1
-repl=null
-compressed_bus=false
-store_compressed=false
-adaptive_compression=false
-compression_latency=0
-block_size=64
-max_miss_count=0
-addr_range=[0,18446744073709551615]
-split=false
-split_size=0
-lifo=false
-two_queue=false
-prefetch_miss=false
-prefetch_access=false
-prefetcher_size=100
-prefetch_past_page=false
-prefetch_serial_squash=false
-prefetch_latency=10
-prefetch_degree=1
-prefetch_policy=none
-prefetch_cache_check_push=true
-prefetch_use_cpu_id=true
-prefetch_data_accesses_only=false
-hit_latency=1
-
[system.cpu.fuPool.FUList0.opList0]
type=OpDesc
opClass=IntAlu
@@ -210,7 +172,6 @@ clock=1
numThreads=1
activity=0
workload=system.cpu.workload
-mem=system.cpu.dcache
checker=null
max_insts_any_thread=0
max_insts_all_threads=0
@@ -292,7 +253,44 @@ mshrs=10
tgts_per_mshr=5
write_buffers=8
prioritizeRequests=false
-do_copy=false
+protocol=null
+trace_addr=0
+hash_delay=1
+repl=null
+compressed_bus=false
+store_compressed=false
+adaptive_compression=false
+compression_latency=0
+block_size=64
+max_miss_count=0
+addr_range=[0,18446744073709551615]
+split=false
+split_size=0
+lifo=false
+two_queue=false
+prefetch_miss=false
+prefetch_access=false
+prefetcher_size=100
+prefetch_past_page=false
+prefetch_serial_squash=false
+prefetch_latency=10
+prefetch_degree=1
+prefetch_policy=none
+prefetch_cache_check_push=true
+prefetch_use_cpu_id=true
+prefetch_data_accesses_only=false
+hit_latency=1
+
+[system.cpu.dcache]
+type=BaseCache
+size=262144
+assoc=2
+block_size=64
+latency=1
+mshrs=10
+tgts_per_mshr=5
+write_buffers=8
+prioritizeRequests=false
protocol=null
trace_addr=0
hash_delay=1
@@ -331,7 +329,6 @@ mshrs=10
tgts_per_mshr=5
write_buffers=8
prioritizeRequests=false
-do_copy=false
protocol=null
trace_addr=0
hash_delay=1
@@ -365,6 +362,7 @@ type=Bus
bus_id=0
clock=1000
width=64
+responder_set=false
[trace]
flags=
diff --git a/tests/quick/00.hello/ref/alpha/linux/o3-timing/m5stats.txt b/tests/quick/00.hello/ref/alpha/linux/o3-timing/m5stats.txt
index 608fb0be9..0426166d9 100644
--- a/tests/quick/00.hello/ref/alpha/linux/o3-timing/m5stats.txt
+++ b/tests/quick/00.hello/ref/alpha/linux/o3-timing/m5stats.txt
@@ -8,10 +8,10 @@ global.BPredUnit.condIncorrect 443 # Nu
global.BPredUnit.condPredicted 1570 # Number of conditional branches predicted
global.BPredUnit.lookups 5322 # Number of BP lookups
global.BPredUnit.usedRAS 2820 # Number of times the RAS was used to get a target.
-host_inst_rate 1288 # Simulator instruction rate (inst/s)
-host_mem_usage 180572 # Number of bytes of host memory used
-host_seconds 4.37 # Real time elapsed on the host
-host_tick_rate 322418 # Simulator tick rate (ticks/s)
+host_inst_rate 9098 # Simulator instruction rate (inst/s)
+host_mem_usage 180112 # Number of bytes of host memory used
+host_seconds 0.62 # Real time elapsed on the host
+host_tick_rate 2277354 # Simulator tick rate (ticks/s)
memdepunit.memDep.conflictingLoads 27 # Number of conflicting loads.
memdepunit.memDep.conflictingStores 144 # Number of conflicting stores.
memdepunit.memDep.insertedLoads 3819 # Number of loads inserted to the mem dependence unit.
@@ -98,7 +98,7 @@ system.cpu.dcache.no_allocate_misses 0 # Nu
system.cpu.dcache.overall_accesses 2409 # number of overall (read+write) accesses
system.cpu.dcache.overall_avg_miss_latency 5958.666667 # average overall miss latency
system.cpu.dcache.overall_avg_mshr_miss_latency 6120.796512 # average overall mshr miss latency
-system.cpu.dcache.overall_avg_mshr_uncacheable_latency no value # average overall mshr uncacheable latency
+system.cpu.dcache.overall_avg_mshr_uncacheable_latency <err: div-0> # average overall mshr uncacheable latency
system.cpu.dcache.overall_hits 1986 # number of overall hits
system.cpu.dcache.overall_miss_latency 2520516 # number of overall miss cycles
system.cpu.dcache.overall_miss_rate 0.175592 # miss rate for overall accesses
@@ -195,7 +195,7 @@ system.cpu.icache.no_allocate_misses 0 # Nu
system.cpu.icache.overall_accesses 6541 # number of overall (read+write) accesses
system.cpu.icache.overall_avg_miss_latency 5110.042601 # average overall miss latency
system.cpu.icache.overall_avg_mshr_miss_latency 4297.762058 # average overall mshr miss latency
-system.cpu.icache.overall_avg_mshr_uncacheable_latency no value # average overall mshr uncacheable latency
+system.cpu.icache.overall_avg_mshr_uncacheable_latency <err: div-0> # average overall mshr uncacheable latency
system.cpu.icache.overall_hits 6095 # number of overall hits
system.cpu.icache.overall_miss_latency 2279079 # number of overall miss cycles
system.cpu.icache.overall_miss_rate 0.068185 # miss rate for overall accesses
@@ -269,20 +269,20 @@ system.cpu.ipc 0.003993 # IP
system.cpu.ipc_total 0.003993 # IPC: Total IPC of All Threads
system.cpu.iq.ISSUE:FU_type_0 13960 # Type of FU issued
system.cpu.iq.ISSUE:FU_type_0.start_dist
-(null) 2 0.01% # Type of FU issued
-IntAlu 8277 59.29% # Type of FU issued
-IntMult 1 0.01% # Type of FU issued
-IntDiv 0 0.00% # Type of FU issued
-FloatAdd 2 0.01% # Type of FU issued
-FloatCmp 0 0.00% # Type of FU issued
-FloatCvt 0 0.00% # Type of FU issued
-FloatMult 0 0.00% # Type of FU issued
-FloatDiv 0 0.00% # Type of FU issued
-FloatSqrt 0 0.00% # Type of FU issued
-MemRead 3509 25.14% # Type of FU issued
-MemWrite 2169 15.54% # Type of FU issued
-IprAccess 0 0.00% # Type of FU issued
-InstPrefetch 0 0.00% # Type of FU issued
+ (null) 2 0.01% # Type of FU issued
+ IntAlu 8277 59.29% # Type of FU issued
+ IntMult 1 0.01% # Type of FU issued
+ IntDiv 0 0.00% # Type of FU issued
+ FloatAdd 2 0.01% # Type of FU issued
+ FloatCmp 0 0.00% # Type of FU issued
+ FloatCvt 0 0.00% # Type of FU issued
+ FloatMult 0 0.00% # Type of FU issued
+ FloatDiv 0 0.00% # Type of FU issued
+ FloatSqrt 0 0.00% # Type of FU issued
+ MemRead 3509 25.14% # Type of FU issued
+ MemWrite 2169 15.54% # Type of FU issued
+ IprAccess 0 0.00% # Type of FU issued
+ InstPrefetch 0 0.00% # Type of FU issued
system.cpu.iq.ISSUE:FU_type_0.end_dist
system.cpu.iq.ISSUE:fu_busy_cnt 93 # FU busy when requested
system.cpu.iq.ISSUE:fu_busy_rate 0.006662 # FU busy rate (busy events/executed inst)
@@ -360,7 +360,7 @@ system.cpu.l2cache.no_allocate_misses 0 # Nu
system.cpu.l2cache.overall_accesses 483 # number of overall (read+write) accesses
system.cpu.l2cache.overall_avg_miss_latency 4537.301455 # average overall miss latency
system.cpu.l2cache.overall_avg_mshr_miss_latency 2307.006237 # average overall mshr miss latency
-system.cpu.l2cache.overall_avg_mshr_uncacheable_latency no value # average overall mshr uncacheable latency
+system.cpu.l2cache.overall_avg_mshr_uncacheable_latency <err: div-0> # average overall mshr uncacheable latency
system.cpu.l2cache.overall_hits 2 # number of overall hits
system.cpu.l2cache.overall_miss_latency 2182442 # number of overall miss cycles
system.cpu.l2cache.overall_miss_rate 0.995859 # miss rate for overall accesses
diff --git a/tests/quick/00.hello/ref/alpha/linux/o3-timing/stderr b/tests/quick/00.hello/ref/alpha/linux/o3-timing/stderr
index 0ca948630..467898e3f 100644
--- a/tests/quick/00.hello/ref/alpha/linux/o3-timing/stderr
+++ b/tests/quick/00.hello/ref/alpha/linux/o3-timing/stderr
@@ -1,6 +1,5 @@
warn: Entering event queue @ 0. Starting simulation...
warn: cycle 0: fault (page_table_fault) detected @ PC 0x000000
-warn: Increasing stack 0x11ff92000:0x11ff9b000 to 0x11ff90000:0x11ff9b000 because of access to 0x11ff91ff0
warn: Default fetch doesn't update it's state from a functional call.
warn: Default fetch doesn't update it's state from a functional call.
warn: Default fetch doesn't update it's state from a functional call.
diff --git a/tests/quick/00.hello/ref/alpha/linux/o3-timing/stdout b/tests/quick/00.hello/ref/alpha/linux/o3-timing/stdout
index d088333a5..d566e3502 100644
--- a/tests/quick/00.hello/ref/alpha/linux/o3-timing/stdout
+++ b/tests/quick/00.hello/ref/alpha/linux/o3-timing/stdout
@@ -6,8 +6,8 @@ The Regents of The University of Michigan
All Rights Reserved
-M5 compiled Oct 13 2006 16:07:10
-M5 started Fri Oct 13 16:07:55 2006
+M5 compiled Nov 3 2006 17:10:27
+M5 started Fri Nov 3 17:10:43 2006
M5 executing on zizzer.eecs.umich.edu
-command line: build/ALPHA_SE/m5.debug -d build/ALPHA_SE/tests/debug/quick/00.hello/alpha/linux/o3-timing tests/run.py quick/00.hello/alpha/linux/o3-timing
+command line: build/ALPHA_SE/m5.opt -d build/ALPHA_SE/tests/opt/quick/00.hello/alpha/linux/o3-timing tests/run.py quick/00.hello/alpha/linux/o3-timing
Exiting @ tick 1408131 because target called exit()
diff --git a/tests/quick/00.hello/ref/alpha/linux/simple-atomic/config.ini b/tests/quick/00.hello/ref/alpha/linux/simple-atomic/config.ini
index b8aba735a..f509ba165 100644
--- a/tests/quick/00.hello/ref/alpha/linux/simple-atomic/config.ini
+++ b/tests/quick/00.hello/ref/alpha/linux/simple-atomic/config.ini
@@ -64,7 +64,6 @@ max_insts_all_threads=0
max_insts_any_thread=0
max_loads_all_threads=0
max_loads_any_thread=0
-mem=system.physmem
progress_interval=0
simulate_stalls=false
system=system
@@ -92,6 +91,7 @@ uid=100
type=Bus
bus_id=0
clock=1000
+responder_set=false
width=64
port=system.physmem.port system.cpu.icache_port system.cpu.dcache_port
diff --git a/tests/quick/00.hello/ref/alpha/linux/simple-atomic/config.out b/tests/quick/00.hello/ref/alpha/linux/simple-atomic/config.out
index 71a43d484..a08bc7c0c 100644
--- a/tests/quick/00.hello/ref/alpha/linux/simple-atomic/config.out
+++ b/tests/quick/00.hello/ref/alpha/linux/simple-atomic/config.out
@@ -21,6 +21,7 @@ type=Bus
bus_id=0
clock=1000
width=64
+responder_set=false
[system.cpu.workload]
type=LiveProcess
@@ -44,7 +45,6 @@ max_insts_all_threads=0
max_loads_any_thread=0
max_loads_all_threads=0
progress_interval=0
-mem=system.physmem
system=system
cpu_id=0
workload=system.cpu.workload
diff --git a/tests/quick/00.hello/ref/alpha/linux/simple-atomic/m5stats.txt b/tests/quick/00.hello/ref/alpha/linux/simple-atomic/m5stats.txt
index 875e55644..96973fa46 100644
--- a/tests/quick/00.hello/ref/alpha/linux/simple-atomic/m5stats.txt
+++ b/tests/quick/00.hello/ref/alpha/linux/simple-atomic/m5stats.txt
@@ -1,9 +1,9 @@
---------- Begin Simulation Statistics ----------
-host_inst_rate 172802 # Simulator instruction rate (inst/s)
-host_mem_usage 148116 # Number of bytes of host memory used
-host_seconds 0.03 # Real time elapsed on the host
-host_tick_rate 170614 # Simulator tick rate (ticks/s)
+host_inst_rate 684709 # Simulator instruction rate (inst/s)
+host_mem_usage 148256 # Number of bytes of host memory used
+host_seconds 0.01 # Real time elapsed on the host
+host_tick_rate 650634 # Simulator tick rate (ticks/s)
sim_freq 1000000000000 # Frequency of simulated ticks
sim_insts 5642 # Number of instructions simulated
sim_seconds 0.000000 # Number of seconds simulated
diff --git a/tests/quick/00.hello/ref/alpha/linux/simple-atomic/stderr b/tests/quick/00.hello/ref/alpha/linux/simple-atomic/stderr
index 5e6a1840a..87866a2a5 100644
--- a/tests/quick/00.hello/ref/alpha/linux/simple-atomic/stderr
+++ b/tests/quick/00.hello/ref/alpha/linux/simple-atomic/stderr
@@ -1,2 +1 @@
warn: Entering event queue @ 0. Starting simulation...
-warn: Increasing stack 0x11ff92000:0x11ff9b000 to 0x11ff90000:0x11ff9b000 because of access to 0x11ff91ff0
diff --git a/tests/quick/00.hello/ref/alpha/linux/simple-atomic/stdout b/tests/quick/00.hello/ref/alpha/linux/simple-atomic/stdout
index 59f571aaf..c451577a3 100644
--- a/tests/quick/00.hello/ref/alpha/linux/simple-atomic/stdout
+++ b/tests/quick/00.hello/ref/alpha/linux/simple-atomic/stdout
@@ -6,8 +6,8 @@ The Regents of The University of Michigan
All Rights Reserved
-M5 compiled Oct 8 2006 14:00:39
-M5 started Sun Oct 8 14:00:50 2006
+M5 compiled Nov 3 2006 17:10:27
+M5 started Fri Nov 3 17:10:43 2006
M5 executing on zizzer.eecs.umich.edu
command line: build/ALPHA_SE/m5.opt -d build/ALPHA_SE/tests/opt/quick/00.hello/alpha/linux/simple-atomic tests/run.py quick/00.hello/alpha/linux/simple-atomic
Exiting @ tick 5641 because target called exit()
diff --git a/tests/quick/00.hello/ref/alpha/linux/simple-timing/config.ini b/tests/quick/00.hello/ref/alpha/linux/simple-timing/config.ini
index f8e1f1bb0..d8fc14e8d 100644
--- a/tests/quick/00.hello/ref/alpha/linux/simple-timing/config.ini
+++ b/tests/quick/00.hello/ref/alpha/linux/simple-timing/config.ini
@@ -64,7 +64,6 @@ max_insts_all_threads=0
max_insts_any_thread=0
max_loads_all_threads=0
max_loads_any_thread=0
-mem=system.cpu.dcache
progress_interval=0
system=system
workload=system.cpu.workload
@@ -78,7 +77,6 @@ assoc=2
block_size=64
compressed_bus=false
compression_latency=0
-do_copy=false
hash_delay=1
hit_latency=1
latency=1
@@ -118,7 +116,6 @@ assoc=2
block_size=64
compressed_bus=false
compression_latency=0
-do_copy=false
hash_delay=1
hit_latency=1
latency=1
@@ -158,7 +155,6 @@ assoc=2
block_size=64
compressed_bus=false
compression_latency=0
-do_copy=false
hash_delay=1
hit_latency=1
latency=1
@@ -195,6 +191,7 @@ mem_side=system.membus.port[1]
type=Bus
bus_id=0
clock=1000
+responder_set=false
width=64
port=system.cpu.icache.mem_side system.cpu.dcache.mem_side system.cpu.l2cache.cpu_side
@@ -217,6 +214,7 @@ uid=100
type=Bus
bus_id=0
clock=1000
+responder_set=false
width=64
port=system.physmem.port system.cpu.l2cache.mem_side
diff --git a/tests/quick/00.hello/ref/alpha/linux/simple-timing/config.out b/tests/quick/00.hello/ref/alpha/linux/simple-timing/config.out
index 2ab7c0150..e9f48f15c 100644
--- a/tests/quick/00.hello/ref/alpha/linux/simple-timing/config.out
+++ b/tests/quick/00.hello/ref/alpha/linux/simple-timing/config.out
@@ -21,45 +21,7 @@ type=Bus
bus_id=0
clock=1000
width=64
-
-[system.cpu.dcache]
-type=BaseCache
-size=262144
-assoc=2
-block_size=64
-latency=1
-mshrs=10
-tgts_per_mshr=5
-write_buffers=8
-prioritizeRequests=false
-do_copy=false
-protocol=null
-trace_addr=0
-hash_delay=1
-repl=null
-compressed_bus=false
-store_compressed=false
-adaptive_compression=false
-compression_latency=0
-block_size=64
-max_miss_count=0
-addr_range=[0,18446744073709551615]
-split=false
-split_size=0
-lifo=false
-two_queue=false
-prefetch_miss=false
-prefetch_access=false
-prefetcher_size=100
-prefetch_past_page=false
-prefetch_serial_squash=false
-prefetch_latency=10
-prefetch_degree=1
-prefetch_policy=none
-prefetch_cache_check_push=true
-prefetch_use_cpu_id=true
-prefetch_data_accesses_only=false
-hit_latency=1
+responder_set=false
[system.cpu.workload]
type=LiveProcess
@@ -83,7 +45,6 @@ max_insts_all_threads=0
max_loads_any_thread=0
max_loads_all_threads=0
progress_interval=0
-mem=system.cpu.dcache
system=system
cpu_id=0
workload=system.cpu.workload
@@ -99,6 +60,7 @@ type=Bus
bus_id=0
clock=1000
width=64
+responder_set=false
[system.cpu.icache]
type=BaseCache
@@ -110,7 +72,44 @@ mshrs=10
tgts_per_mshr=5
write_buffers=8
prioritizeRequests=false
-do_copy=false
+protocol=null
+trace_addr=0
+hash_delay=1
+repl=null
+compressed_bus=false
+store_compressed=false
+adaptive_compression=false
+compression_latency=0
+block_size=64
+max_miss_count=0
+addr_range=[0,18446744073709551615]
+split=false
+split_size=0
+lifo=false
+two_queue=false
+prefetch_miss=false
+prefetch_access=false
+prefetcher_size=100
+prefetch_past_page=false
+prefetch_serial_squash=false
+prefetch_latency=10
+prefetch_degree=1
+prefetch_policy=none
+prefetch_cache_check_push=true
+prefetch_use_cpu_id=true
+prefetch_data_accesses_only=false
+hit_latency=1
+
+[system.cpu.dcache]
+type=BaseCache
+size=262144
+assoc=2
+block_size=64
+latency=1
+mshrs=10
+tgts_per_mshr=5
+write_buffers=8
+prioritizeRequests=false
protocol=null
trace_addr=0
hash_delay=1
@@ -149,7 +148,6 @@ mshrs=10
tgts_per_mshr=5
write_buffers=8
prioritizeRequests=false
-do_copy=false
protocol=null
trace_addr=0
hash_delay=1
diff --git a/tests/quick/00.hello/ref/alpha/linux/simple-timing/m5stats.txt b/tests/quick/00.hello/ref/alpha/linux/simple-timing/m5stats.txt
index 6ab4e0920..27822f334 100644
--- a/tests/quick/00.hello/ref/alpha/linux/simple-timing/m5stats.txt
+++ b/tests/quick/00.hello/ref/alpha/linux/simple-timing/m5stats.txt
@@ -1,9 +1,9 @@
---------- Begin Simulation Statistics ----------
-host_inst_rate 8293 # Simulator instruction rate (inst/s)
-host_mem_usage 179892 # Number of bytes of host memory used
-host_seconds 0.68 # Real time elapsed on the host
-host_tick_rate 2595779 # Simulator tick rate (ticks/s)
+host_inst_rate 179790 # Simulator instruction rate (inst/s)
+host_mem_usage 179436 # Number of bytes of host memory used
+host_seconds 0.03 # Real time elapsed on the host
+host_tick_rate 55533187 # Simulator tick rate (ticks/s)
sim_freq 1000000000000 # Frequency of simulated ticks
sim_insts 5642 # Number of instructions simulated
sim_seconds 0.000002 # Number of seconds simulated
@@ -53,7 +53,7 @@ system.cpu.dcache.no_allocate_misses 0 # Nu
system.cpu.dcache.overall_accesses 1791 # number of overall (read+write) accesses
system.cpu.dcache.overall_avg_miss_latency 3984.721212 # average overall miss latency
system.cpu.dcache.overall_avg_mshr_miss_latency 2984.721212 # average overall mshr miss latency
-system.cpu.dcache.overall_avg_mshr_uncacheable_latency no value # average overall mshr uncacheable latency
+system.cpu.dcache.overall_avg_mshr_uncacheable_latency <err: div-0> # average overall mshr uncacheable latency
system.cpu.dcache.overall_hits 1626 # number of overall hits
system.cpu.dcache.overall_miss_latency 657479 # number of overall miss cycles
system.cpu.dcache.overall_miss_rate 0.092127 # miss rate for overall accesses
@@ -115,7 +115,7 @@ system.cpu.icache.no_allocate_misses 0 # Nu
system.cpu.icache.overall_accesses 5643 # number of overall (read+write) accesses
system.cpu.icache.overall_avg_miss_latency 3980.490975 # average overall miss latency
system.cpu.icache.overall_avg_mshr_miss_latency 2980.490975 # average overall mshr miss latency
-system.cpu.icache.overall_avg_mshr_uncacheable_latency no value # average overall mshr uncacheable latency
+system.cpu.icache.overall_avg_mshr_uncacheable_latency <err: div-0> # average overall mshr uncacheable latency
system.cpu.icache.overall_hits 5366 # number of overall hits
system.cpu.icache.overall_miss_latency 1102596 # number of overall miss cycles
system.cpu.icache.overall_miss_rate 0.049087 # miss rate for overall accesses
@@ -178,7 +178,7 @@ system.cpu.l2cache.no_allocate_misses 0 # Nu
system.cpu.l2cache.overall_accesses 442 # number of overall (read+write) accesses
system.cpu.l2cache.overall_avg_miss_latency 2984.340136 # average overall miss latency
system.cpu.l2cache.overall_avg_mshr_miss_latency 1983.340136 # average overall mshr miss latency
-system.cpu.l2cache.overall_avg_mshr_uncacheable_latency no value # average overall mshr uncacheable latency
+system.cpu.l2cache.overall_avg_mshr_uncacheable_latency <err: div-0> # average overall mshr uncacheable latency
system.cpu.l2cache.overall_hits 1 # number of overall hits
system.cpu.l2cache.overall_miss_latency 1316094 # number of overall miss cycles
system.cpu.l2cache.overall_miss_rate 0.997738 # miss rate for overall accesses
diff --git a/tests/quick/00.hello/ref/alpha/linux/simple-timing/stderr b/tests/quick/00.hello/ref/alpha/linux/simple-timing/stderr
index 5e6a1840a..87866a2a5 100644
--- a/tests/quick/00.hello/ref/alpha/linux/simple-timing/stderr
+++ b/tests/quick/00.hello/ref/alpha/linux/simple-timing/stderr
@@ -1,2 +1 @@
warn: Entering event queue @ 0. Starting simulation...
-warn: Increasing stack 0x11ff92000:0x11ff9b000 to 0x11ff90000:0x11ff9b000 because of access to 0x11ff91ff0
diff --git a/tests/quick/00.hello/ref/alpha/linux/simple-timing/stdout b/tests/quick/00.hello/ref/alpha/linux/simple-timing/stdout
index 31db8804a..61f79d88f 100644
--- a/tests/quick/00.hello/ref/alpha/linux/simple-timing/stdout
+++ b/tests/quick/00.hello/ref/alpha/linux/simple-timing/stdout
@@ -6,8 +6,8 @@ The Regents of The University of Michigan
All Rights Reserved
-M5 compiled Oct 13 2006 16:07:10
-M5 started Fri Oct 13 16:08:16 2006
+M5 compiled Nov 3 2006 17:10:27
+M5 started Fri Nov 3 17:10:44 2006
M5 executing on zizzer.eecs.umich.edu
-command line: build/ALPHA_SE/m5.debug -d build/ALPHA_SE/tests/debug/quick/00.hello/alpha/linux/simple-timing tests/run.py quick/00.hello/alpha/linux/simple-timing
+command line: build/ALPHA_SE/m5.opt -d build/ALPHA_SE/tests/opt/quick/00.hello/alpha/linux/simple-timing tests/run.py quick/00.hello/alpha/linux/simple-timing
Exiting @ tick 1767066 because target called exit()
diff --git a/tests/quick/00.hello/ref/alpha/tru64/o3-timing/config.ini b/tests/quick/00.hello/ref/alpha/tru64/o3-timing/config.ini
index e15dd47b7..9f557431e 100644
--- a/tests/quick/00.hello/ref/alpha/tru64/o3-timing/config.ini
+++ b/tests/quick/00.hello/ref/alpha/tru64/o3-timing/config.ini
@@ -102,7 +102,6 @@ max_insts_all_threads=0
max_insts_any_thread=0
max_loads_all_threads=0
max_loads_any_thread=0
-mem=system.cpu.dcache
numIQEntries=64
numPhysFloatRegs=256
numPhysIntRegs=256
@@ -132,7 +131,6 @@ assoc=2
block_size=64
compressed_bus=false
compression_latency=0
-do_copy=false
hash_delay=1
hit_latency=1
latency=1
@@ -309,7 +307,6 @@ assoc=2
block_size=64
compressed_bus=false
compression_latency=0
-do_copy=false
hash_delay=1
hit_latency=1
latency=1
@@ -349,7 +346,6 @@ assoc=2
block_size=64
compressed_bus=false
compression_latency=0
-do_copy=false
hash_delay=1
hit_latency=1
latency=1
@@ -386,6 +382,7 @@ mem_side=system.membus.port[1]
type=Bus
bus_id=0
clock=1000
+responder_set=false
width=64
port=system.cpu.icache.mem_side system.cpu.dcache.mem_side system.cpu.l2cache.cpu_side
@@ -408,6 +405,7 @@ uid=100
type=Bus
bus_id=0
clock=1000
+responder_set=false
width=64
port=system.physmem.port system.cpu.l2cache.mem_side
diff --git a/tests/quick/00.hello/ref/alpha/tru64/o3-timing/config.out b/tests/quick/00.hello/ref/alpha/tru64/o3-timing/config.out
index a57dbacf3..bf7a9fe00 100644
--- a/tests/quick/00.hello/ref/alpha/tru64/o3-timing/config.out
+++ b/tests/quick/00.hello/ref/alpha/tru64/o3-timing/config.out
@@ -21,6 +21,7 @@ type=Bus
bus_id=0
clock=1000
width=64
+responder_set=false
[system.cpu.workload]
type=LiveProcess
@@ -37,45 +38,6 @@ egid=100
pid=100
ppid=99
-[system.cpu.dcache]
-type=BaseCache
-size=262144
-assoc=2
-block_size=64
-latency=1
-mshrs=10
-tgts_per_mshr=5
-write_buffers=8
-prioritizeRequests=false
-do_copy=false
-protocol=null
-trace_addr=0
-hash_delay=1
-repl=null
-compressed_bus=false
-store_compressed=false
-adaptive_compression=false
-compression_latency=0
-block_size=64
-max_miss_count=0
-addr_range=[0,18446744073709551615]
-split=false
-split_size=0
-lifo=false
-two_queue=false
-prefetch_miss=false
-prefetch_access=false
-prefetcher_size=100
-prefetch_past_page=false
-prefetch_serial_squash=false
-prefetch_latency=10
-prefetch_degree=1
-prefetch_policy=none
-prefetch_cache_check_push=true
-prefetch_use_cpu_id=true
-prefetch_data_accesses_only=false
-hit_latency=1
-
[system.cpu.fuPool.FUList0.opList0]
type=OpDesc
opClass=IntAlu
@@ -210,7 +172,6 @@ clock=1
numThreads=1
activity=0
workload=system.cpu.workload
-mem=system.cpu.dcache
checker=null
max_insts_any_thread=0
max_insts_all_threads=0
@@ -292,7 +253,44 @@ mshrs=10
tgts_per_mshr=5
write_buffers=8
prioritizeRequests=false
-do_copy=false
+protocol=null
+trace_addr=0
+hash_delay=1
+repl=null
+compressed_bus=false
+store_compressed=false
+adaptive_compression=false
+compression_latency=0
+block_size=64
+max_miss_count=0
+addr_range=[0,18446744073709551615]
+split=false
+split_size=0
+lifo=false
+two_queue=false
+prefetch_miss=false
+prefetch_access=false
+prefetcher_size=100
+prefetch_past_page=false
+prefetch_serial_squash=false
+prefetch_latency=10
+prefetch_degree=1
+prefetch_policy=none
+prefetch_cache_check_push=true
+prefetch_use_cpu_id=true
+prefetch_data_accesses_only=false
+hit_latency=1
+
+[system.cpu.dcache]
+type=BaseCache
+size=262144
+assoc=2
+block_size=64
+latency=1
+mshrs=10
+tgts_per_mshr=5
+write_buffers=8
+prioritizeRequests=false
protocol=null
trace_addr=0
hash_delay=1
@@ -331,7 +329,6 @@ mshrs=10
tgts_per_mshr=5
write_buffers=8
prioritizeRequests=false
-do_copy=false
protocol=null
trace_addr=0
hash_delay=1
@@ -365,6 +362,7 @@ type=Bus
bus_id=0
clock=1000
width=64
+responder_set=false
[trace]
flags=
diff --git a/tests/quick/00.hello/ref/alpha/tru64/o3-timing/m5stats.txt b/tests/quick/00.hello/ref/alpha/tru64/o3-timing/m5stats.txt
index 95835cb62..44f155480 100644
--- a/tests/quick/00.hello/ref/alpha/tru64/o3-timing/m5stats.txt
+++ b/tests/quick/00.hello/ref/alpha/tru64/o3-timing/m5stats.txt
@@ -8,10 +8,10 @@ global.BPredUnit.condIncorrect 221 # Nu
global.BPredUnit.condPredicted 451 # Number of conditional branches predicted
global.BPredUnit.lookups 891 # Number of BP lookups
global.BPredUnit.usedRAS 172 # Number of times the RAS was used to get a target.
-host_inst_rate 1447 # Simulator instruction rate (inst/s)
-host_mem_usage 180084 # Number of bytes of host memory used
-host_seconds 1.65 # Real time elapsed on the host
-host_tick_rate 455868 # Simulator tick rate (ticks/s)
+host_inst_rate 20134 # Simulator instruction rate (inst/s)
+host_mem_usage 179640 # Number of bytes of host memory used
+host_seconds 0.12 # Real time elapsed on the host
+host_tick_rate 6326998 # Simulator tick rate (ticks/s)
memdepunit.memDep.conflictingLoads 10 # Number of conflicting loads.
memdepunit.memDep.conflictingStores 8 # Number of conflicting stores.
memdepunit.memDep.insertedLoads 784 # Number of loads inserted to the mem dependence unit.
@@ -98,7 +98,7 @@ system.cpu.dcache.no_allocate_misses 0 # Nu
system.cpu.dcache.overall_accesses 856 # number of overall (read+write) accesses
system.cpu.dcache.overall_avg_miss_latency 6991.981481 # average overall miss latency
system.cpu.dcache.overall_avg_mshr_miss_latency 7086.141176 # average overall mshr miss latency
-system.cpu.dcache.overall_avg_mshr_uncacheable_latency no value # average overall mshr uncacheable latency
+system.cpu.dcache.overall_avg_mshr_uncacheable_latency <err: div-0> # average overall mshr uncacheable latency
system.cpu.dcache.overall_hits 694 # number of overall hits
system.cpu.dcache.overall_miss_latency 1132701 # number of overall miss cycles
system.cpu.dcache.overall_miss_rate 0.189252 # miss rate for overall accesses
@@ -195,7 +195,7 @@ system.cpu.icache.no_allocate_misses 0 # Nu
system.cpu.icache.overall_accesses 814 # number of overall (read+write) accesses
system.cpu.icache.overall_avg_miss_latency 4971.589641 # average overall miss latency
system.cpu.icache.overall_avg_mshr_miss_latency 4152.244565 # average overall mshr miss latency
-system.cpu.icache.overall_avg_mshr_uncacheable_latency no value # average overall mshr uncacheable latency
+system.cpu.icache.overall_avg_mshr_uncacheable_latency <err: div-0> # average overall mshr uncacheable latency
system.cpu.icache.overall_hits 563 # number of overall hits
system.cpu.icache.overall_miss_latency 1247869 # number of overall miss cycles
system.cpu.icache.overall_miss_rate 0.308354 # miss rate for overall accesses
@@ -269,20 +269,20 @@ system.cpu.ipc 0.003174 # IP
system.cpu.ipc_total 0.003174 # IPC: Total IPC of All Threads
system.cpu.iq.ISSUE:FU_type_0 3500 # Type of FU issued
system.cpu.iq.ISSUE:FU_type_0.start_dist
-(null) 0 0.00% # Type of FU issued
-IntAlu 2460 70.29% # Type of FU issued
-IntMult 1 0.03% # Type of FU issued
-IntDiv 0 0.00% # Type of FU issued
-FloatAdd 0 0.00% # Type of FU issued
-FloatCmp 0 0.00% # Type of FU issued
-FloatCvt 0 0.00% # Type of FU issued
-FloatMult 0 0.00% # Type of FU issued
-FloatDiv 0 0.00% # Type of FU issued
-FloatSqrt 0 0.00% # Type of FU issued
-MemRead 695 19.86% # Type of FU issued
-MemWrite 344 9.83% # Type of FU issued
-IprAccess 0 0.00% # Type of FU issued
-InstPrefetch 0 0.00% # Type of FU issued
+ (null) 0 0.00% # Type of FU issued
+ IntAlu 2460 70.29% # Type of FU issued
+ IntMult 1 0.03% # Type of FU issued
+ IntDiv 0 0.00% # Type of FU issued
+ FloatAdd 0 0.00% # Type of FU issued
+ FloatCmp 0 0.00% # Type of FU issued
+ FloatCvt 0 0.00% # Type of FU issued
+ FloatMult 0 0.00% # Type of FU issued
+ FloatDiv 0 0.00% # Type of FU issued
+ FloatSqrt 0 0.00% # Type of FU issued
+ MemRead 695 19.86% # Type of FU issued
+ MemWrite 344 9.83% # Type of FU issued
+ IprAccess 0 0.00% # Type of FU issued
+ InstPrefetch 0 0.00% # Type of FU issued
system.cpu.iq.ISSUE:FU_type_0.end_dist
system.cpu.iq.ISSUE:fu_busy_cnt 35 # FU busy when requested
system.cpu.iq.ISSUE:fu_busy_rate 0.010000 # FU busy rate (busy events/executed inst)
@@ -359,7 +359,7 @@ system.cpu.l2cache.no_allocate_misses 0 # Nu
system.cpu.l2cache.overall_accesses 269 # number of overall (read+write) accesses
system.cpu.l2cache.overall_avg_miss_latency 4622.063197 # average overall miss latency
system.cpu.l2cache.overall_avg_mshr_miss_latency 2296.591078 # average overall mshr miss latency
-system.cpu.l2cache.overall_avg_mshr_uncacheable_latency no value # average overall mshr uncacheable latency
+system.cpu.l2cache.overall_avg_mshr_uncacheable_latency <err: div-0> # average overall mshr uncacheable latency
system.cpu.l2cache.overall_hits 0 # number of overall hits
system.cpu.l2cache.overall_miss_latency 1243335 # number of overall miss cycles
system.cpu.l2cache.overall_miss_rate 1 # miss rate for overall accesses
diff --git a/tests/quick/00.hello/ref/alpha/tru64/o3-timing/stderr b/tests/quick/00.hello/ref/alpha/tru64/o3-timing/stderr
index 5f8fafdd1..cb1e9904d 100644
--- a/tests/quick/00.hello/ref/alpha/tru64/o3-timing/stderr
+++ b/tests/quick/00.hello/ref/alpha/tru64/o3-timing/stderr
@@ -1,6 +1,5 @@
warn: Entering event queue @ 0. Starting simulation...
warn: cycle 0: fault (page_table_fault) detected @ PC 0x000000
-warn: Increasing stack 0x11ff92000:0x11ff9b000 to 0x11ff90000:0x11ff9b000 because of access to 0x11ff91ff8
warn: cycle 109049: fault (page_table_fault) detected @ PC 0x000000
warn: cycle 109050: fault (page_table_fault) detected @ PC 0x000000
warn: cycle 109051: fault (page_table_fault) detected @ PC 0x000000
diff --git a/tests/quick/00.hello/ref/alpha/tru64/o3-timing/stdout b/tests/quick/00.hello/ref/alpha/tru64/o3-timing/stdout
index 6f8154bb0..4453bcfe2 100644
--- a/tests/quick/00.hello/ref/alpha/tru64/o3-timing/stdout
+++ b/tests/quick/00.hello/ref/alpha/tru64/o3-timing/stdout
@@ -6,8 +6,8 @@ The Regents of The University of Michigan
All Rights Reserved
-M5 compiled Oct 13 2006 16:07:10
-M5 started Fri Oct 13 16:08:37 2006
+M5 compiled Nov 3 2006 17:10:27
+M5 started Fri Nov 3 17:10:50 2006
M5 executing on zizzer.eecs.umich.edu
-command line: build/ALPHA_SE/m5.debug -d build/ALPHA_SE/tests/debug/quick/00.hello/alpha/tru64/o3-timing tests/run.py quick/00.hello/alpha/tru64/o3-timing
+command line: build/ALPHA_SE/m5.opt -d build/ALPHA_SE/tests/opt/quick/00.hello/alpha/tru64/o3-timing tests/run.py quick/00.hello/alpha/tru64/o3-timing
Exiting @ tick 752027 because target called exit()
diff --git a/tests/quick/00.hello/ref/alpha/tru64/simple-atomic/config.ini b/tests/quick/00.hello/ref/alpha/tru64/simple-atomic/config.ini
index 60783267b..087f6ac50 100644
--- a/tests/quick/00.hello/ref/alpha/tru64/simple-atomic/config.ini
+++ b/tests/quick/00.hello/ref/alpha/tru64/simple-atomic/config.ini
@@ -64,7 +64,6 @@ max_insts_all_threads=0
max_insts_any_thread=0
max_loads_all_threads=0
max_loads_any_thread=0
-mem=system.physmem
progress_interval=0
simulate_stalls=false
system=system
@@ -92,6 +91,7 @@ uid=100
type=Bus
bus_id=0
clock=1000
+responder_set=false
width=64
port=system.physmem.port system.cpu.icache_port system.cpu.dcache_port
diff --git a/tests/quick/00.hello/ref/alpha/tru64/simple-atomic/config.out b/tests/quick/00.hello/ref/alpha/tru64/simple-atomic/config.out
index c8733b8f7..f28f7ae60 100644
--- a/tests/quick/00.hello/ref/alpha/tru64/simple-atomic/config.out
+++ b/tests/quick/00.hello/ref/alpha/tru64/simple-atomic/config.out
@@ -21,6 +21,7 @@ type=Bus
bus_id=0
clock=1000
width=64
+responder_set=false
[system.cpu.workload]
type=LiveProcess
@@ -44,7 +45,6 @@ max_insts_all_threads=0
max_loads_any_thread=0
max_loads_all_threads=0
progress_interval=0
-mem=system.physmem
system=system
cpu_id=0
workload=system.cpu.workload
diff --git a/tests/quick/00.hello/ref/alpha/tru64/simple-atomic/m5stats.txt b/tests/quick/00.hello/ref/alpha/tru64/simple-atomic/m5stats.txt
index e3f845135..25dace389 100644
--- a/tests/quick/00.hello/ref/alpha/tru64/simple-atomic/m5stats.txt
+++ b/tests/quick/00.hello/ref/alpha/tru64/simple-atomic/m5stats.txt
@@ -1,9 +1,9 @@
---------- Begin Simulation Statistics ----------
-host_inst_rate 60702 # Simulator instruction rate (inst/s)
-host_mem_usage 147692 # Number of bytes of host memory used
-host_seconds 0.04 # Real time elapsed on the host
-host_tick_rate 60102 # Simulator tick rate (ticks/s)
+host_inst_rate 480164 # Simulator instruction rate (inst/s)
+host_mem_usage 147928 # Number of bytes of host memory used
+host_seconds 0.01 # Real time elapsed on the host
+host_tick_rate 437596 # Simulator tick rate (ticks/s)
sim_freq 1000000000000 # Frequency of simulated ticks
sim_insts 2578 # Number of instructions simulated
sim_seconds 0.000000 # Number of seconds simulated
diff --git a/tests/quick/00.hello/ref/alpha/tru64/simple-atomic/stderr b/tests/quick/00.hello/ref/alpha/tru64/simple-atomic/stderr
index c2154cff2..b3cdfe967 100644
--- a/tests/quick/00.hello/ref/alpha/tru64/simple-atomic/stderr
+++ b/tests/quick/00.hello/ref/alpha/tru64/simple-atomic/stderr
@@ -1,3 +1,2 @@
warn: Entering event queue @ 0. Starting simulation...
-warn: Increasing stack 0x11ff92000:0x11ff9b000 to 0x11ff90000:0x11ff9b000 because of access to 0x11ff91ff8
warn: ignoring syscall sigprocmask(1, 18446744073709547831, ...)
diff --git a/tests/quick/00.hello/ref/alpha/tru64/simple-atomic/stdout b/tests/quick/00.hello/ref/alpha/tru64/simple-atomic/stdout
index 2ee4e0a08..099a6d041 100644
--- a/tests/quick/00.hello/ref/alpha/tru64/simple-atomic/stdout
+++ b/tests/quick/00.hello/ref/alpha/tru64/simple-atomic/stdout
@@ -6,8 +6,8 @@ The Regents of The University of Michigan
All Rights Reserved
-M5 compiled Oct 8 2006 14:00:39
-M5 started Sun Oct 8 14:00:54 2006
+M5 compiled Nov 3 2006 17:10:27
+M5 started Fri Nov 3 17:10:50 2006
M5 executing on zizzer.eecs.umich.edu
command line: build/ALPHA_SE/m5.opt -d build/ALPHA_SE/tests/opt/quick/00.hello/alpha/tru64/simple-atomic tests/run.py quick/00.hello/alpha/tru64/simple-atomic
Exiting @ tick 2577 because target called exit()
diff --git a/tests/quick/00.hello/ref/alpha/tru64/simple-timing/config.ini b/tests/quick/00.hello/ref/alpha/tru64/simple-timing/config.ini
index f32654f76..3fbabab03 100644
--- a/tests/quick/00.hello/ref/alpha/tru64/simple-timing/config.ini
+++ b/tests/quick/00.hello/ref/alpha/tru64/simple-timing/config.ini
@@ -64,7 +64,6 @@ max_insts_all_threads=0
max_insts_any_thread=0
max_loads_all_threads=0
max_loads_any_thread=0
-mem=system.cpu.dcache
progress_interval=0
system=system
workload=system.cpu.workload
@@ -78,7 +77,6 @@ assoc=2
block_size=64
compressed_bus=false
compression_latency=0
-do_copy=false
hash_delay=1
hit_latency=1
latency=1
@@ -118,7 +116,6 @@ assoc=2
block_size=64
compressed_bus=false
compression_latency=0
-do_copy=false
hash_delay=1
hit_latency=1
latency=1
@@ -158,7 +155,6 @@ assoc=2
block_size=64
compressed_bus=false
compression_latency=0
-do_copy=false
hash_delay=1
hit_latency=1
latency=1
@@ -195,6 +191,7 @@ mem_side=system.membus.port[1]
type=Bus
bus_id=0
clock=1000
+responder_set=false
width=64
port=system.cpu.icache.mem_side system.cpu.dcache.mem_side system.cpu.l2cache.cpu_side
@@ -217,6 +214,7 @@ uid=100
type=Bus
bus_id=0
clock=1000
+responder_set=false
width=64
port=system.physmem.port system.cpu.l2cache.mem_side
diff --git a/tests/quick/00.hello/ref/alpha/tru64/simple-timing/config.out b/tests/quick/00.hello/ref/alpha/tru64/simple-timing/config.out
index c45e587d9..dcdc36e90 100644
--- a/tests/quick/00.hello/ref/alpha/tru64/simple-timing/config.out
+++ b/tests/quick/00.hello/ref/alpha/tru64/simple-timing/config.out
@@ -21,45 +21,7 @@ type=Bus
bus_id=0
clock=1000
width=64
-
-[system.cpu.dcache]
-type=BaseCache
-size=262144
-assoc=2
-block_size=64
-latency=1
-mshrs=10
-tgts_per_mshr=5
-write_buffers=8
-prioritizeRequests=false
-do_copy=false
-protocol=null
-trace_addr=0
-hash_delay=1
-repl=null
-compressed_bus=false
-store_compressed=false
-adaptive_compression=false
-compression_latency=0
-block_size=64
-max_miss_count=0
-addr_range=[0,18446744073709551615]
-split=false
-split_size=0
-lifo=false
-two_queue=false
-prefetch_miss=false
-prefetch_access=false
-prefetcher_size=100
-prefetch_past_page=false
-prefetch_serial_squash=false
-prefetch_latency=10
-prefetch_degree=1
-prefetch_policy=none
-prefetch_cache_check_push=true
-prefetch_use_cpu_id=true
-prefetch_data_accesses_only=false
-hit_latency=1
+responder_set=false
[system.cpu.workload]
type=LiveProcess
@@ -83,7 +45,6 @@ max_insts_all_threads=0
max_loads_any_thread=0
max_loads_all_threads=0
progress_interval=0
-mem=system.cpu.dcache
system=system
cpu_id=0
workload=system.cpu.workload
@@ -99,6 +60,7 @@ type=Bus
bus_id=0
clock=1000
width=64
+responder_set=false
[system.cpu.icache]
type=BaseCache
@@ -110,7 +72,44 @@ mshrs=10
tgts_per_mshr=5
write_buffers=8
prioritizeRequests=false
-do_copy=false
+protocol=null
+trace_addr=0
+hash_delay=1
+repl=null
+compressed_bus=false
+store_compressed=false
+adaptive_compression=false
+compression_latency=0
+block_size=64
+max_miss_count=0
+addr_range=[0,18446744073709551615]
+split=false
+split_size=0
+lifo=false
+two_queue=false
+prefetch_miss=false
+prefetch_access=false
+prefetcher_size=100
+prefetch_past_page=false
+prefetch_serial_squash=false
+prefetch_latency=10
+prefetch_degree=1
+prefetch_policy=none
+prefetch_cache_check_push=true
+prefetch_use_cpu_id=true
+prefetch_data_accesses_only=false
+hit_latency=1
+
+[system.cpu.dcache]
+type=BaseCache
+size=262144
+assoc=2
+block_size=64
+latency=1
+mshrs=10
+tgts_per_mshr=5
+write_buffers=8
+prioritizeRequests=false
protocol=null
trace_addr=0
hash_delay=1
@@ -149,7 +148,6 @@ mshrs=10
tgts_per_mshr=5
write_buffers=8
prioritizeRequests=false
-do_copy=false
protocol=null
trace_addr=0
hash_delay=1
diff --git a/tests/quick/00.hello/ref/alpha/tru64/simple-timing/m5stats.txt b/tests/quick/00.hello/ref/alpha/tru64/simple-timing/m5stats.txt
index 53f245414..010da4162 100644
--- a/tests/quick/00.hello/ref/alpha/tru64/simple-timing/m5stats.txt
+++ b/tests/quick/00.hello/ref/alpha/tru64/simple-timing/m5stats.txt
@@ -1,9 +1,9 @@
---------- Begin Simulation Statistics ----------
-host_inst_rate 7429 # Simulator instruction rate (inst/s)
-host_mem_usage 179540 # Number of bytes of host memory used
-host_seconds 0.35 # Real time elapsed on the host
-host_tick_rate 2820365 # Simulator tick rate (ticks/s)
+host_inst_rate 153015 # Simulator instruction rate (inst/s)
+host_mem_usage 179088 # Number of bytes of host memory used
+host_seconds 0.02 # Real time elapsed on the host
+host_tick_rate 56749783 # Simulator tick rate (ticks/s)
sim_freq 1000000000000 # Frequency of simulated ticks
sim_insts 2578 # Number of instructions simulated
sim_seconds 0.000001 # Number of seconds simulated
@@ -53,7 +53,7 @@ system.cpu.dcache.no_allocate_misses 0 # Nu
system.cpu.dcache.overall_accesses 709 # number of overall (read+write) accesses
system.cpu.dcache.overall_avg_miss_latency 3989.475610 # average overall miss latency
system.cpu.dcache.overall_avg_mshr_miss_latency 2989.475610 # average overall mshr miss latency
-system.cpu.dcache.overall_avg_mshr_uncacheable_latency no value # average overall mshr uncacheable latency
+system.cpu.dcache.overall_avg_mshr_uncacheable_latency <err: div-0> # average overall mshr uncacheable latency
system.cpu.dcache.overall_hits 627 # number of overall hits
system.cpu.dcache.overall_miss_latency 327137 # number of overall miss cycles
system.cpu.dcache.overall_miss_rate 0.115656 # miss rate for overall accesses
@@ -115,7 +115,7 @@ system.cpu.icache.no_allocate_misses 0 # Nu
system.cpu.icache.overall_accesses 2579 # number of overall (read+write) accesses
system.cpu.icache.overall_avg_miss_latency 3986.705521 # average overall miss latency
system.cpu.icache.overall_avg_mshr_miss_latency 2986.705521 # average overall mshr miss latency
-system.cpu.icache.overall_avg_mshr_uncacheable_latency no value # average overall mshr uncacheable latency
+system.cpu.icache.overall_avg_mshr_uncacheable_latency <err: div-0> # average overall mshr uncacheable latency
system.cpu.icache.overall_hits 2416 # number of overall hits
system.cpu.icache.overall_miss_latency 649833 # number of overall miss cycles
system.cpu.icache.overall_miss_rate 0.063203 # miss rate for overall accesses
@@ -177,7 +177,7 @@ system.cpu.l2cache.no_allocate_misses 0 # Nu
system.cpu.l2cache.overall_accesses 245 # number of overall (read+write) accesses
system.cpu.l2cache.overall_avg_miss_latency 2987.632653 # average overall miss latency
system.cpu.l2cache.overall_avg_mshr_miss_latency 1986.632653 # average overall mshr miss latency
-system.cpu.l2cache.overall_avg_mshr_uncacheable_latency no value # average overall mshr uncacheable latency
+system.cpu.l2cache.overall_avg_mshr_uncacheable_latency <err: div-0> # average overall mshr uncacheable latency
system.cpu.l2cache.overall_hits 0 # number of overall hits
system.cpu.l2cache.overall_miss_latency 731970 # number of overall miss cycles
system.cpu.l2cache.overall_miss_rate 1 # miss rate for overall accesses
diff --git a/tests/quick/00.hello/ref/alpha/tru64/simple-timing/stderr b/tests/quick/00.hello/ref/alpha/tru64/simple-timing/stderr
index c2154cff2..b3cdfe967 100644
--- a/tests/quick/00.hello/ref/alpha/tru64/simple-timing/stderr
+++ b/tests/quick/00.hello/ref/alpha/tru64/simple-timing/stderr
@@ -1,3 +1,2 @@
warn: Entering event queue @ 0. Starting simulation...
-warn: Increasing stack 0x11ff92000:0x11ff9b000 to 0x11ff90000:0x11ff9b000 because of access to 0x11ff91ff8
warn: ignoring syscall sigprocmask(1, 18446744073709547831, ...)
diff --git a/tests/quick/00.hello/ref/alpha/tru64/simple-timing/stdout b/tests/quick/00.hello/ref/alpha/tru64/simple-timing/stdout
index b479e5a46..cf7a58ef1 100644
--- a/tests/quick/00.hello/ref/alpha/tru64/simple-timing/stdout
+++ b/tests/quick/00.hello/ref/alpha/tru64/simple-timing/stdout
@@ -6,8 +6,8 @@ The Regents of The University of Michigan
All Rights Reserved
-M5 compiled Oct 13 2006 16:07:10
-M5 started Fri Oct 13 16:08:56 2006
+M5 compiled Nov 3 2006 17:10:27
+M5 started Fri Nov 3 17:10:51 2006
M5 executing on zizzer.eecs.umich.edu
-command line: build/ALPHA_SE/m5.debug -d build/ALPHA_SE/tests/debug/quick/00.hello/alpha/tru64/simple-timing tests/run.py quick/00.hello/alpha/tru64/simple-timing
+command line: build/ALPHA_SE/m5.opt -d build/ALPHA_SE/tests/opt/quick/00.hello/alpha/tru64/simple-timing tests/run.py quick/00.hello/alpha/tru64/simple-timing
Exiting @ tick 980012 because target called exit()
diff --git a/tests/quick/01.hello-2T-smt/ref/alpha/linux/o3-timing/config.ini b/tests/quick/01.hello-2T-smt/ref/alpha/linux/o3-timing/config.ini
index 9dad57e13..df37337b1 100644
--- a/tests/quick/01.hello-2T-smt/ref/alpha/linux/o3-timing/config.ini
+++ b/tests/quick/01.hello-2T-smt/ref/alpha/linux/o3-timing/config.ini
@@ -102,7 +102,6 @@ max_insts_all_threads=0
max_insts_any_thread=0
max_loads_all_threads=0
max_loads_any_thread=0
-mem=system.cpu.dcache
numIQEntries=64
numPhysFloatRegs=256
numPhysIntRegs=256
@@ -132,7 +131,6 @@ assoc=2
block_size=64
compressed_bus=false
compression_latency=0
-do_copy=false
hash_delay=1
hit_latency=1
latency=1
@@ -309,7 +307,6 @@ assoc=2
block_size=64
compressed_bus=false
compression_latency=0
-do_copy=false
hash_delay=1
hit_latency=1
latency=1
@@ -349,7 +346,6 @@ assoc=2
block_size=64
compressed_bus=false
compression_latency=0
-do_copy=false
hash_delay=1
hit_latency=1
latency=1
@@ -386,6 +382,7 @@ mem_side=system.membus.port[1]
type=Bus
bus_id=0
clock=1000
+responder_set=false
width=64
port=system.cpu.icache.mem_side system.cpu.dcache.mem_side system.cpu.l2cache.cpu_side
@@ -423,6 +420,7 @@ uid=100
type=Bus
bus_id=0
clock=1000
+responder_set=false
width=64
port=system.physmem.port system.cpu.l2cache.mem_side
diff --git a/tests/quick/01.hello-2T-smt/ref/alpha/linux/o3-timing/config.out b/tests/quick/01.hello-2T-smt/ref/alpha/linux/o3-timing/config.out
index bb55a2b69..b0dbe1796 100644
--- a/tests/quick/01.hello-2T-smt/ref/alpha/linux/o3-timing/config.out
+++ b/tests/quick/01.hello-2T-smt/ref/alpha/linux/o3-timing/config.out
@@ -21,6 +21,7 @@ type=Bus
bus_id=0
clock=1000
width=64
+responder_set=false
[system.cpu.workload0]
type=LiveProcess
@@ -52,45 +53,6 @@ egid=100
pid=100
ppid=99
-[system.cpu.dcache]
-type=BaseCache
-size=262144
-assoc=2
-block_size=64
-latency=1
-mshrs=10
-tgts_per_mshr=5
-write_buffers=8
-prioritizeRequests=false
-do_copy=false
-protocol=null
-trace_addr=0
-hash_delay=1
-repl=null
-compressed_bus=false
-store_compressed=false
-adaptive_compression=false
-compression_latency=0
-block_size=64
-max_miss_count=0
-addr_range=[0,18446744073709551615]
-split=false
-split_size=0
-lifo=false
-two_queue=false
-prefetch_miss=false
-prefetch_access=false
-prefetcher_size=100
-prefetch_past_page=false
-prefetch_serial_squash=false
-prefetch_latency=10
-prefetch_degree=1
-prefetch_policy=none
-prefetch_cache_check_push=true
-prefetch_use_cpu_id=true
-prefetch_data_accesses_only=false
-hit_latency=1
-
[system.cpu.fuPool.FUList0.opList0]
type=OpDesc
opClass=IntAlu
@@ -225,7 +187,6 @@ clock=1
numThreads=1
activity=0
workload=system.cpu.workload0 system.cpu.workload1
-mem=system.cpu.dcache
checker=null
max_insts_any_thread=0
max_insts_all_threads=0
@@ -307,7 +268,44 @@ mshrs=10
tgts_per_mshr=5
write_buffers=8
prioritizeRequests=false
-do_copy=false
+protocol=null
+trace_addr=0
+hash_delay=1
+repl=null
+compressed_bus=false
+store_compressed=false
+adaptive_compression=false
+compression_latency=0
+block_size=64
+max_miss_count=0
+addr_range=[0,18446744073709551615]
+split=false
+split_size=0
+lifo=false
+two_queue=false
+prefetch_miss=false
+prefetch_access=false
+prefetcher_size=100
+prefetch_past_page=false
+prefetch_serial_squash=false
+prefetch_latency=10
+prefetch_degree=1
+prefetch_policy=none
+prefetch_cache_check_push=true
+prefetch_use_cpu_id=true
+prefetch_data_accesses_only=false
+hit_latency=1
+
+[system.cpu.dcache]
+type=BaseCache
+size=262144
+assoc=2
+block_size=64
+latency=1
+mshrs=10
+tgts_per_mshr=5
+write_buffers=8
+prioritizeRequests=false
protocol=null
trace_addr=0
hash_delay=1
@@ -346,7 +344,6 @@ mshrs=10
tgts_per_mshr=5
write_buffers=8
prioritizeRequests=false
-do_copy=false
protocol=null
trace_addr=0
hash_delay=1
@@ -380,6 +377,7 @@ type=Bus
bus_id=0
clock=1000
width=64
+responder_set=false
[trace]
flags=
diff --git a/tests/quick/01.hello-2T-smt/ref/alpha/linux/o3-timing/m5stats.txt b/tests/quick/01.hello-2T-smt/ref/alpha/linux/o3-timing/m5stats.txt
index 32bf8dc98..5115a5908 100644
--- a/tests/quick/01.hello-2T-smt/ref/alpha/linux/o3-timing/m5stats.txt
+++ b/tests/quick/01.hello-2T-smt/ref/alpha/linux/o3-timing/m5stats.txt
@@ -1,29 +1,29 @@
---------- Begin Simulation Statistics ----------
global.BPredUnit.BTBCorrect 0 # Number of correct BTB predictions (this stat may not work properly.
-global.BPredUnit.BTBHits 1308 # Number of BTB hits
-global.BPredUnit.BTBLookups 6837 # Number of BTB lookups
+global.BPredUnit.BTBHits 1309 # Number of BTB hits
+global.BPredUnit.BTBLookups 6835 # Number of BTB lookups
global.BPredUnit.RASInCorrect 164 # Number of incorrect RAS predictions.
-global.BPredUnit.condIncorrect 1235 # Number of conditional branches incorrect
-global.BPredUnit.condPredicted 4603 # Number of conditional branches predicted
-global.BPredUnit.lookups 12596 # Number of BP lookups
-global.BPredUnit.usedRAS 5739 # Number of times the RAS was used to get a target.
-host_inst_rate 945 # Simulator instruction rate (inst/s)
-host_mem_usage 181580 # Number of bytes of host memory used
-host_seconds 11.90 # Real time elapsed on the host
-host_tick_rate 187981 # Simulator tick rate (ticks/s)
-memdepunit.memDep.conflictingLoads 29 # Number of conflicting loads.
+global.BPredUnit.condIncorrect 1233 # Number of conditional branches incorrect
+global.BPredUnit.condPredicted 4602 # Number of conditional branches predicted
+global.BPredUnit.lookups 12593 # Number of BP lookups
+global.BPredUnit.usedRAS 5738 # Number of times the RAS was used to get a target.
+host_inst_rate 9412 # Simulator instruction rate (inst/s)
+host_mem_usage 181120 # Number of bytes of host memory used
+host_seconds 1.20 # Real time elapsed on the host
+host_tick_rate 1873386 # Simulator tick rate (ticks/s)
+memdepunit.memDep.conflictingLoads 0 # Number of conflicting loads.
memdepunit.memDep.conflictingLoads 22 # Number of conflicting loads.
memdepunit.memDep.conflictingStores 52 # Number of conflicting stores.
memdepunit.memDep.conflictingStores 3 # Number of conflicting stores.
-memdepunit.memDep.insertedLoads 6560 # Number of loads inserted to the mem dependence unit.
-memdepunit.memDep.insertedLoads 3600 # Number of loads inserted to the mem dependence unit.
+memdepunit.memDep.insertedLoads 6549 # Number of loads inserted to the mem dependence unit.
+memdepunit.memDep.insertedLoads 3592 # Number of loads inserted to the mem dependence unit.
memdepunit.memDep.insertedStores 5837 # Number of stores inserted to the mem dependence unit.
memdepunit.memDep.insertedStores 2389 # Number of stores inserted to the mem dependence unit.
sim_freq 1000000000000 # Frequency of simulated ticks
sim_insts 11247 # Number of instructions simulated
sim_seconds 0.000002 # Number of seconds simulated
-sim_ticks 2237162 # Number of ticks simulated
+sim_ticks 2239163 # Number of ticks simulated
system.cpu.commit.COM:branches 1724 # Number of branches committed
system.cpu.commit.COM:branches_0 862 # Number of branches committed
system.cpu.commit.COM:branches_1 862 # Number of branches committed
@@ -32,17 +32,17 @@ system.cpu.commit.COM:bw_limited 0 # nu
system.cpu.commit.COM:bw_limited_0 0 # number of insts not committed due to BW limits
system.cpu.commit.COM:bw_limited_1 0 # number of insts not committed due to BW limits
system.cpu.commit.COM:committed_per_cycle.start_dist # Number of insts commited each cycle
-system.cpu.commit.COM:committed_per_cycle.samples 189229
+system.cpu.commit.COM:committed_per_cycle.samples 185440
system.cpu.commit.COM:committed_per_cycle.min_value 0
- 0 183654 9705.38%
- 1 3073 162.40%
- 2 1213 64.10%
- 3 492 26.00%
- 4 307 16.22%
- 5 181 9.57%
- 6 120 6.34%
- 7 59 3.12%
- 8 130 6.87%
+ 0 179865 9699.36%
+ 1 3074 165.77%
+ 2 1213 65.41%
+ 3 492 26.53%
+ 4 305 16.45%
+ 5 181 9.76%
+ 6 120 6.47%
+ 7 60 3.24%
+ 8 130 7.01%
system.cpu.commit.COM:committed_per_cycle.max_value 8
system.cpu.commit.COM:committed_per_cycle.end_dist
@@ -61,34 +61,34 @@ system.cpu.commit.COM:refs_1 1791 # Nu
system.cpu.commit.COM:swp_count 0 # Number of s/w prefetches committed
system.cpu.commit.COM:swp_count_0 0 # Number of s/w prefetches committed
system.cpu.commit.COM:swp_count_1 0 # Number of s/w prefetches committed
-system.cpu.commit.branchMispredicts 980 # The number of times a branch was mispredicted
+system.cpu.commit.branchMispredicts 978 # The number of times a branch was mispredicted
system.cpu.commit.commitCommittedInsts 11281 # The number of committed instructions
system.cpu.commit.commitNonSpecStalls 34 # The number of times commit has been forced to stall to communicate backwards
-system.cpu.commit.commitSquashedInsts 31727 # The number of squashed insts skipped by commit
+system.cpu.commit.commitSquashedInsts 31695 # The number of squashed insts skipped by commit
system.cpu.committedInsts_0 5624 # Number of Instructions Simulated
system.cpu.committedInsts_1 5623 # Number of Instructions Simulated
system.cpu.committedInsts_total 11247 # Number of Instructions Simulated
-system.cpu.cpi_0 397.788407 # CPI: Cycles Per Instruction
-system.cpu.cpi_1 397.859150 # CPI: Cycles Per Instruction
-system.cpu.cpi_total 198.911888 # CPI: Total CPI of All Threads
+system.cpu.cpi_0 398.144203 # CPI: Cycles Per Instruction
+system.cpu.cpi_1 398.215010 # CPI: Cycles Per Instruction
+system.cpu.cpi_total 199.089802 # CPI: Total CPI of All Threads
system.cpu.dcache.ReadReq_accesses 3208 # number of ReadReq accesses(hits+misses)
system.cpu.dcache.ReadReq_accesses_0 3208 # number of ReadReq accesses(hits+misses)
-system.cpu.dcache.ReadReq_avg_miss_latency 10081.356250 # average ReadReq miss latency
-system.cpu.dcache.ReadReq_avg_miss_latency_0 10081.356250 # average ReadReq miss latency
-system.cpu.dcache.ReadReq_avg_mshr_miss_latency 10477.810000 # average ReadReq mshr miss latency
-system.cpu.dcache.ReadReq_avg_mshr_miss_latency_0 10477.810000 # average ReadReq mshr miss latency
-system.cpu.dcache.ReadReq_hits 2888 # number of ReadReq hits
-system.cpu.dcache.ReadReq_hits_0 2888 # number of ReadReq hits
-system.cpu.dcache.ReadReq_miss_latency 3226034 # number of ReadReq miss cycles
-system.cpu.dcache.ReadReq_miss_latency_0 3226034 # number of ReadReq miss cycles
-system.cpu.dcache.ReadReq_miss_rate 0.099751 # miss rate for ReadReq accesses
-system.cpu.dcache.ReadReq_miss_rate_0 0.099751 # miss rate for ReadReq accesses
-system.cpu.dcache.ReadReq_misses 320 # number of ReadReq misses
-system.cpu.dcache.ReadReq_misses_0 320 # number of ReadReq misses
-system.cpu.dcache.ReadReq_mshr_hits 120 # number of ReadReq MSHR hits
-system.cpu.dcache.ReadReq_mshr_hits_0 120 # number of ReadReq MSHR hits
-system.cpu.dcache.ReadReq_mshr_miss_latency 2095562 # number of ReadReq MSHR miss cycles
-system.cpu.dcache.ReadReq_mshr_miss_latency_0 2095562 # number of ReadReq MSHR miss cycles
+system.cpu.dcache.ReadReq_avg_miss_latency 10071.492212 # average ReadReq miss latency
+system.cpu.dcache.ReadReq_avg_miss_latency_0 10071.492212 # average ReadReq miss latency
+system.cpu.dcache.ReadReq_avg_mshr_miss_latency 10492.815000 # average ReadReq mshr miss latency
+system.cpu.dcache.ReadReq_avg_mshr_miss_latency_0 10492.815000 # average ReadReq mshr miss latency
+system.cpu.dcache.ReadReq_hits 2887 # number of ReadReq hits
+system.cpu.dcache.ReadReq_hits_0 2887 # number of ReadReq hits
+system.cpu.dcache.ReadReq_miss_latency 3232949 # number of ReadReq miss cycles
+system.cpu.dcache.ReadReq_miss_latency_0 3232949 # number of ReadReq miss cycles
+system.cpu.dcache.ReadReq_miss_rate 0.100062 # miss rate for ReadReq accesses
+system.cpu.dcache.ReadReq_miss_rate_0 0.100062 # miss rate for ReadReq accesses
+system.cpu.dcache.ReadReq_misses 321 # number of ReadReq misses
+system.cpu.dcache.ReadReq_misses_0 321 # number of ReadReq misses
+system.cpu.dcache.ReadReq_mshr_hits 121 # number of ReadReq MSHR hits
+system.cpu.dcache.ReadReq_mshr_hits_0 121 # number of ReadReq MSHR hits
+system.cpu.dcache.ReadReq_mshr_miss_latency 2098563 # number of ReadReq MSHR miss cycles
+system.cpu.dcache.ReadReq_mshr_miss_latency_0 2098563 # number of ReadReq MSHR miss cycles
system.cpu.dcache.ReadReq_mshr_miss_rate 0.062344 # mshr miss rate for ReadReq accesses
system.cpu.dcache.ReadReq_mshr_miss_rate_0 0.062344 # mshr miss rate for ReadReq accesses
system.cpu.dcache.ReadReq_mshr_misses 200 # number of ReadReq MSHR misses
@@ -117,7 +117,7 @@ system.cpu.dcache.WriteReq_mshr_misses 146 # nu
system.cpu.dcache.WriteReq_mshr_misses_0 146 # number of WriteReq MSHR misses
system.cpu.dcache.avg_blocked_cycles_no_mshrs 3977 # average number of cycles each access was blocked
system.cpu.dcache.avg_blocked_cycles_no_targets 3606.011765 # average number of cycles each access was blocked
-system.cpu.dcache.avg_refs 11.575145 # Average number of references to valid blocks.
+system.cpu.dcache.avg_refs 11.572254 # Average number of references to valid blocks.
system.cpu.dcache.blocked_no_mshrs 1 # number of cycles access was blocked
system.cpu.dcache.blocked_no_targets 85 # number of cycles access was blocked
system.cpu.dcache.blocked_cycles_no_mshrs 3977 # number of cycles access was blocked
@@ -126,33 +126,33 @@ system.cpu.dcache.cache_copies 0 # nu
system.cpu.dcache.demand_accesses 4832 # number of demand (read+write) accesses
system.cpu.dcache.demand_accesses_0 4832 # number of demand (read+write) accesses
system.cpu.dcache.demand_accesses_1 0 # number of demand (read+write) accesses
-system.cpu.dcache.demand_avg_miss_latency 7905.902056 # average overall miss latency
-system.cpu.dcache.demand_avg_miss_latency_0 7905.902056 # average overall miss latency
+system.cpu.dcache.demand_avg_miss_latency 7904.705314 # average overall miss latency
+system.cpu.dcache.demand_avg_miss_latency_0 7904.705314 # average overall miss latency
system.cpu.dcache.demand_avg_miss_latency_1 <err: div-0> # average overall miss latency
-system.cpu.dcache.demand_avg_mshr_miss_latency 9355.303468 # average overall mshr miss latency
-system.cpu.dcache.demand_avg_mshr_miss_latency_0 9355.303468 # average overall mshr miss latency
+system.cpu.dcache.demand_avg_mshr_miss_latency 9363.976879 # average overall mshr miss latency
+system.cpu.dcache.demand_avg_mshr_miss_latency_0 9363.976879 # average overall mshr miss latency
system.cpu.dcache.demand_avg_mshr_miss_latency_1 <err: div-0> # average overall mshr miss latency
-system.cpu.dcache.demand_hits 4005 # number of demand (read+write) hits
-system.cpu.dcache.demand_hits_0 4005 # number of demand (read+write) hits
+system.cpu.dcache.demand_hits 4004 # number of demand (read+write) hits
+system.cpu.dcache.demand_hits_0 4004 # number of demand (read+write) hits
system.cpu.dcache.demand_hits_1 0 # number of demand (read+write) hits
-system.cpu.dcache.demand_miss_latency 6538181 # number of demand (read+write) miss cycles
-system.cpu.dcache.demand_miss_latency_0 6538181 # number of demand (read+write) miss cycles
+system.cpu.dcache.demand_miss_latency 6545096 # number of demand (read+write) miss cycles
+system.cpu.dcache.demand_miss_latency_0 6545096 # number of demand (read+write) miss cycles
system.cpu.dcache.demand_miss_latency_1 0 # number of demand (read+write) miss cycles
-system.cpu.dcache.demand_miss_rate 0.171151 # miss rate for demand accesses
-system.cpu.dcache.demand_miss_rate_0 0.171151 # miss rate for demand accesses
-system.cpu.dcache.demand_miss_rate_1 no value # miss rate for demand accesses
-system.cpu.dcache.demand_misses 827 # number of demand (read+write) misses
-system.cpu.dcache.demand_misses_0 827 # number of demand (read+write) misses
+system.cpu.dcache.demand_miss_rate 0.171358 # miss rate for demand accesses
+system.cpu.dcache.demand_miss_rate_0 0.171358 # miss rate for demand accesses
+system.cpu.dcache.demand_miss_rate_1 <err: div-0> # miss rate for demand accesses
+system.cpu.dcache.demand_misses 828 # number of demand (read+write) misses
+system.cpu.dcache.demand_misses_0 828 # number of demand (read+write) misses
system.cpu.dcache.demand_misses_1 0 # number of demand (read+write) misses
-system.cpu.dcache.demand_mshr_hits 481 # number of demand (read+write) MSHR hits
-system.cpu.dcache.demand_mshr_hits_0 481 # number of demand (read+write) MSHR hits
+system.cpu.dcache.demand_mshr_hits 482 # number of demand (read+write) MSHR hits
+system.cpu.dcache.demand_mshr_hits_0 482 # number of demand (read+write) MSHR hits
system.cpu.dcache.demand_mshr_hits_1 0 # number of demand (read+write) MSHR hits
-system.cpu.dcache.demand_mshr_miss_latency 3236935 # number of demand (read+write) MSHR miss cycles
-system.cpu.dcache.demand_mshr_miss_latency_0 3236935 # number of demand (read+write) MSHR miss cycles
+system.cpu.dcache.demand_mshr_miss_latency 3239936 # number of demand (read+write) MSHR miss cycles
+system.cpu.dcache.demand_mshr_miss_latency_0 3239936 # number of demand (read+write) MSHR miss cycles
system.cpu.dcache.demand_mshr_miss_latency_1 0 # number of demand (read+write) MSHR miss cycles
system.cpu.dcache.demand_mshr_miss_rate 0.071606 # mshr miss rate for demand accesses
system.cpu.dcache.demand_mshr_miss_rate_0 0.071606 # mshr miss rate for demand accesses
-system.cpu.dcache.demand_mshr_miss_rate_1 no value # mshr miss rate for demand accesses
+system.cpu.dcache.demand_mshr_miss_rate_1 <err: div-0> # mshr miss rate for demand accesses
system.cpu.dcache.demand_mshr_misses 346 # number of demand (read+write) MSHR misses
system.cpu.dcache.demand_mshr_misses_0 346 # number of demand (read+write) MSHR misses
system.cpu.dcache.demand_mshr_misses_1 0 # number of demand (read+write) MSHR misses
@@ -164,36 +164,36 @@ system.cpu.dcache.no_allocate_misses 0 # Nu
system.cpu.dcache.overall_accesses 4832 # number of overall (read+write) accesses
system.cpu.dcache.overall_accesses_0 4832 # number of overall (read+write) accesses
system.cpu.dcache.overall_accesses_1 0 # number of overall (read+write) accesses
-system.cpu.dcache.overall_avg_miss_latency 7905.902056 # average overall miss latency
-system.cpu.dcache.overall_avg_miss_latency_0 7905.902056 # average overall miss latency
+system.cpu.dcache.overall_avg_miss_latency 7904.705314 # average overall miss latency
+system.cpu.dcache.overall_avg_miss_latency_0 7904.705314 # average overall miss latency
system.cpu.dcache.overall_avg_miss_latency_1 <err: div-0> # average overall miss latency
-system.cpu.dcache.overall_avg_mshr_miss_latency 9355.303468 # average overall mshr miss latency
-system.cpu.dcache.overall_avg_mshr_miss_latency_0 9355.303468 # average overall mshr miss latency
+system.cpu.dcache.overall_avg_mshr_miss_latency 9363.976879 # average overall mshr miss latency
+system.cpu.dcache.overall_avg_mshr_miss_latency_0 9363.976879 # average overall mshr miss latency
system.cpu.dcache.overall_avg_mshr_miss_latency_1 <err: div-0> # average overall mshr miss latency
-system.cpu.dcache.overall_avg_mshr_uncacheable_latency no value # average overall mshr uncacheable latency
-system.cpu.dcache.overall_avg_mshr_uncacheable_latency_0 no value # average overall mshr uncacheable latency
-system.cpu.dcache.overall_avg_mshr_uncacheable_latency_1 no value # average overall mshr uncacheable latency
-system.cpu.dcache.overall_hits 4005 # number of overall hits
-system.cpu.dcache.overall_hits_0 4005 # number of overall hits
+system.cpu.dcache.overall_avg_mshr_uncacheable_latency <err: div-0> # average overall mshr uncacheable latency
+system.cpu.dcache.overall_avg_mshr_uncacheable_latency_0 <err: div-0> # average overall mshr uncacheable latency
+system.cpu.dcache.overall_avg_mshr_uncacheable_latency_1 <err: div-0> # average overall mshr uncacheable latency
+system.cpu.dcache.overall_hits 4004 # number of overall hits
+system.cpu.dcache.overall_hits_0 4004 # number of overall hits
system.cpu.dcache.overall_hits_1 0 # number of overall hits
-system.cpu.dcache.overall_miss_latency 6538181 # number of overall miss cycles
-system.cpu.dcache.overall_miss_latency_0 6538181 # number of overall miss cycles
+system.cpu.dcache.overall_miss_latency 6545096 # number of overall miss cycles
+system.cpu.dcache.overall_miss_latency_0 6545096 # number of overall miss cycles
system.cpu.dcache.overall_miss_latency_1 0 # number of overall miss cycles
-system.cpu.dcache.overall_miss_rate 0.171151 # miss rate for overall accesses
-system.cpu.dcache.overall_miss_rate_0 0.171151 # miss rate for overall accesses
-system.cpu.dcache.overall_miss_rate_1 no value # miss rate for overall accesses
-system.cpu.dcache.overall_misses 827 # number of overall misses
-system.cpu.dcache.overall_misses_0 827 # number of overall misses
+system.cpu.dcache.overall_miss_rate 0.171358 # miss rate for overall accesses
+system.cpu.dcache.overall_miss_rate_0 0.171358 # miss rate for overall accesses
+system.cpu.dcache.overall_miss_rate_1 <err: div-0> # miss rate for overall accesses
+system.cpu.dcache.overall_misses 828 # number of overall misses
+system.cpu.dcache.overall_misses_0 828 # number of overall misses
system.cpu.dcache.overall_misses_1 0 # number of overall misses
-system.cpu.dcache.overall_mshr_hits 481 # number of overall MSHR hits
-system.cpu.dcache.overall_mshr_hits_0 481 # number of overall MSHR hits
+system.cpu.dcache.overall_mshr_hits 482 # number of overall MSHR hits
+system.cpu.dcache.overall_mshr_hits_0 482 # number of overall MSHR hits
system.cpu.dcache.overall_mshr_hits_1 0 # number of overall MSHR hits
-system.cpu.dcache.overall_mshr_miss_latency 3236935 # number of overall MSHR miss cycles
-system.cpu.dcache.overall_mshr_miss_latency_0 3236935 # number of overall MSHR miss cycles
+system.cpu.dcache.overall_mshr_miss_latency 3239936 # number of overall MSHR miss cycles
+system.cpu.dcache.overall_mshr_miss_latency_0 3239936 # number of overall MSHR miss cycles
system.cpu.dcache.overall_mshr_miss_latency_1 0 # number of overall MSHR miss cycles
system.cpu.dcache.overall_mshr_miss_rate 0.071606 # mshr miss rate for overall accesses
system.cpu.dcache.overall_mshr_miss_rate_0 0.071606 # mshr miss rate for overall accesses
-system.cpu.dcache.overall_mshr_miss_rate_1 no value # mshr miss rate for overall accesses
+system.cpu.dcache.overall_mshr_miss_rate_1 <err: div-0> # mshr miss rate for overall accesses
system.cpu.dcache.overall_mshr_misses 346 # number of overall MSHR misses
system.cpu.dcache.overall_mshr_misses_0 346 # number of overall MSHR misses
system.cpu.dcache.overall_mshr_misses_1 0 # number of overall MSHR misses
@@ -219,149 +219,149 @@ system.cpu.dcache.sampled_refs 346 # Sa
system.cpu.dcache.soft_prefetch_mshr_full 0 # number of mshr full events for SW prefetching instrutions
system.cpu.dcache.soft_prefetch_mshr_full_0 0 # number of mshr full events for SW prefetching instrutions
system.cpu.dcache.soft_prefetch_mshr_full_1 0 # number of mshr full events for SW prefetching instrutions
-system.cpu.dcache.tagsinuse 198.595005 # Cycle average of tags in use
-system.cpu.dcache.total_refs 4005 # Total number of references to valid blocks.
+system.cpu.dcache.tagsinuse 198.721819 # Cycle average of tags in use
+system.cpu.dcache.total_refs 4004 # Total number of references to valid blocks.
system.cpu.dcache.warmup_cycle 0 # Cycle when the warmup percentage was hit.
system.cpu.dcache.writebacks 0 # number of writebacks
system.cpu.dcache.writebacks_0 0 # number of writebacks
system.cpu.dcache.writebacks_1 0 # number of writebacks
-system.cpu.decode.DECODE:BlockedCycles 101864 # Number of cycles decode is blocked
+system.cpu.decode.DECODE:BlockedCycles 96221 # Number of cycles decode is blocked
system.cpu.decode.DECODE:BranchMispred 264 # Number of times decode detected a branch misprediction
system.cpu.decode.DECODE:BranchResolved 379 # Number of times decode resolved a branch
-system.cpu.decode.DECODE:DecodedInsts 73628 # Number of instructions handled by decode
-system.cpu.decode.DECODE:IdleCycles 257376 # Number of cycles decode is idle
-system.cpu.decode.DECODE:RunCycles 12701 # Number of cycles decode is running
-system.cpu.decode.DECODE:SquashCycles 6044 # Number of cycles decode is squashing
+system.cpu.decode.DECODE:DecodedInsts 73578 # Number of instructions handled by decode
+system.cpu.decode.DECODE:IdleCycles 255461 # Number of cycles decode is idle
+system.cpu.decode.DECODE:RunCycles 12691 # Number of cycles decode is running
+system.cpu.decode.DECODE:SquashCycles 6036 # Number of cycles decode is squashing
system.cpu.decode.DECODE:SquashedInsts 680 # Number of squashed instructions handled by decode
-system.cpu.decode.DECODE:UnblockCycles 340 # Number of cycles decode is unblocking
-system.cpu.fetch.Branches 12596 # Number of branches that fetch encountered
-system.cpu.fetch.CacheLines 13043 # Number of cache lines fetched
-system.cpu.fetch.Cycles 28220 # Number of cycles fetch has run and was not squashing or blocked
-system.cpu.fetch.IcacheSquashes 1653 # Number of outstanding Icache misses that were squashed
-system.cpu.fetch.Insts 84650 # Number of instructions fetch has processed
-system.cpu.fetch.SquashCycles 4944 # Number of cycles fetch has spent squashing
-system.cpu.fetch.branchRate 0.066558 # Number of branch fetches per cycle
-system.cpu.fetch.icacheStallCycles 52829 # Number of cycles fetch is stalled on an Icache miss
+system.cpu.decode.DECODE:UnblockCycles 337 # Number of cycles decode is unblocking
+system.cpu.fetch.Branches 12593 # Number of branches that fetch encountered
+system.cpu.fetch.CacheLines 13036 # Number of cache lines fetched
+system.cpu.fetch.Cycles 28204 # Number of cycles fetch has run and was not squashing or blocked
+system.cpu.fetch.IcacheSquashes 1652 # Number of outstanding Icache misses that were squashed
+system.cpu.fetch.Insts 84597 # Number of instructions fetch has processed
+system.cpu.fetch.SquashCycles 4941 # Number of cycles fetch has spent squashing
+system.cpu.fetch.branchRate 0.067901 # Number of branch fetches per cycle
+system.cpu.fetch.icacheStallCycles 52822 # Number of cycles fetch is stalled on an Icache miss
system.cpu.fetch.predictedBranches 7047 # Number of branches that fetch has predicted taken
-system.cpu.fetch.rate 0.447294 # Number of inst fetches per cycle
+system.cpu.fetch.rate 0.456147 # Number of inst fetches per cycle
system.cpu.fetch.rateDist.start_dist # Number of instructions fetched each cycle (Total)
-system.cpu.fetch.rateDist.samples 189249
+system.cpu.fetch.rateDist.samples 185460
system.cpu.fetch.rateDist.min_value 0
- 0 174064 9197.62%
- 1 369 19.50%
- 2 570 30.12%
- 3 3356 177.33%
- 4 1799 95.06%
- 5 1035 54.69%
- 6 675 35.67%
- 7 2396 126.61%
- 8 4985 263.41%
+ 0 170284 9181.71%
+ 1 368 19.84%
+ 2 571 30.79%
+ 3 3355 180.90%
+ 4 1795 96.79%
+ 5 1036 55.86%
+ 6 675 36.40%
+ 7 2396 129.19%
+ 8 4980 268.52%
system.cpu.fetch.rateDist.max_value 8
system.cpu.fetch.rateDist.end_dist
-system.cpu.icache.ReadReq_accesses 13041 # number of ReadReq accesses(hits+misses)
-system.cpu.icache.ReadReq_accesses_0 13041 # number of ReadReq accesses(hits+misses)
-system.cpu.icache.ReadReq_avg_miss_latency 7799.181319 # average ReadReq miss latency
-system.cpu.icache.ReadReq_avg_miss_latency_0 7799.181319 # average ReadReq miss latency
-system.cpu.icache.ReadReq_avg_mshr_miss_latency 7166.106518 # average ReadReq mshr miss latency
-system.cpu.icache.ReadReq_avg_mshr_miss_latency_0 7166.106518 # average ReadReq mshr miss latency
-system.cpu.icache.ReadReq_hits 12131 # number of ReadReq hits
-system.cpu.icache.ReadReq_hits_0 12131 # number of ReadReq hits
-system.cpu.icache.ReadReq_miss_latency 7097255 # number of ReadReq miss cycles
-system.cpu.icache.ReadReq_miss_latency_0 7097255 # number of ReadReq miss cycles
-system.cpu.icache.ReadReq_miss_rate 0.069780 # miss rate for ReadReq accesses
-system.cpu.icache.ReadReq_miss_rate_0 0.069780 # miss rate for ReadReq accesses
-system.cpu.icache.ReadReq_misses 910 # number of ReadReq misses
-system.cpu.icache.ReadReq_misses_0 910 # number of ReadReq misses
+system.cpu.icache.ReadReq_accesses 13034 # number of ReadReq accesses(hits+misses)
+system.cpu.icache.ReadReq_accesses_0 13034 # number of ReadReq accesses(hits+misses)
+system.cpu.icache.ReadReq_avg_miss_latency 7812.430296 # average ReadReq miss latency
+system.cpu.icache.ReadReq_avg_miss_latency_0 7812.430296 # average ReadReq miss latency
+system.cpu.icache.ReadReq_avg_mshr_miss_latency 7184.680952 # average ReadReq mshr miss latency
+system.cpu.icache.ReadReq_avg_mshr_miss_latency_0 7184.680952 # average ReadReq mshr miss latency
+system.cpu.icache.ReadReq_hits 12123 # number of ReadReq hits
+system.cpu.icache.ReadReq_hits_0 12123 # number of ReadReq hits
+system.cpu.icache.ReadReq_miss_latency 7117124 # number of ReadReq miss cycles
+system.cpu.icache.ReadReq_miss_latency_0 7117124 # number of ReadReq miss cycles
+system.cpu.icache.ReadReq_miss_rate 0.069894 # miss rate for ReadReq accesses
+system.cpu.icache.ReadReq_miss_rate_0 0.069894 # miss rate for ReadReq accesses
+system.cpu.icache.ReadReq_misses 911 # number of ReadReq misses
+system.cpu.icache.ReadReq_misses_0 911 # number of ReadReq misses
system.cpu.icache.ReadReq_mshr_hits 281 # number of ReadReq MSHR hits
system.cpu.icache.ReadReq_mshr_hits_0 281 # number of ReadReq MSHR hits
-system.cpu.icache.ReadReq_mshr_miss_latency 4507481 # number of ReadReq MSHR miss cycles
-system.cpu.icache.ReadReq_mshr_miss_latency_0 4507481 # number of ReadReq MSHR miss cycles
-system.cpu.icache.ReadReq_mshr_miss_rate 0.048232 # mshr miss rate for ReadReq accesses
-system.cpu.icache.ReadReq_mshr_miss_rate_0 0.048232 # mshr miss rate for ReadReq accesses
-system.cpu.icache.ReadReq_mshr_misses 629 # number of ReadReq MSHR misses
-system.cpu.icache.ReadReq_mshr_misses_0 629 # number of ReadReq MSHR misses
+system.cpu.icache.ReadReq_mshr_miss_latency 4526349 # number of ReadReq MSHR miss cycles
+system.cpu.icache.ReadReq_mshr_miss_latency_0 4526349 # number of ReadReq MSHR miss cycles
+system.cpu.icache.ReadReq_mshr_miss_rate 0.048335 # mshr miss rate for ReadReq accesses
+system.cpu.icache.ReadReq_mshr_miss_rate_0 0.048335 # mshr miss rate for ReadReq accesses
+system.cpu.icache.ReadReq_mshr_misses 630 # number of ReadReq MSHR misses
+system.cpu.icache.ReadReq_mshr_misses_0 630 # number of ReadReq MSHR misses
system.cpu.icache.avg_blocked_cycles_no_mshrs <err: div-0> # average number of cycles each access was blocked
-system.cpu.icache.avg_blocked_cycles_no_targets 5755.187500 # average number of cycles each access was blocked
-system.cpu.icache.avg_refs 19.286169 # Average number of references to valid blocks.
+system.cpu.icache.avg_blocked_cycles_no_targets 5755.250000 # average number of cycles each access was blocked
+system.cpu.icache.avg_refs 19.242857 # Average number of references to valid blocks.
system.cpu.icache.blocked_no_mshrs 0 # number of cycles access was blocked
system.cpu.icache.blocked_no_targets 16 # number of cycles access was blocked
system.cpu.icache.blocked_cycles_no_mshrs 0 # number of cycles access was blocked
-system.cpu.icache.blocked_cycles_no_targets 92083 # number of cycles access was blocked
+system.cpu.icache.blocked_cycles_no_targets 92084 # number of cycles access was blocked
system.cpu.icache.cache_copies 0 # number of cache copies performed
-system.cpu.icache.demand_accesses 13041 # number of demand (read+write) accesses
-system.cpu.icache.demand_accesses_0 13041 # number of demand (read+write) accesses
+system.cpu.icache.demand_accesses 13034 # number of demand (read+write) accesses
+system.cpu.icache.demand_accesses_0 13034 # number of demand (read+write) accesses
system.cpu.icache.demand_accesses_1 0 # number of demand (read+write) accesses
-system.cpu.icache.demand_avg_miss_latency 7799.181319 # average overall miss latency
-system.cpu.icache.demand_avg_miss_latency_0 7799.181319 # average overall miss latency
+system.cpu.icache.demand_avg_miss_latency 7812.430296 # average overall miss latency
+system.cpu.icache.demand_avg_miss_latency_0 7812.430296 # average overall miss latency
system.cpu.icache.demand_avg_miss_latency_1 <err: div-0> # average overall miss latency
-system.cpu.icache.demand_avg_mshr_miss_latency 7166.106518 # average overall mshr miss latency
-system.cpu.icache.demand_avg_mshr_miss_latency_0 7166.106518 # average overall mshr miss latency
+system.cpu.icache.demand_avg_mshr_miss_latency 7184.680952 # average overall mshr miss latency
+system.cpu.icache.demand_avg_mshr_miss_latency_0 7184.680952 # average overall mshr miss latency
system.cpu.icache.demand_avg_mshr_miss_latency_1 <err: div-0> # average overall mshr miss latency
-system.cpu.icache.demand_hits 12131 # number of demand (read+write) hits
-system.cpu.icache.demand_hits_0 12131 # number of demand (read+write) hits
+system.cpu.icache.demand_hits 12123 # number of demand (read+write) hits
+system.cpu.icache.demand_hits_0 12123 # number of demand (read+write) hits
system.cpu.icache.demand_hits_1 0 # number of demand (read+write) hits
-system.cpu.icache.demand_miss_latency 7097255 # number of demand (read+write) miss cycles
-system.cpu.icache.demand_miss_latency_0 7097255 # number of demand (read+write) miss cycles
+system.cpu.icache.demand_miss_latency 7117124 # number of demand (read+write) miss cycles
+system.cpu.icache.demand_miss_latency_0 7117124 # number of demand (read+write) miss cycles
system.cpu.icache.demand_miss_latency_1 0 # number of demand (read+write) miss cycles
-system.cpu.icache.demand_miss_rate 0.069780 # miss rate for demand accesses
-system.cpu.icache.demand_miss_rate_0 0.069780 # miss rate for demand accesses
-system.cpu.icache.demand_miss_rate_1 no value # miss rate for demand accesses
-system.cpu.icache.demand_misses 910 # number of demand (read+write) misses
-system.cpu.icache.demand_misses_0 910 # number of demand (read+write) misses
+system.cpu.icache.demand_miss_rate 0.069894 # miss rate for demand accesses
+system.cpu.icache.demand_miss_rate_0 0.069894 # miss rate for demand accesses
+system.cpu.icache.demand_miss_rate_1 <err: div-0> # miss rate for demand accesses
+system.cpu.icache.demand_misses 911 # number of demand (read+write) misses
+system.cpu.icache.demand_misses_0 911 # number of demand (read+write) misses
system.cpu.icache.demand_misses_1 0 # number of demand (read+write) misses
system.cpu.icache.demand_mshr_hits 281 # number of demand (read+write) MSHR hits
system.cpu.icache.demand_mshr_hits_0 281 # number of demand (read+write) MSHR hits
system.cpu.icache.demand_mshr_hits_1 0 # number of demand (read+write) MSHR hits
-system.cpu.icache.demand_mshr_miss_latency 4507481 # number of demand (read+write) MSHR miss cycles
-system.cpu.icache.demand_mshr_miss_latency_0 4507481 # number of demand (read+write) MSHR miss cycles
+system.cpu.icache.demand_mshr_miss_latency 4526349 # number of demand (read+write) MSHR miss cycles
+system.cpu.icache.demand_mshr_miss_latency_0 4526349 # number of demand (read+write) MSHR miss cycles
system.cpu.icache.demand_mshr_miss_latency_1 0 # number of demand (read+write) MSHR miss cycles
-system.cpu.icache.demand_mshr_miss_rate 0.048232 # mshr miss rate for demand accesses
-system.cpu.icache.demand_mshr_miss_rate_0 0.048232 # mshr miss rate for demand accesses
-system.cpu.icache.demand_mshr_miss_rate_1 no value # mshr miss rate for demand accesses
-system.cpu.icache.demand_mshr_misses 629 # number of demand (read+write) MSHR misses
-system.cpu.icache.demand_mshr_misses_0 629 # number of demand (read+write) MSHR misses
+system.cpu.icache.demand_mshr_miss_rate 0.048335 # mshr miss rate for demand accesses
+system.cpu.icache.demand_mshr_miss_rate_0 0.048335 # mshr miss rate for demand accesses
+system.cpu.icache.demand_mshr_miss_rate_1 <err: div-0> # mshr miss rate for demand accesses
+system.cpu.icache.demand_mshr_misses 630 # number of demand (read+write) MSHR misses
+system.cpu.icache.demand_mshr_misses_0 630 # number of demand (read+write) MSHR misses
system.cpu.icache.demand_mshr_misses_1 0 # number of demand (read+write) MSHR misses
system.cpu.icache.fast_writes 0 # number of fast writes performed
system.cpu.icache.mshr_cap_events 0 # number of times MSHR cap was activated
system.cpu.icache.mshr_cap_events_0 0 # number of times MSHR cap was activated
system.cpu.icache.mshr_cap_events_1 0 # number of times MSHR cap was activated
system.cpu.icache.no_allocate_misses 0 # Number of misses that were no-allocate
-system.cpu.icache.overall_accesses 13041 # number of overall (read+write) accesses
-system.cpu.icache.overall_accesses_0 13041 # number of overall (read+write) accesses
+system.cpu.icache.overall_accesses 13034 # number of overall (read+write) accesses
+system.cpu.icache.overall_accesses_0 13034 # number of overall (read+write) accesses
system.cpu.icache.overall_accesses_1 0 # number of overall (read+write) accesses
-system.cpu.icache.overall_avg_miss_latency 7799.181319 # average overall miss latency
-system.cpu.icache.overall_avg_miss_latency_0 7799.181319 # average overall miss latency
+system.cpu.icache.overall_avg_miss_latency 7812.430296 # average overall miss latency
+system.cpu.icache.overall_avg_miss_latency_0 7812.430296 # average overall miss latency
system.cpu.icache.overall_avg_miss_latency_1 <err: div-0> # average overall miss latency
-system.cpu.icache.overall_avg_mshr_miss_latency 7166.106518 # average overall mshr miss latency
-system.cpu.icache.overall_avg_mshr_miss_latency_0 7166.106518 # average overall mshr miss latency
+system.cpu.icache.overall_avg_mshr_miss_latency 7184.680952 # average overall mshr miss latency
+system.cpu.icache.overall_avg_mshr_miss_latency_0 7184.680952 # average overall mshr miss latency
system.cpu.icache.overall_avg_mshr_miss_latency_1 <err: div-0> # average overall mshr miss latency
-system.cpu.icache.overall_avg_mshr_uncacheable_latency no value # average overall mshr uncacheable latency
-system.cpu.icache.overall_avg_mshr_uncacheable_latency_0 no value # average overall mshr uncacheable latency
-system.cpu.icache.overall_avg_mshr_uncacheable_latency_1 no value # average overall mshr uncacheable latency
-system.cpu.icache.overall_hits 12131 # number of overall hits
-system.cpu.icache.overall_hits_0 12131 # number of overall hits
+system.cpu.icache.overall_avg_mshr_uncacheable_latency <err: div-0> # average overall mshr uncacheable latency
+system.cpu.icache.overall_avg_mshr_uncacheable_latency_0 <err: div-0> # average overall mshr uncacheable latency
+system.cpu.icache.overall_avg_mshr_uncacheable_latency_1 <err: div-0> # average overall mshr uncacheable latency
+system.cpu.icache.overall_hits 12123 # number of overall hits
+system.cpu.icache.overall_hits_0 12123 # number of overall hits
system.cpu.icache.overall_hits_1 0 # number of overall hits
-system.cpu.icache.overall_miss_latency 7097255 # number of overall miss cycles
-system.cpu.icache.overall_miss_latency_0 7097255 # number of overall miss cycles
+system.cpu.icache.overall_miss_latency 7117124 # number of overall miss cycles
+system.cpu.icache.overall_miss_latency_0 7117124 # number of overall miss cycles
system.cpu.icache.overall_miss_latency_1 0 # number of overall miss cycles
-system.cpu.icache.overall_miss_rate 0.069780 # miss rate for overall accesses
-system.cpu.icache.overall_miss_rate_0 0.069780 # miss rate for overall accesses
-system.cpu.icache.overall_miss_rate_1 no value # miss rate for overall accesses
-system.cpu.icache.overall_misses 910 # number of overall misses
-system.cpu.icache.overall_misses_0 910 # number of overall misses
+system.cpu.icache.overall_miss_rate 0.069894 # miss rate for overall accesses
+system.cpu.icache.overall_miss_rate_0 0.069894 # miss rate for overall accesses
+system.cpu.icache.overall_miss_rate_1 <err: div-0> # miss rate for overall accesses
+system.cpu.icache.overall_misses 911 # number of overall misses
+system.cpu.icache.overall_misses_0 911 # number of overall misses
system.cpu.icache.overall_misses_1 0 # number of overall misses
system.cpu.icache.overall_mshr_hits 281 # number of overall MSHR hits
system.cpu.icache.overall_mshr_hits_0 281 # number of overall MSHR hits
system.cpu.icache.overall_mshr_hits_1 0 # number of overall MSHR hits
-system.cpu.icache.overall_mshr_miss_latency 4507481 # number of overall MSHR miss cycles
-system.cpu.icache.overall_mshr_miss_latency_0 4507481 # number of overall MSHR miss cycles
+system.cpu.icache.overall_mshr_miss_latency 4526349 # number of overall MSHR miss cycles
+system.cpu.icache.overall_mshr_miss_latency_0 4526349 # number of overall MSHR miss cycles
system.cpu.icache.overall_mshr_miss_latency_1 0 # number of overall MSHR miss cycles
-system.cpu.icache.overall_mshr_miss_rate 0.048232 # mshr miss rate for overall accesses
-system.cpu.icache.overall_mshr_miss_rate_0 0.048232 # mshr miss rate for overall accesses
-system.cpu.icache.overall_mshr_miss_rate_1 no value # mshr miss rate for overall accesses
-system.cpu.icache.overall_mshr_misses 629 # number of overall MSHR misses
-system.cpu.icache.overall_mshr_misses_0 629 # number of overall MSHR misses
+system.cpu.icache.overall_mshr_miss_rate 0.048335 # mshr miss rate for overall accesses
+system.cpu.icache.overall_mshr_miss_rate_0 0.048335 # mshr miss rate for overall accesses
+system.cpu.icache.overall_mshr_miss_rate_1 <err: div-0> # mshr miss rate for overall accesses
+system.cpu.icache.overall_mshr_misses 630 # number of overall MSHR misses
+system.cpu.icache.overall_mshr_misses_0 630 # number of overall MSHR misses
system.cpu.icache.overall_mshr_misses_1 0 # number of overall MSHR misses
system.cpu.icache.overall_mshr_uncacheable_latency 0 # number of overall MSHR uncacheable cycles
system.cpu.icache.overall_mshr_uncacheable_latency_0 0 # number of overall MSHR uncacheable cycles
@@ -381,83 +381,83 @@ system.cpu.icache.prefetcher.num_hwpf_squashed_from_miss 0
system.cpu.icache.replacements 6 # number of replacements
system.cpu.icache.replacements_0 6 # number of replacements
system.cpu.icache.replacements_1 0 # number of replacements
-system.cpu.icache.sampled_refs 629 # Sample count of references to valid blocks.
+system.cpu.icache.sampled_refs 630 # Sample count of references to valid blocks.
system.cpu.icache.soft_prefetch_mshr_full 0 # number of mshr full events for SW prefetching instrutions
system.cpu.icache.soft_prefetch_mshr_full_0 0 # number of mshr full events for SW prefetching instrutions
system.cpu.icache.soft_prefetch_mshr_full_1 0 # number of mshr full events for SW prefetching instrutions
-system.cpu.icache.tagsinuse 289.520052 # Cycle average of tags in use
-system.cpu.icache.total_refs 12131 # Total number of references to valid blocks.
+system.cpu.icache.tagsinuse 289.830640 # Cycle average of tags in use
+system.cpu.icache.total_refs 12123 # Total number of references to valid blocks.
system.cpu.icache.warmup_cycle 0 # Cycle when the warmup percentage was hit.
system.cpu.icache.writebacks 0 # number of writebacks
system.cpu.icache.writebacks_0 0 # number of writebacks
system.cpu.icache.writebacks_1 0 # number of writebacks
-system.cpu.idleCycles 2047914 # Total number of cycles that the CPU has spent unscheduled due to idling
-system.cpu.iew.EXEC:branches 4335 # Number of branches executed
-system.cpu.iew.EXEC:branches_0 2743 # Number of branches executed
-system.cpu.iew.EXEC:branches_1 1592 # Number of branches executed
+system.cpu.idleCycles 2053704 # Total number of cycles that the CPU has spent unscheduled due to idling
+system.cpu.iew.EXEC:branches 4333 # Number of branches executed
+system.cpu.iew.EXEC:branches_0 2744 # Number of branches executed
+system.cpu.iew.EXEC:branches_1 1589 # Number of branches executed
system.cpu.iew.EXEC:nop 76 # number of nop insts executed
system.cpu.iew.EXEC:nop_0 38 # number of nop insts executed
system.cpu.iew.EXEC:nop_1 38 # number of nop insts executed
-system.cpu.iew.EXEC:rate 0.146521 # Inst execution rate
-system.cpu.iew.EXEC:refs 11792 # number of memory reference insts executed
-system.cpu.iew.EXEC:refs_0 7324 # number of memory reference insts executed
-system.cpu.iew.EXEC:refs_1 4468 # number of memory reference insts executed
-system.cpu.iew.EXEC:stores 3821 # Number of stores executed
-system.cpu.iew.EXEC:stores_0 2506 # Number of stores executed
+system.cpu.iew.EXEC:rate 0.149461 # Inst execution rate
+system.cpu.iew.EXEC:refs 11794 # number of memory reference insts executed
+system.cpu.iew.EXEC:refs_0 7333 # number of memory reference insts executed
+system.cpu.iew.EXEC:refs_1 4461 # number of memory reference insts executed
+system.cpu.iew.EXEC:stores 3822 # Number of stores executed
+system.cpu.iew.EXEC:stores_0 2507 # Number of stores executed
system.cpu.iew.EXEC:stores_1 1315 # Number of stores executed
system.cpu.iew.EXEC:swp 0 # number of swp insts executed
system.cpu.iew.EXEC:swp_0 0 # number of swp insts executed
system.cpu.iew.EXEC:swp_1 0 # number of swp insts executed
-system.cpu.iew.WB:consumers 12302 # num instructions consuming a value
-system.cpu.iew.WB:consumers_0 6628 # num instructions consuming a value
-system.cpu.iew.WB:consumers_1 5674 # num instructions consuming a value
-system.cpu.iew.WB:count 22631 # cumulative count of insts written-back
-system.cpu.iew.WB:count_0 12849 # cumulative count of insts written-back
-system.cpu.iew.WB:count_1 9782 # cumulative count of insts written-back
-system.cpu.iew.WB:fanout 0.818810 # average fanout of values written-back
-system.cpu.iew.WB:fanout_0 0.828908 # average fanout of values written-back
-system.cpu.iew.WB:fanout_1 0.807014 # average fanout of values written-back
+system.cpu.iew.WB:consumers 12300 # num instructions consuming a value
+system.cpu.iew.WB:consumers_0 6629 # num instructions consuming a value
+system.cpu.iew.WB:consumers_1 5671 # num instructions consuming a value
+system.cpu.iew.WB:count 22619 # cumulative count of insts written-back
+system.cpu.iew.WB:count_0 12848 # cumulative count of insts written-back
+system.cpu.iew.WB:count_1 9771 # cumulative count of insts written-back
+system.cpu.iew.WB:fanout 0.818780 # average fanout of values written-back
+system.cpu.iew.WB:fanout_0 0.828933 # average fanout of values written-back
+system.cpu.iew.WB:fanout_1 0.806912 # average fanout of values written-back
system.cpu.iew.WB:penalized 0 # number of instrctions required to write to 'other' IQ
system.cpu.iew.WB:penalized_0 0 # number of instrctions required to write to 'other' IQ
system.cpu.iew.WB:penalized_1 0 # number of instrctions required to write to 'other' IQ
system.cpu.iew.WB:penalized_rate 0 # fraction of instructions written-back that wrote to 'other' IQ
system.cpu.iew.WB:penalized_rate_0 0 # fraction of instructions written-back that wrote to 'other' IQ
system.cpu.iew.WB:penalized_rate_1 0 # fraction of instructions written-back that wrote to 'other' IQ
-system.cpu.iew.WB:producers 10073 # num instructions producing a value
-system.cpu.iew.WB:producers_0 5494 # num instructions producing a value
-system.cpu.iew.WB:producers_1 4579 # num instructions producing a value
-system.cpu.iew.WB:rate 0.119583 # insts written-back per cycle
-system.cpu.iew.WB:rate_0 0.067895 # insts written-back per cycle
-system.cpu.iew.WB:rate_1 0.051689 # insts written-back per cycle
-system.cpu.iew.WB:sent 22783 # cumulative count of insts sent to commit
-system.cpu.iew.WB:sent_0 12935 # cumulative count of insts sent to commit
-system.cpu.iew.WB:sent_1 9848 # cumulative count of insts sent to commit
-system.cpu.iew.branchMispredicts 1057 # Number of branch mispredicts detected at execute
-system.cpu.iew.iewBlockCycles 60428 # Number of cycles IEW is blocking
-system.cpu.iew.iewDispLoadInsts 10160 # Number of dispatched load instructions
+system.cpu.iew.WB:producers 10071 # num instructions producing a value
+system.cpu.iew.WB:producers_0 5495 # num instructions producing a value
+system.cpu.iew.WB:producers_1 4576 # num instructions producing a value
+system.cpu.iew.WB:rate 0.121962 # insts written-back per cycle
+system.cpu.iew.WB:rate_0 0.069276 # insts written-back per cycle
+system.cpu.iew.WB:rate_1 0.052685 # insts written-back per cycle
+system.cpu.iew.WB:sent 22770 # cumulative count of insts sent to commit
+system.cpu.iew.WB:sent_0 12934 # cumulative count of insts sent to commit
+system.cpu.iew.WB:sent_1 9836 # cumulative count of insts sent to commit
+system.cpu.iew.branchMispredicts 1054 # Number of branch mispredicts detected at execute
+system.cpu.iew.iewBlockCycles 56608 # Number of cycles IEW is blocking
+system.cpu.iew.iewDispLoadInsts 10141 # Number of dispatched load instructions
system.cpu.iew.iewDispNonSpecInsts 43 # Number of dispatched non-speculative instructions
-system.cpu.iew.iewDispSquashedInsts 5995 # Number of squashed instructions skipped by dispatch
+system.cpu.iew.iewDispSquashedInsts 5984 # Number of squashed instructions skipped by dispatch
system.cpu.iew.iewDispStoreInsts 8226 # Number of dispatched store instructions
-system.cpu.iew.iewDispatchedInsts 42995 # Number of instructions dispatched to IQ
-system.cpu.iew.iewExecLoadInsts 7971 # Number of load instructions executed
-system.cpu.iew.iewExecLoadInsts_0 4818 # Number of load instructions executed
-system.cpu.iew.iewExecLoadInsts_1 3153 # Number of load instructions executed
-system.cpu.iew.iewExecSquashedInsts 1093 # Number of squashed instructions skipped in execute
-system.cpu.iew.iewExecutedInsts 27729 # Number of executed instructions
+system.cpu.iew.iewDispatchedInsts 42965 # Number of instructions dispatched to IQ
+system.cpu.iew.iewExecLoadInsts 7972 # Number of load instructions executed
+system.cpu.iew.iewExecLoadInsts_0 4826 # Number of load instructions executed
+system.cpu.iew.iewExecLoadInsts_1 3146 # Number of load instructions executed
+system.cpu.iew.iewExecSquashedInsts 1094 # Number of squashed instructions skipped in execute
+system.cpu.iew.iewExecutedInsts 27719 # Number of executed instructions
system.cpu.iew.iewIQFullEvents 37 # Number of times the IQ has become full, causing a stall
system.cpu.iew.iewIdleCycles 0 # Number of cycles IEW is idle
-system.cpu.iew.iewLSQFullEvents 2 # Number of times the LSQ has become full, causing a stall
-system.cpu.iew.iewSquashCycles 6044 # Number of cycles IEW is squashing
-system.cpu.iew.iewUnblockCycles 109 # Number of cycles IEW is unblocking
+system.cpu.iew.iewLSQFullEvents 5 # Number of times the LSQ has become full, causing a stall
+system.cpu.iew.iewSquashCycles 6036 # Number of cycles IEW is squashing
+system.cpu.iew.iewUnblockCycles 111 # Number of cycles IEW is unblocking
system.cpu.iew.lsq.thread.0.blockedLoads 1 # Number of blocked loads due to partial load-store forwarding
-system.cpu.iew.lsq.thread.0.cacheBlocked 3147 # Number of times an access to memory failed due to the cache being blocked
+system.cpu.iew.lsq.thread.0.cacheBlocked 3148 # Number of times an access to memory failed due to the cache being blocked
system.cpu.iew.lsq.thread.0.forwLoads 62 # Number of loads that had data forwarded from stores
system.cpu.iew.lsq.thread.0.ignoredResponses 5 # Number of memory responses ignored because the instruction is squashed
system.cpu.iew.lsq.thread.0.invAddrLoads 0 # Number of loads ignored due to an invalid address
system.cpu.iew.lsq.thread.0.invAddrSwpfs 0 # Number of software prefetches ignored due to an invalid address
-system.cpu.iew.lsq.thread.0.memOrderViolation 40 # Number of memory ordering violations
+system.cpu.iew.lsq.thread.0.memOrderViolation 39 # Number of memory ordering violations
system.cpu.iew.lsq.thread.0.rescheduledLoads 1 # Number of loads that were rescheduled
-system.cpu.iew.lsq.thread.0.squashedLoads 5581 # Number of loads squashed
+system.cpu.iew.lsq.thread.0.squashedLoads 5570 # Number of loads squashed
system.cpu.iew.lsq.thread.0.squashedStores 5025 # Number of stores squashed
system.cpu.iew.lsq.thread.1.blockedLoads 1 # Number of blocked loads due to partial load-store forwarding
system.cpu.iew.lsq.thread.1.cacheBlocked 1500 # Number of times an access to memory failed due to the cache being blocked
@@ -467,35 +467,35 @@ system.cpu.iew.lsq.thread.1.invAddrLoads 0 # Nu
system.cpu.iew.lsq.thread.1.invAddrSwpfs 0 # Number of software prefetches ignored due to an invalid address
system.cpu.iew.lsq.thread.1.memOrderViolation 34 # Number of memory ordering violations
system.cpu.iew.lsq.thread.1.rescheduledLoads 1 # Number of loads that were rescheduled
-system.cpu.iew.lsq.thread.1.squashedLoads 2621 # Number of loads squashed
+system.cpu.iew.lsq.thread.1.squashedLoads 2613 # Number of loads squashed
system.cpu.iew.lsq.thread.1.squashedStores 1577 # Number of stores squashed
-system.cpu.iew.memOrderViolationEvents 74 # Number of memory order violations
-system.cpu.iew.predictedNotTakenIncorrect 830 # Number of branches that were predicted not taken incorrectly
+system.cpu.iew.memOrderViolationEvents 73 # Number of memory order violations
+system.cpu.iew.predictedNotTakenIncorrect 827 # Number of branches that were predicted not taken incorrectly
system.cpu.iew.predictedTakenIncorrect 227 # Number of branches that were predicted taken incorrectly
-system.cpu.ipc_0 0.002514 # IPC: Instructions Per Cycle
-system.cpu.ipc_1 0.002513 # IPC: Instructions Per Cycle
-system.cpu.ipc_total 0.005027 # IPC: Total IPC of All Threads
-system.cpu.iq.ISSUE:FU_type_0 16810 # Type of FU issued
+system.cpu.ipc_0 0.002512 # IPC: Instructions Per Cycle
+system.cpu.ipc_1 0.002511 # IPC: Instructions Per Cycle
+system.cpu.ipc_total 0.005023 # IPC: Total IPC of All Threads
+system.cpu.iq.ISSUE:FU_type_0 16815 # Type of FU issued
system.cpu.iq.ISSUE:FU_type_0.start_dist
-(null) 2 0.01% # Type of FU issued
-IntAlu 9156 54.47% # Type of FU issued
-IntMult 1 0.01% # Type of FU issued
-IntDiv 0 0.00% # Type of FU issued
-FloatAdd 2 0.01% # Type of FU issued
-FloatCmp 0 0.00% # Type of FU issued
-FloatCvt 0 0.00% # Type of FU issued
-FloatMult 0 0.00% # Type of FU issued
-FloatDiv 0 0.00% # Type of FU issued
-FloatSqrt 0 0.00% # Type of FU issued
-MemRead 5111 30.40% # Type of FU issued
-MemWrite 2538 15.10% # Type of FU issued
-IprAccess 0 0.00% # Type of FU issued
-InstPrefetch 0 0.00% # Type of FU issued
+ (null) 2 0.01% # Type of FU issued
+ IntAlu 9152 54.43% # Type of FU issued
+ IntMult 1 0.01% # Type of FU issued
+ IntDiv 0 0.00% # Type of FU issued
+ FloatAdd 2 0.01% # Type of FU issued
+ FloatCmp 0 0.00% # Type of FU issued
+ FloatCvt 0 0.00% # Type of FU issued
+ FloatMult 0 0.00% # Type of FU issued
+ FloatDiv 0 0.00% # Type of FU issued
+ FloatSqrt 0 0.00% # Type of FU issued
+ MemRead 5119 30.44% # Type of FU issued
+ MemWrite 2539 15.10% # Type of FU issued
+ IprAccess 0 0.00% # Type of FU issued
+ InstPrefetch 0 0.00% # Type of FU issued
system.cpu.iq.ISSUE:FU_type_0.end_dist
-system.cpu.iq.ISSUE:FU_type_1 12012 # Type of FU issued
+system.cpu.iq.ISSUE:FU_type_1 11998 # Type of FU issued
system.cpu.iq.ISSUE:FU_type_1.start_dist
(null) 2 0.02% # Type of FU issued
- IntAlu 7390 61.52% # Type of FU issued
+ IntAlu 7386 61.56% # Type of FU issued
IntMult 1 0.01% # Type of FU issued
IntDiv 0 0.00% # Type of FU issued
FloatAdd 2 0.02% # Type of FU issued
@@ -504,15 +504,15 @@ system.cpu.iq.ISSUE:FU_type_1.start_dist
FloatMult 0 0.00% # Type of FU issued
FloatDiv 0 0.00% # Type of FU issued
FloatSqrt 0 0.00% # Type of FU issued
- MemRead 3275 27.26% # Type of FU issued
- MemWrite 1342 11.17% # Type of FU issued
+ MemRead 3265 27.21% # Type of FU issued
+ MemWrite 1342 11.19% # Type of FU issued
IprAccess 0 0.00% # Type of FU issued
InstPrefetch 0 0.00% # Type of FU issued
system.cpu.iq.ISSUE:FU_type_1.end_dist
-system.cpu.iq.ISSUE:FU_type 28822 # Type of FU issued
+system.cpu.iq.ISSUE:FU_type 28813 # Type of FU issued
system.cpu.iq.ISSUE:FU_type.start_dist
(null) 4 0.01% # Type of FU issued
- IntAlu 16546 57.41% # Type of FU issued
+ IntAlu 16538 57.40% # Type of FU issued
IntMult 2 0.01% # Type of FU issued
IntDiv 0 0.00% # Type of FU issued
FloatAdd 4 0.01% # Type of FU issued
@@ -521,20 +521,20 @@ system.cpu.iq.ISSUE:FU_type.start_dist
FloatMult 0 0.00% # Type of FU issued
FloatDiv 0 0.00% # Type of FU issued
FloatSqrt 0 0.00% # Type of FU issued
- MemRead 8386 29.10% # Type of FU issued
- MemWrite 3880 13.46% # Type of FU issued
+ MemRead 8384 29.10% # Type of FU issued
+ MemWrite 3881 13.47% # Type of FU issued
IprAccess 0 0.00% # Type of FU issued
InstPrefetch 0 0.00% # Type of FU issued
system.cpu.iq.ISSUE:FU_type.end_dist
-system.cpu.iq.ISSUE:fu_busy_cnt 154 # FU busy when requested
+system.cpu.iq.ISSUE:fu_busy_cnt 150 # FU busy when requested
system.cpu.iq.ISSUE:fu_busy_cnt_0 76 # FU busy when requested
-system.cpu.iq.ISSUE:fu_busy_cnt_1 78 # FU busy when requested
-system.cpu.iq.ISSUE:fu_busy_rate 0.005343 # FU busy rate (busy events/executed inst)
-system.cpu.iq.ISSUE:fu_busy_rate_0 0.002637 # FU busy rate (busy events/executed inst)
-system.cpu.iq.ISSUE:fu_busy_rate_1 0.002706 # FU busy rate (busy events/executed inst)
+system.cpu.iq.ISSUE:fu_busy_cnt_1 74 # FU busy when requested
+system.cpu.iq.ISSUE:fu_busy_rate 0.005206 # FU busy rate (busy events/executed inst)
+system.cpu.iq.ISSUE:fu_busy_rate_0 0.002638 # FU busy rate (busy events/executed inst)
+system.cpu.iq.ISSUE:fu_busy_rate_1 0.002568 # FU busy rate (busy events/executed inst)
system.cpu.iq.ISSUE:fu_full.start_dist
(null) 0 0.00% # attempts to use FU when none available
- IntAlu 3 1.95% # attempts to use FU when none available
+ IntAlu 3 2.00% # attempts to use FU when none available
IntMult 0 0.00% # attempts to use FU when none available
IntDiv 0 0.00% # attempts to use FU when none available
FloatAdd 0 0.00% # attempts to use FU when none available
@@ -543,135 +543,135 @@ system.cpu.iq.ISSUE:fu_full.start_dist
FloatMult 0 0.00% # attempts to use FU when none available
FloatDiv 0 0.00% # attempts to use FU when none available
FloatSqrt 0 0.00% # attempts to use FU when none available
- MemRead 86 55.84% # attempts to use FU when none available
- MemWrite 65 42.21% # attempts to use FU when none available
+ MemRead 84 56.00% # attempts to use FU when none available
+ MemWrite 63 42.00% # attempts to use FU when none available
IprAccess 0 0.00% # attempts to use FU when none available
InstPrefetch 0 0.00% # attempts to use FU when none available
system.cpu.iq.ISSUE:fu_full.end_dist
system.cpu.iq.ISSUE:issued_per_cycle.start_dist # Number of insts issued each cycle
-system.cpu.iq.ISSUE:issued_per_cycle.samples 189249
+system.cpu.iq.ISSUE:issued_per_cycle.samples 185460
system.cpu.iq.ISSUE:issued_per_cycle.min_value 0
- 0 174743 9233.50%
- 1 7200 380.45%
- 2 2967 156.78%
- 3 2563 135.43%
- 4 1137 60.08%
- 5 450 23.78%
- 6 138 7.29%
- 7 35 1.85%
- 8 16 0.85%
+ 0 170959 9218.11%
+ 1 7202 388.33%
+ 2 2947 158.90%
+ 3 2569 138.52%
+ 4 1155 62.28%
+ 5 444 23.94%
+ 6 134 7.23%
+ 7 34 1.83%
+ 8 16 0.86%
system.cpu.iq.ISSUE:issued_per_cycle.max_value 8
system.cpu.iq.ISSUE:issued_per_cycle.end_dist
-system.cpu.iq.ISSUE:rate 0.152297 # Inst issue rate
-system.cpu.iq.iqInstsAdded 42876 # Number of instructions added to the IQ (excludes non-spec)
-system.cpu.iq.iqInstsIssued 28822 # Number of instructions issued
+system.cpu.iq.ISSUE:rate 0.155360 # Inst issue rate
+system.cpu.iq.iqInstsAdded 42846 # Number of instructions added to the IQ (excludes non-spec)
+system.cpu.iq.iqInstsIssued 28813 # Number of instructions issued
system.cpu.iq.iqNonSpecInstsAdded 43 # Number of non-speculative instructions added to the IQ
-system.cpu.iq.iqSquashedInstsExamined 30249 # Number of squashed instructions iterated over during squash; mainly for profiling
-system.cpu.iq.iqSquashedInstsIssued 220 # Number of squashed instructions issued
+system.cpu.iq.iqSquashedInstsExamined 30225 # Number of squashed instructions iterated over during squash; mainly for profiling
+system.cpu.iq.iqSquashedInstsIssued 210 # Number of squashed instructions issued
system.cpu.iq.iqSquashedNonSpecRemoved 9 # Number of squashed non-spec instructions that were removed
-system.cpu.iq.iqSquashedOperandsExamined 25020 # Number of squashed operands that are examined and possibly removed from graph
-system.cpu.l2cache.ReadReq_accesses 975 # number of ReadReq accesses(hits+misses)
-system.cpu.l2cache.ReadReq_accesses_0 975 # number of ReadReq accesses(hits+misses)
-system.cpu.l2cache.ReadReq_avg_miss_latency 6774.326824 # average ReadReq miss latency
-system.cpu.l2cache.ReadReq_avg_miss_latency_0 6774.326824 # average ReadReq miss latency
-system.cpu.l2cache.ReadReq_avg_mshr_miss_latency 3621.391572 # average ReadReq mshr miss latency
-system.cpu.l2cache.ReadReq_avg_mshr_miss_latency_0 3621.391572 # average ReadReq mshr miss latency
+system.cpu.iq.iqSquashedOperandsExamined 24996 # Number of squashed operands that are examined and possibly removed from graph
+system.cpu.l2cache.ReadReq_accesses 976 # number of ReadReq accesses(hits+misses)
+system.cpu.l2cache.ReadReq_accesses_0 976 # number of ReadReq accesses(hits+misses)
+system.cpu.l2cache.ReadReq_avg_miss_latency 6784.690965 # average ReadReq miss latency
+system.cpu.l2cache.ReadReq_avg_miss_latency_0 6784.690965 # average ReadReq miss latency
+system.cpu.l2cache.ReadReq_avg_mshr_miss_latency 3622.808008 # average ReadReq mshr miss latency
+system.cpu.l2cache.ReadReq_avg_mshr_miss_latency_0 3622.808008 # average ReadReq mshr miss latency
system.cpu.l2cache.ReadReq_hits 2 # number of ReadReq hits
system.cpu.l2cache.ReadReq_hits_0 2 # number of ReadReq hits
-system.cpu.l2cache.ReadReq_miss_latency 6591420 # number of ReadReq miss cycles
-system.cpu.l2cache.ReadReq_miss_latency_0 6591420 # number of ReadReq miss cycles
-system.cpu.l2cache.ReadReq_miss_rate 0.997949 # miss rate for ReadReq accesses
-system.cpu.l2cache.ReadReq_miss_rate_0 0.997949 # miss rate for ReadReq accesses
-system.cpu.l2cache.ReadReq_misses 973 # number of ReadReq misses
-system.cpu.l2cache.ReadReq_misses_0 973 # number of ReadReq misses
-system.cpu.l2cache.ReadReq_mshr_miss_latency 3523614 # number of ReadReq MSHR miss cycles
-system.cpu.l2cache.ReadReq_mshr_miss_latency_0 3523614 # number of ReadReq MSHR miss cycles
-system.cpu.l2cache.ReadReq_mshr_miss_rate 0.997949 # mshr miss rate for ReadReq accesses
-system.cpu.l2cache.ReadReq_mshr_miss_rate_0 0.997949 # mshr miss rate for ReadReq accesses
-system.cpu.l2cache.ReadReq_mshr_misses 973 # number of ReadReq MSHR misses
-system.cpu.l2cache.ReadReq_mshr_misses_0 973 # number of ReadReq MSHR misses
+system.cpu.l2cache.ReadReq_miss_latency 6608289 # number of ReadReq miss cycles
+system.cpu.l2cache.ReadReq_miss_latency_0 6608289 # number of ReadReq miss cycles
+system.cpu.l2cache.ReadReq_miss_rate 0.997951 # miss rate for ReadReq accesses
+system.cpu.l2cache.ReadReq_miss_rate_0 0.997951 # miss rate for ReadReq accesses
+system.cpu.l2cache.ReadReq_misses 974 # number of ReadReq misses
+system.cpu.l2cache.ReadReq_misses_0 974 # number of ReadReq misses
+system.cpu.l2cache.ReadReq_mshr_miss_latency 3528615 # number of ReadReq MSHR miss cycles
+system.cpu.l2cache.ReadReq_mshr_miss_latency_0 3528615 # number of ReadReq MSHR miss cycles
+system.cpu.l2cache.ReadReq_mshr_miss_rate 0.997951 # mshr miss rate for ReadReq accesses
+system.cpu.l2cache.ReadReq_mshr_miss_rate_0 0.997951 # mshr miss rate for ReadReq accesses
+system.cpu.l2cache.ReadReq_mshr_misses 974 # number of ReadReq MSHR misses
+system.cpu.l2cache.ReadReq_mshr_misses_0 974 # number of ReadReq MSHR misses
system.cpu.l2cache.avg_blocked_cycles_no_mshrs <err: div-0> # average number of cycles each access was blocked
system.cpu.l2cache.avg_blocked_cycles_no_targets <err: div-0> # average number of cycles each access was blocked
-system.cpu.l2cache.avg_refs 0.002055 # Average number of references to valid blocks.
+system.cpu.l2cache.avg_refs 0.002053 # Average number of references to valid blocks.
system.cpu.l2cache.blocked_no_mshrs 0 # number of cycles access was blocked
system.cpu.l2cache.blocked_no_targets 0 # number of cycles access was blocked
system.cpu.l2cache.blocked_cycles_no_mshrs 0 # number of cycles access was blocked
system.cpu.l2cache.blocked_cycles_no_targets 0 # number of cycles access was blocked
system.cpu.l2cache.cache_copies 0 # number of cache copies performed
-system.cpu.l2cache.demand_accesses 975 # number of demand (read+write) accesses
-system.cpu.l2cache.demand_accesses_0 975 # number of demand (read+write) accesses
+system.cpu.l2cache.demand_accesses 976 # number of demand (read+write) accesses
+system.cpu.l2cache.demand_accesses_0 976 # number of demand (read+write) accesses
system.cpu.l2cache.demand_accesses_1 0 # number of demand (read+write) accesses
-system.cpu.l2cache.demand_avg_miss_latency 6774.326824 # average overall miss latency
-system.cpu.l2cache.demand_avg_miss_latency_0 6774.326824 # average overall miss latency
+system.cpu.l2cache.demand_avg_miss_latency 6784.690965 # average overall miss latency
+system.cpu.l2cache.demand_avg_miss_latency_0 6784.690965 # average overall miss latency
system.cpu.l2cache.demand_avg_miss_latency_1 <err: div-0> # average overall miss latency
-system.cpu.l2cache.demand_avg_mshr_miss_latency 3621.391572 # average overall mshr miss latency
-system.cpu.l2cache.demand_avg_mshr_miss_latency_0 3621.391572 # average overall mshr miss latency
+system.cpu.l2cache.demand_avg_mshr_miss_latency 3622.808008 # average overall mshr miss latency
+system.cpu.l2cache.demand_avg_mshr_miss_latency_0 3622.808008 # average overall mshr miss latency
system.cpu.l2cache.demand_avg_mshr_miss_latency_1 <err: div-0> # average overall mshr miss latency
system.cpu.l2cache.demand_hits 2 # number of demand (read+write) hits
system.cpu.l2cache.demand_hits_0 2 # number of demand (read+write) hits
system.cpu.l2cache.demand_hits_1 0 # number of demand (read+write) hits
-system.cpu.l2cache.demand_miss_latency 6591420 # number of demand (read+write) miss cycles
-system.cpu.l2cache.demand_miss_latency_0 6591420 # number of demand (read+write) miss cycles
+system.cpu.l2cache.demand_miss_latency 6608289 # number of demand (read+write) miss cycles
+system.cpu.l2cache.demand_miss_latency_0 6608289 # number of demand (read+write) miss cycles
system.cpu.l2cache.demand_miss_latency_1 0 # number of demand (read+write) miss cycles
-system.cpu.l2cache.demand_miss_rate 0.997949 # miss rate for demand accesses
-system.cpu.l2cache.demand_miss_rate_0 0.997949 # miss rate for demand accesses
-system.cpu.l2cache.demand_miss_rate_1 no value # miss rate for demand accesses
-system.cpu.l2cache.demand_misses 973 # number of demand (read+write) misses
-system.cpu.l2cache.demand_misses_0 973 # number of demand (read+write) misses
+system.cpu.l2cache.demand_miss_rate 0.997951 # miss rate for demand accesses
+system.cpu.l2cache.demand_miss_rate_0 0.997951 # miss rate for demand accesses
+system.cpu.l2cache.demand_miss_rate_1 <err: div-0> # miss rate for demand accesses
+system.cpu.l2cache.demand_misses 974 # number of demand (read+write) misses
+system.cpu.l2cache.demand_misses_0 974 # number of demand (read+write) misses
system.cpu.l2cache.demand_misses_1 0 # number of demand (read+write) misses
system.cpu.l2cache.demand_mshr_hits 0 # number of demand (read+write) MSHR hits
system.cpu.l2cache.demand_mshr_hits_0 0 # number of demand (read+write) MSHR hits
system.cpu.l2cache.demand_mshr_hits_1 0 # number of demand (read+write) MSHR hits
-system.cpu.l2cache.demand_mshr_miss_latency 3523614 # number of demand (read+write) MSHR miss cycles
-system.cpu.l2cache.demand_mshr_miss_latency_0 3523614 # number of demand (read+write) MSHR miss cycles
+system.cpu.l2cache.demand_mshr_miss_latency 3528615 # number of demand (read+write) MSHR miss cycles
+system.cpu.l2cache.demand_mshr_miss_latency_0 3528615 # number of demand (read+write) MSHR miss cycles
system.cpu.l2cache.demand_mshr_miss_latency_1 0 # number of demand (read+write) MSHR miss cycles
-system.cpu.l2cache.demand_mshr_miss_rate 0.997949 # mshr miss rate for demand accesses
-system.cpu.l2cache.demand_mshr_miss_rate_0 0.997949 # mshr miss rate for demand accesses
-system.cpu.l2cache.demand_mshr_miss_rate_1 no value # mshr miss rate for demand accesses
-system.cpu.l2cache.demand_mshr_misses 973 # number of demand (read+write) MSHR misses
-system.cpu.l2cache.demand_mshr_misses_0 973 # number of demand (read+write) MSHR misses
+system.cpu.l2cache.demand_mshr_miss_rate 0.997951 # mshr miss rate for demand accesses
+system.cpu.l2cache.demand_mshr_miss_rate_0 0.997951 # mshr miss rate for demand accesses
+system.cpu.l2cache.demand_mshr_miss_rate_1 <err: div-0> # mshr miss rate for demand accesses
+system.cpu.l2cache.demand_mshr_misses 974 # number of demand (read+write) MSHR misses
+system.cpu.l2cache.demand_mshr_misses_0 974 # number of demand (read+write) MSHR misses
system.cpu.l2cache.demand_mshr_misses_1 0 # number of demand (read+write) MSHR misses
system.cpu.l2cache.fast_writes 0 # number of fast writes performed
system.cpu.l2cache.mshr_cap_events 0 # number of times MSHR cap was activated
system.cpu.l2cache.mshr_cap_events_0 0 # number of times MSHR cap was activated
system.cpu.l2cache.mshr_cap_events_1 0 # number of times MSHR cap was activated
system.cpu.l2cache.no_allocate_misses 0 # Number of misses that were no-allocate
-system.cpu.l2cache.overall_accesses 975 # number of overall (read+write) accesses
-system.cpu.l2cache.overall_accesses_0 975 # number of overall (read+write) accesses
+system.cpu.l2cache.overall_accesses 976 # number of overall (read+write) accesses
+system.cpu.l2cache.overall_accesses_0 976 # number of overall (read+write) accesses
system.cpu.l2cache.overall_accesses_1 0 # number of overall (read+write) accesses
-system.cpu.l2cache.overall_avg_miss_latency 6774.326824 # average overall miss latency
-system.cpu.l2cache.overall_avg_miss_latency_0 6774.326824 # average overall miss latency
+system.cpu.l2cache.overall_avg_miss_latency 6784.690965 # average overall miss latency
+system.cpu.l2cache.overall_avg_miss_latency_0 6784.690965 # average overall miss latency
system.cpu.l2cache.overall_avg_miss_latency_1 <err: div-0> # average overall miss latency
-system.cpu.l2cache.overall_avg_mshr_miss_latency 3621.391572 # average overall mshr miss latency
-system.cpu.l2cache.overall_avg_mshr_miss_latency_0 3621.391572 # average overall mshr miss latency
+system.cpu.l2cache.overall_avg_mshr_miss_latency 3622.808008 # average overall mshr miss latency
+system.cpu.l2cache.overall_avg_mshr_miss_latency_0 3622.808008 # average overall mshr miss latency
system.cpu.l2cache.overall_avg_mshr_miss_latency_1 <err: div-0> # average overall mshr miss latency
-system.cpu.l2cache.overall_avg_mshr_uncacheable_latency no value # average overall mshr uncacheable latency
-system.cpu.l2cache.overall_avg_mshr_uncacheable_latency_0 no value # average overall mshr uncacheable latency
-system.cpu.l2cache.overall_avg_mshr_uncacheable_latency_1 no value # average overall mshr uncacheable latency
+system.cpu.l2cache.overall_avg_mshr_uncacheable_latency <err: div-0> # average overall mshr uncacheable latency
+system.cpu.l2cache.overall_avg_mshr_uncacheable_latency_0 <err: div-0> # average overall mshr uncacheable latency
+system.cpu.l2cache.overall_avg_mshr_uncacheable_latency_1 <err: div-0> # average overall mshr uncacheable latency
system.cpu.l2cache.overall_hits 2 # number of overall hits
system.cpu.l2cache.overall_hits_0 2 # number of overall hits
system.cpu.l2cache.overall_hits_1 0 # number of overall hits
-system.cpu.l2cache.overall_miss_latency 6591420 # number of overall miss cycles
-system.cpu.l2cache.overall_miss_latency_0 6591420 # number of overall miss cycles
+system.cpu.l2cache.overall_miss_latency 6608289 # number of overall miss cycles
+system.cpu.l2cache.overall_miss_latency_0 6608289 # number of overall miss cycles
system.cpu.l2cache.overall_miss_latency_1 0 # number of overall miss cycles
-system.cpu.l2cache.overall_miss_rate 0.997949 # miss rate for overall accesses
-system.cpu.l2cache.overall_miss_rate_0 0.997949 # miss rate for overall accesses
-system.cpu.l2cache.overall_miss_rate_1 no value # miss rate for overall accesses
-system.cpu.l2cache.overall_misses 973 # number of overall misses
-system.cpu.l2cache.overall_misses_0 973 # number of overall misses
+system.cpu.l2cache.overall_miss_rate 0.997951 # miss rate for overall accesses
+system.cpu.l2cache.overall_miss_rate_0 0.997951 # miss rate for overall accesses
+system.cpu.l2cache.overall_miss_rate_1 <err: div-0> # miss rate for overall accesses
+system.cpu.l2cache.overall_misses 974 # number of overall misses
+system.cpu.l2cache.overall_misses_0 974 # number of overall misses
system.cpu.l2cache.overall_misses_1 0 # number of overall misses
system.cpu.l2cache.overall_mshr_hits 0 # number of overall MSHR hits
system.cpu.l2cache.overall_mshr_hits_0 0 # number of overall MSHR hits
system.cpu.l2cache.overall_mshr_hits_1 0 # number of overall MSHR hits
-system.cpu.l2cache.overall_mshr_miss_latency 3523614 # number of overall MSHR miss cycles
-system.cpu.l2cache.overall_mshr_miss_latency_0 3523614 # number of overall MSHR miss cycles
+system.cpu.l2cache.overall_mshr_miss_latency 3528615 # number of overall MSHR miss cycles
+system.cpu.l2cache.overall_mshr_miss_latency_0 3528615 # number of overall MSHR miss cycles
system.cpu.l2cache.overall_mshr_miss_latency_1 0 # number of overall MSHR miss cycles
-system.cpu.l2cache.overall_mshr_miss_rate 0.997949 # mshr miss rate for overall accesses
-system.cpu.l2cache.overall_mshr_miss_rate_0 0.997949 # mshr miss rate for overall accesses
-system.cpu.l2cache.overall_mshr_miss_rate_1 no value # mshr miss rate for overall accesses
-system.cpu.l2cache.overall_mshr_misses 973 # number of overall MSHR misses
-system.cpu.l2cache.overall_mshr_misses_0 973 # number of overall MSHR misses
+system.cpu.l2cache.overall_mshr_miss_rate 0.997951 # mshr miss rate for overall accesses
+system.cpu.l2cache.overall_mshr_miss_rate_0 0.997951 # mshr miss rate for overall accesses
+system.cpu.l2cache.overall_mshr_miss_rate_1 <err: div-0> # mshr miss rate for overall accesses
+system.cpu.l2cache.overall_mshr_misses 974 # number of overall MSHR misses
+system.cpu.l2cache.overall_mshr_misses_0 974 # number of overall MSHR misses
system.cpu.l2cache.overall_mshr_misses_1 0 # number of overall MSHR misses
system.cpu.l2cache.overall_mshr_uncacheable_latency 0 # number of overall MSHR uncacheable cycles
system.cpu.l2cache.overall_mshr_uncacheable_latency_0 0 # number of overall MSHR uncacheable cycles
@@ -691,35 +691,35 @@ system.cpu.l2cache.prefetcher.num_hwpf_squashed_from_miss 0
system.cpu.l2cache.replacements 0 # number of replacements
system.cpu.l2cache.replacements_0 0 # number of replacements
system.cpu.l2cache.replacements_1 0 # number of replacements
-system.cpu.l2cache.sampled_refs 973 # Sample count of references to valid blocks.
+system.cpu.l2cache.sampled_refs 974 # Sample count of references to valid blocks.
system.cpu.l2cache.soft_prefetch_mshr_full 0 # number of mshr full events for SW prefetching instrutions
system.cpu.l2cache.soft_prefetch_mshr_full_0 0 # number of mshr full events for SW prefetching instrutions
system.cpu.l2cache.soft_prefetch_mshr_full_1 0 # number of mshr full events for SW prefetching instrutions
-system.cpu.l2cache.tagsinuse 489.175621 # Cycle average of tags in use
+system.cpu.l2cache.tagsinuse 489.614756 # Cycle average of tags in use
system.cpu.l2cache.total_refs 2 # Total number of references to valid blocks.
system.cpu.l2cache.warmup_cycle 0 # Cycle when the warmup percentage was hit.
system.cpu.l2cache.writebacks 0 # number of writebacks
system.cpu.l2cache.writebacks_0 0 # number of writebacks
system.cpu.l2cache.writebacks_1 0 # number of writebacks
-system.cpu.numCycles 189249 # number of cpu cycles simulated
-system.cpu.rename.RENAME:BlockCycles 77071 # Number of cycles rename is blocking
+system.cpu.numCycles 185460 # number of cpu cycles simulated
+system.cpu.rename.RENAME:BlockCycles 73308 # Number of cycles rename is blocking
system.cpu.rename.RENAME:CommittedMaps 8102 # Number of HB maps that are committed
-system.cpu.rename.RENAME:IQFullEvents 22 # Number of times rename has blocked due to IQ full
-system.cpu.rename.RENAME:IdleCycles 258812 # Number of cycles rename is idle
-system.cpu.rename.RENAME:LSQFullEvents 2912 # Number of times rename has blocked due to LSQ full
-system.cpu.rename.RENAME:ROBFullEvents 26 # Number of times rename has blocked due to ROB full
-system.cpu.rename.RENAME:RenameLookups 78724 # Number of register rename lookups that rename has made
-system.cpu.rename.RENAME:RenamedInsts 64105 # Number of instructions processed by rename
-system.cpu.rename.RENAME:RenamedOperands 44626 # Number of destination operands rename has renamed
-system.cpu.rename.RENAME:RunCycles 11563 # Number of cycles rename is running
-system.cpu.rename.RENAME:SquashCycles 6044 # Number of cycles rename is squashing
-system.cpu.rename.RENAME:UnblockCycles 2613 # Number of cycles rename is unblocking
-system.cpu.rename.RENAME:UndoneMaps 36524 # Number of HB maps that are undone due to squashing
-system.cpu.rename.RENAME:serializeStallCycles 22222 # count of cycles rename stalled for serializing inst
+system.cpu.rename.RENAME:IQFullEvents 20 # Number of times rename has blocked due to IQ full
+system.cpu.rename.RENAME:IdleCycles 256900 # Number of cycles rename is idle
+system.cpu.rename.RENAME:LSQFullEvents 2907 # Number of times rename has blocked due to LSQ full
+system.cpu.rename.RENAME:ROBFullEvents 29 # Number of times rename has blocked due to ROB full
+system.cpu.rename.RENAME:RenameLookups 78661 # Number of register rename lookups that rename has made
+system.cpu.rename.RENAME:RenamedInsts 64047 # Number of instructions processed by rename
+system.cpu.rename.RENAME:RenamedOperands 44573 # Number of destination operands rename has renamed
+system.cpu.rename.RENAME:RunCycles 11548 # Number of cycles rename is running
+system.cpu.rename.RENAME:SquashCycles 6036 # Number of cycles rename is squashing
+system.cpu.rename.RENAME:UnblockCycles 2611 # Number of cycles rename is unblocking
+system.cpu.rename.RENAME:UndoneMaps 36471 # Number of HB maps that are undone due to squashing
+system.cpu.rename.RENAME:serializeStallCycles 20343 # count of cycles rename stalled for serializing inst
system.cpu.rename.RENAME:serializingInsts 52 # count of serializing insts renamed
-system.cpu.rename.RENAME:skidInsts 5371 # count of insts added to the skid buffer
+system.cpu.rename.RENAME:skidInsts 5370 # count of insts added to the skid buffer
system.cpu.rename.RENAME:tempSerializingInsts 39 # count of temporary serializing insts renamed
-system.cpu.timesIdled 686 # Number of times that the entire CPU went into an idle state and unscheduled itself
+system.cpu.timesIdled 688 # Number of times that the entire CPU went into an idle state and unscheduled itself
system.cpu.workload0.PROG:num_syscalls 17 # Number of system calls
system.cpu.workload1.PROG:num_syscalls 17 # Number of system calls
diff --git a/tests/quick/01.hello-2T-smt/ref/alpha/linux/o3-timing/stderr b/tests/quick/01.hello-2T-smt/ref/alpha/linux/o3-timing/stderr
index e192672a7..922a00186 100644
--- a/tests/quick/01.hello-2T-smt/ref/alpha/linux/o3-timing/stderr
+++ b/tests/quick/01.hello-2T-smt/ref/alpha/linux/o3-timing/stderr
@@ -1,7 +1,5 @@
warn: Entering event queue @ 0. Starting simulation...
warn: cycle 0: fault (page_table_fault) detected @ PC 0x000000
-warn: Increasing stack 0x11ff92000:0x11ff9b000 to 0x11ff90000:0x11ff9b000 because of access to 0x11ff91ff0
-warn: Increasing stack 0x11ff92000:0x11ff9b000 to 0x11ff90000:0x11ff9b000 because of access to 0x11ff91ff0
warn: Default fetch doesn't update it's state from a functional call.
warn: Default fetch doesn't update it's state from a functional call.
warn: Default fetch doesn't update it's state from a functional call.
@@ -28,5 +26,4 @@ warn: cycle 1311129: fault (page_table_fault) detected @ PC 0x000000
warn: Default fetch doesn't update it's state from a functional call.
warn: Default fetch doesn't update it's state from a functional call.
warn: Default fetch doesn't update it's state from a functional call.
-warn: Found outstanding miss on an non-update probe
warn: Default fetch doesn't update it's state from a functional call.
diff --git a/tests/quick/01.hello-2T-smt/ref/alpha/linux/o3-timing/stdout b/tests/quick/01.hello-2T-smt/ref/alpha/linux/o3-timing/stdout
index 9ffc67aec..92d806315 100644
--- a/tests/quick/01.hello-2T-smt/ref/alpha/linux/o3-timing/stdout
+++ b/tests/quick/01.hello-2T-smt/ref/alpha/linux/o3-timing/stdout
@@ -7,8 +7,8 @@ The Regents of The University of Michigan
All Rights Reserved
-M5 compiled Oct 13 2006 16:07:10
-M5 started Fri Oct 13 16:09:16 2006
+M5 compiled Nov 3 2006 17:10:27
+M5 started Fri Nov 3 17:10:57 2006
M5 executing on zizzer.eecs.umich.edu
-command line: build/ALPHA_SE/m5.debug -d build/ALPHA_SE/tests/debug/quick/01.hello-2T-smt/alpha/linux/o3-timing tests/run.py quick/01.hello-2T-smt/alpha/linux/o3-timing
-Exiting @ tick 2237162 because target called exit()
+command line: build/ALPHA_SE/m5.opt -d build/ALPHA_SE/tests/opt/quick/01.hello-2T-smt/alpha/linux/o3-timing tests/run.py quick/01.hello-2T-smt/alpha/linux/o3-timing
+Exiting @ tick 2239163 because target called exit()
diff --git a/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-atomic-dual/config.ini b/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-atomic-dual/config.ini
index c45637b94..409b641a2 100644
--- a/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-atomic-dual/config.ini
+++ b/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-atomic-dual/config.ini
@@ -85,7 +85,6 @@ max_insts_all_threads=0
max_insts_any_thread=0
max_loads_all_threads=0
max_loads_any_thread=0
-mem=system.physmem
profile=0
progress_interval=0
simulate_stalls=false
@@ -116,7 +115,6 @@ max_insts_all_threads=0
max_insts_any_thread=0
max_loads_all_threads=0
max_loads_any_thread=0
-mem=system.physmem
profile=0
progress_interval=0
simulate_stalls=false
@@ -177,19 +175,39 @@ cpu=system.cpu0
[system.iobus]
type=Bus
+children=responder
bus_id=0
clock=2
+responder_set=true
width=64
default=system.tsunami.pciconfig.pio
port=system.bridge.side_a system.tsunami.cchip.pio system.tsunami.pchip.pio system.tsunami.fake_sm_chip.pio system.tsunami.fake_uart1.pio system.tsunami.fake_uart2.pio system.tsunami.fake_uart3.pio system.tsunami.fake_uart4.pio system.tsunami.fake_ppc.pio system.tsunami.fake_OROM.pio system.tsunami.fake_pnp_addr.pio system.tsunami.fake_pnp_write.pio system.tsunami.fake_pnp_read0.pio system.tsunami.fake_pnp_read1.pio system.tsunami.fake_pnp_read2.pio system.tsunami.fake_pnp_read3.pio system.tsunami.fake_pnp_read4.pio system.tsunami.fake_pnp_read5.pio system.tsunami.fake_pnp_read6.pio system.tsunami.fake_pnp_read7.pio system.tsunami.fake_ata0.pio system.tsunami.fake_ata1.pio system.tsunami.fb.pio system.tsunami.io.pio system.tsunami.uart.pio system.tsunami.console.pio system.tsunami.ide.pio system.tsunami.ethernet.pio system.tsunami.ethernet.config system.tsunami.ethernet.dma system.tsunami.ide.config system.tsunami.ide.dma
+[system.iobus.responder]
+type=BadAddr
+pio_addr=0
+pio_latency=0
+platform=system.tsunami
+system=system
+
[system.membus]
type=Bus
+children=responder
bus_id=1
clock=2
+responder_set=false
width=64
+default=system.membus.responder.pio
port=system.bridge.side_b system.physmem.port system.cpu0.icache_port system.cpu0.dcache_port system.cpu1.icache_port system.cpu1.dcache_port
+[system.membus.responder]
+type=BadAddr
+pio_addr=0
+pio_latency=0
+platform=system.tsunami
+system=system
+pio=system.membus.default
+
[system.physmem]
type=PhysicalMemory
file=
diff --git a/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-atomic-dual/config.out b/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-atomic-dual/config.out
index 45cbbec9b..d4ec3e5b3 100644
--- a/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-atomic-dual/config.out
+++ b/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-atomic-dual/config.out
@@ -31,6 +31,50 @@ type=Bus
bus_id=1
clock=2
width=64
+responder_set=false
+
+[system.cpu0.itb]
+type=AlphaITB
+size=48
+
+[system.cpu0.dtb]
+type=AlphaDTB
+size=64
+
+[system.cpu0]
+type=AtomicSimpleCPU
+max_insts_any_thread=0
+max_insts_all_threads=0
+max_loads_any_thread=0
+max_loads_all_threads=0
+progress_interval=0
+system=system
+cpu_id=0
+itb=system.cpu0.itb
+dtb=system.cpu0.dtb
+profile=0
+clock=1
+defer_registration=false
+width=1
+function_trace=false
+function_trace_start=0
+simulate_stalls=false
+
+[system.intrctrl]
+type=IntrControl
+cpu=system.cpu0
+
+[system.tsunami]
+type=Tsunami
+system=system
+intrctrl=system.intrctrl
+
+[system.membus.responder]
+type=BadAddr
+pio_addr=0
+pio_latency=0
+platform=system.tsunami
+system=system
[system.bridge]
type=Bridge
@@ -75,34 +119,6 @@ image=system.disk2.image
driveID=master
delay=2000
-[system.cpu0.itb]
-type=AlphaITB
-size=48
-
-[system.cpu0.dtb]
-type=AlphaDTB
-size=64
-
-[system.cpu0]
-type=AtomicSimpleCPU
-max_insts_any_thread=0
-max_insts_all_threads=0
-max_loads_any_thread=0
-max_loads_all_threads=0
-progress_interval=0
-mem=system.physmem
-system=system
-cpu_id=0
-itb=system.cpu0.itb
-dtb=system.cpu0.dtb
-profile=0
-clock=1
-defer_registration=false
-width=1
-function_trace=false
-function_trace_start=0
-simulate_stalls=false
-
[system.cpu1.itb]
type=AlphaITB
size=48
@@ -118,7 +134,6 @@ max_insts_all_threads=0
max_loads_any_thread=0
max_loads_all_threads=0
progress_interval=0
-mem=system.physmem
system=system
cpu_id=1
itb=system.cpu1.itb
@@ -131,10 +146,6 @@ function_trace=false
function_trace_start=0
simulate_stalls=false
-[system.intrctrl]
-type=IntrControl
-cpu=system.cpu0
-
[system.simple_disk.disk]
type=RawDiskImage
image_file=/dist/m5/system/disks/linux-latest.img
@@ -145,11 +156,6 @@ type=SimpleDisk
system=system
disk=system.simple_disk.disk
-[system.tsunami]
-type=Tsunami
-system=system
-intrctrl=system.intrctrl
-
[system.tsunami.fake_uart1]
type=IsaFake
pio_addr=8804615848696
@@ -495,6 +501,14 @@ type=Bus
bus_id=0
clock=2
width=64
+responder_set=true
+
+[system.iobus.responder]
+type=BadAddr
+pio_addr=0
+pio_latency=0
+platform=system.tsunami
+system=system
[trace]
flags=
diff --git a/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-atomic-dual/m5stats.txt b/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-atomic-dual/m5stats.txt
index e76c1d683..4639640fe 100644
--- a/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-atomic-dual/m5stats.txt
+++ b/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-atomic-dual/m5stats.txt
@@ -1,9 +1,9 @@
---------- Begin Simulation Statistics ----------
-host_inst_rate 1270607 # Simulator instruction rate (inst/s)
-host_mem_usage 197696 # Number of bytes of host memory used
-host_seconds 51.09 # Real time elapsed on the host
-host_tick_rate 72782461 # Simulator tick rate (ticks/s)
+host_inst_rate 1289947 # Simulator instruction rate (inst/s)
+host_mem_usage 199348 # Number of bytes of host memory used
+host_seconds 50.32 # Real time elapsed on the host
+host_tick_rate 73890229 # Simulator tick rate (ticks/s)
sim_freq 2000000000 # Frequency of simulated ticks
sim_insts 64909600 # Number of instructions simulated
sim_seconds 1.859078 # Number of seconds simulated
diff --git a/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-atomic-dual/stderr b/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-atomic-dual/stderr
index 14aa2c9ff..9d74574e7 100644
--- a/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-atomic-dual/stderr
+++ b/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-atomic-dual/stderr
@@ -1,6 +1,10 @@
+Warning: rounding error > tolerance
+ 0.002000 rounded to 0
+Warning: rounding error > tolerance
+ 0.002000 rounded to 0
0: system.tsunami.io.rtc: Real-time clock set to Sun Jan 1 00:00:00 2006
-Listening for console connection on port 3456
-0: system.remote_gdb.listener: listening for remote gdb #0 on port 7000
-0: system.remote_gdb.listener: listening for remote gdb #1 on port 7001
+Listening for console connection on port 3457
+0: system.remote_gdb.listener: listening for remote gdb #0 on port 7001
+0: system.remote_gdb.listener: listening for remote gdb #1 on port 7002
warn: Entering event queue @ 0. Starting simulation...
warn: 195723: Trying to launch CPU number 1!
diff --git a/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-atomic-dual/stdout b/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-atomic-dual/stdout
index 18365db1c..8bfefbb7c 100644
--- a/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-atomic-dual/stdout
+++ b/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-atomic-dual/stdout
@@ -5,8 +5,8 @@ The Regents of The University of Michigan
All Rights Reserved
-M5 compiled Oct 8 2006 21:57:24
-M5 started Sun Oct 8 21:58:13 2006
-M5 executing on zed.eecs.umich.edu
+M5 compiled Nov 5 2006 19:41:29
+M5 started Sun Nov 5 20:03:49 2006
+M5 executing on zizzer.eecs.umich.edu
command line: build/ALPHA_FS/m5.opt -d build/ALPHA_FS/tests/opt/quick/10.linux-boot/alpha/linux/tsunami-simple-atomic-dual tests/run.py quick/10.linux-boot/alpha/linux/tsunami-simple-atomic-dual
Exiting @ tick 3718155709 because m5_exit instruction encountered
diff --git a/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-atomic/config.ini b/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-atomic/config.ini
index 11b108837..a862353cb 100644
--- a/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-atomic/config.ini
+++ b/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-atomic/config.ini
@@ -85,7 +85,6 @@ max_insts_all_threads=0
max_insts_any_thread=0
max_loads_all_threads=0
max_loads_any_thread=0
-mem=system.physmem
profile=0
progress_interval=0
simulate_stalls=false
@@ -146,19 +145,39 @@ cpu=system.cpu
[system.iobus]
type=Bus
+children=responder
bus_id=0
clock=2
+responder_set=true
width=64
default=system.tsunami.pciconfig.pio
port=system.bridge.side_a system.tsunami.cchip.pio system.tsunami.pchip.pio system.tsunami.fake_sm_chip.pio system.tsunami.fake_uart1.pio system.tsunami.fake_uart2.pio system.tsunami.fake_uart3.pio system.tsunami.fake_uart4.pio system.tsunami.fake_ppc.pio system.tsunami.fake_OROM.pio system.tsunami.fake_pnp_addr.pio system.tsunami.fake_pnp_write.pio system.tsunami.fake_pnp_read0.pio system.tsunami.fake_pnp_read1.pio system.tsunami.fake_pnp_read2.pio system.tsunami.fake_pnp_read3.pio system.tsunami.fake_pnp_read4.pio system.tsunami.fake_pnp_read5.pio system.tsunami.fake_pnp_read6.pio system.tsunami.fake_pnp_read7.pio system.tsunami.fake_ata0.pio system.tsunami.fake_ata1.pio system.tsunami.fb.pio system.tsunami.io.pio system.tsunami.uart.pio system.tsunami.console.pio system.tsunami.ide.pio system.tsunami.ethernet.pio system.tsunami.ethernet.config system.tsunami.ethernet.dma system.tsunami.ide.config system.tsunami.ide.dma
+[system.iobus.responder]
+type=BadAddr
+pio_addr=0
+pio_latency=0
+platform=system.tsunami
+system=system
+
[system.membus]
type=Bus
+children=responder
bus_id=1
clock=2
+responder_set=false
width=64
+default=system.membus.responder.pio
port=system.bridge.side_b system.physmem.port system.cpu.icache_port system.cpu.dcache_port
+[system.membus.responder]
+type=BadAddr
+pio_addr=0
+pio_latency=0
+platform=system.tsunami
+system=system
+pio=system.membus.default
+
[system.physmem]
type=PhysicalMemory
file=
diff --git a/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-atomic/config.out b/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-atomic/config.out
index e5c6e96f8..739a4860a 100644
--- a/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-atomic/config.out
+++ b/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-atomic/config.out
@@ -31,6 +31,50 @@ type=Bus
bus_id=1
clock=2
width=64
+responder_set=false
+
+[system.cpu.itb]
+type=AlphaITB
+size=48
+
+[system.cpu.dtb]
+type=AlphaDTB
+size=64
+
+[system.cpu]
+type=AtomicSimpleCPU
+max_insts_any_thread=0
+max_insts_all_threads=0
+max_loads_any_thread=0
+max_loads_all_threads=0
+progress_interval=0
+system=system
+cpu_id=0
+itb=system.cpu.itb
+dtb=system.cpu.dtb
+profile=0
+clock=1
+defer_registration=false
+width=1
+function_trace=false
+function_trace_start=0
+simulate_stalls=false
+
+[system.intrctrl]
+type=IntrControl
+cpu=system.cpu
+
+[system.tsunami]
+type=Tsunami
+system=system
+intrctrl=system.intrctrl
+
+[system.membus.responder]
+type=BadAddr
+pio_addr=0
+pio_latency=0
+platform=system.tsunami
+system=system
[system.bridge]
type=Bridge
@@ -75,38 +119,6 @@ image=system.disk2.image
driveID=master
delay=2000
-[system.cpu.itb]
-type=AlphaITB
-size=48
-
-[system.cpu.dtb]
-type=AlphaDTB
-size=64
-
-[system.cpu]
-type=AtomicSimpleCPU
-max_insts_any_thread=0
-max_insts_all_threads=0
-max_loads_any_thread=0
-max_loads_all_threads=0
-progress_interval=0
-mem=system.physmem
-system=system
-cpu_id=0
-itb=system.cpu.itb
-dtb=system.cpu.dtb
-profile=0
-clock=1
-defer_registration=false
-width=1
-function_trace=false
-function_trace_start=0
-simulate_stalls=false
-
-[system.intrctrl]
-type=IntrControl
-cpu=system.cpu
-
[system.simple_disk.disk]
type=RawDiskImage
image_file=/dist/m5/system/disks/linux-latest.img
@@ -117,11 +129,6 @@ type=SimpleDisk
system=system
disk=system.simple_disk.disk
-[system.tsunami]
-type=Tsunami
-system=system
-intrctrl=system.intrctrl
-
[system.tsunami.fake_uart1]
type=IsaFake
pio_addr=8804615848696
@@ -467,6 +474,14 @@ type=Bus
bus_id=0
clock=2
width=64
+responder_set=true
+
+[system.iobus.responder]
+type=BadAddr
+pio_addr=0
+pio_latency=0
+platform=system.tsunami
+system=system
[trace]
flags=
diff --git a/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-atomic/m5stats.txt b/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-atomic/m5stats.txt
index e276e91a7..d68921f1e 100644
--- a/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-atomic/m5stats.txt
+++ b/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-atomic/m5stats.txt
@@ -1,9 +1,9 @@
---------- Begin Simulation Statistics ----------
-host_inst_rate 1389289 # Simulator instruction rate (inst/s)
-host_mem_usage 197652 # Number of bytes of host memory used
-host_seconds 44.48 # Real time elapsed on the host
-host_tick_rate 81712411 # Simulator tick rate (ticks/s)
+host_inst_rate 1313531 # Simulator instruction rate (inst/s)
+host_mem_usage 199136 # Number of bytes of host memory used
+host_seconds 47.04 # Real time elapsed on the host
+host_tick_rate 77256650 # Simulator tick rate (ticks/s)
sim_freq 2000000000 # Frequency of simulated ticks
sim_insts 61788439 # Number of instructions simulated
sim_seconds 1.817090 # Number of seconds simulated
diff --git a/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-atomic/stderr b/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-atomic/stderr
index 6204251a5..dbafd6309 100644
--- a/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-atomic/stderr
+++ b/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-atomic/stderr
@@ -1,3 +1,7 @@
+Warning: rounding error > tolerance
+ 0.002000 rounded to 0
+Warning: rounding error > tolerance
+ 0.002000 rounded to 0
0: system.tsunami.io.rtc: Real-time clock set to Sun Jan 1 00:00:00 2006
Listening for console connection on port 3456
0: system.remote_gdb.listener: listening for remote gdb #0 on port 7000
diff --git a/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-atomic/stdout b/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-atomic/stdout
index bb7f4ca1e..3929194fc 100644
--- a/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-atomic/stdout
+++ b/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-atomic/stdout
@@ -5,8 +5,8 @@ The Regents of The University of Michigan
All Rights Reserved
-M5 compiled Oct 8 2006 21:57:24
-M5 started Sun Oct 8 21:57:28 2006
-M5 executing on zed.eecs.umich.edu
+M5 compiled Nov 5 2006 19:41:29
+M5 started Sun Nov 5 20:03:49 2006
+M5 executing on zizzer.eecs.umich.edu
command line: build/ALPHA_FS/m5.opt -d build/ALPHA_FS/tests/opt/quick/10.linux-boot/alpha/linux/tsunami-simple-atomic tests/run.py quick/10.linux-boot/alpha/linux/tsunami-simple-atomic
Exiting @ tick 3634179176 because m5_exit instruction encountered
diff --git a/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-timing-dual/config.ini b/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-timing-dual/config.ini
index 9976e053a..f9a926d79 100644
--- a/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-timing-dual/config.ini
+++ b/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-timing-dual/config.ini
@@ -85,7 +85,6 @@ max_insts_all_threads=0
max_insts_any_thread=0
max_loads_all_threads=0
max_loads_any_thread=0
-mem=system.physmem
profile=0
progress_interval=0
system=system
@@ -114,7 +113,6 @@ max_insts_all_threads=0
max_insts_any_thread=0
max_loads_all_threads=0
max_loads_any_thread=0
-mem=system.physmem
profile=0
progress_interval=0
system=system
@@ -173,19 +171,39 @@ cpu=system.cpu0
[system.iobus]
type=Bus
+children=responder
bus_id=0
clock=2
+responder_set=true
width=64
default=system.tsunami.pciconfig.pio
port=system.bridge.side_a system.tsunami.cchip.pio system.tsunami.pchip.pio system.tsunami.fake_sm_chip.pio system.tsunami.fake_uart1.pio system.tsunami.fake_uart2.pio system.tsunami.fake_uart3.pio system.tsunami.fake_uart4.pio system.tsunami.fake_ppc.pio system.tsunami.fake_OROM.pio system.tsunami.fake_pnp_addr.pio system.tsunami.fake_pnp_write.pio system.tsunami.fake_pnp_read0.pio system.tsunami.fake_pnp_read1.pio system.tsunami.fake_pnp_read2.pio system.tsunami.fake_pnp_read3.pio system.tsunami.fake_pnp_read4.pio system.tsunami.fake_pnp_read5.pio system.tsunami.fake_pnp_read6.pio system.tsunami.fake_pnp_read7.pio system.tsunami.fake_ata0.pio system.tsunami.fake_ata1.pio system.tsunami.fb.pio system.tsunami.io.pio system.tsunami.uart.pio system.tsunami.console.pio system.tsunami.ide.pio system.tsunami.ethernet.pio system.tsunami.ethernet.config system.tsunami.ethernet.dma system.tsunami.ide.config system.tsunami.ide.dma
+[system.iobus.responder]
+type=BadAddr
+pio_addr=0
+pio_latency=0
+platform=system.tsunami
+system=system
+
[system.membus]
type=Bus
+children=responder
bus_id=1
clock=2
+responder_set=false
width=64
+default=system.membus.responder.pio
port=system.bridge.side_b system.physmem.port system.cpu0.icache_port system.cpu0.dcache_port system.cpu1.icache_port system.cpu1.dcache_port
+[system.membus.responder]
+type=BadAddr
+pio_addr=0
+pio_latency=0
+platform=system.tsunami
+system=system
+pio=system.membus.default
+
[system.physmem]
type=PhysicalMemory
file=
diff --git a/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-timing-dual/config.out b/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-timing-dual/config.out
index 9e4bfb566..5391d8e88 100644
--- a/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-timing-dual/config.out
+++ b/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-timing-dual/config.out
@@ -31,6 +31,50 @@ type=Bus
bus_id=1
clock=2
width=64
+responder_set=false
+
+[system.cpu0.itb]
+type=AlphaITB
+size=48
+
+[system.cpu0.dtb]
+type=AlphaDTB
+size=64
+
+[system.cpu0]
+type=TimingSimpleCPU
+max_insts_any_thread=0
+max_insts_all_threads=0
+max_loads_any_thread=0
+max_loads_all_threads=0
+progress_interval=0
+system=system
+cpu_id=0
+itb=system.cpu0.itb
+dtb=system.cpu0.dtb
+profile=0
+clock=1
+defer_registration=false
+// width not specified
+function_trace=false
+function_trace_start=0
+// simulate_stalls not specified
+
+[system.intrctrl]
+type=IntrControl
+cpu=system.cpu0
+
+[system.tsunami]
+type=Tsunami
+system=system
+intrctrl=system.intrctrl
+
+[system.membus.responder]
+type=BadAddr
+pio_addr=0
+pio_latency=0
+platform=system.tsunami
+system=system
[system.bridge]
type=Bridge
@@ -75,34 +119,6 @@ image=system.disk2.image
driveID=master
delay=2000
-[system.cpu0.itb]
-type=AlphaITB
-size=48
-
-[system.cpu0.dtb]
-type=AlphaDTB
-size=64
-
-[system.cpu0]
-type=TimingSimpleCPU
-max_insts_any_thread=0
-max_insts_all_threads=0
-max_loads_any_thread=0
-max_loads_all_threads=0
-progress_interval=0
-mem=system.physmem
-system=system
-cpu_id=0
-itb=system.cpu0.itb
-dtb=system.cpu0.dtb
-profile=0
-clock=1
-defer_registration=false
-// width not specified
-function_trace=false
-function_trace_start=0
-// simulate_stalls not specified
-
[system.cpu1.itb]
type=AlphaITB
size=48
@@ -118,7 +134,6 @@ max_insts_all_threads=0
max_loads_any_thread=0
max_loads_all_threads=0
progress_interval=0
-mem=system.physmem
system=system
cpu_id=1
itb=system.cpu1.itb
@@ -131,10 +146,6 @@ function_trace=false
function_trace_start=0
// simulate_stalls not specified
-[system.intrctrl]
-type=IntrControl
-cpu=system.cpu0
-
[system.simple_disk.disk]
type=RawDiskImage
image_file=/dist/m5/system/disks/linux-latest.img
@@ -145,11 +156,6 @@ type=SimpleDisk
system=system
disk=system.simple_disk.disk
-[system.tsunami]
-type=Tsunami
-system=system
-intrctrl=system.intrctrl
-
[system.tsunami.fake_uart1]
type=IsaFake
pio_addr=8804615848696
@@ -495,6 +501,14 @@ type=Bus
bus_id=0
clock=2
width=64
+responder_set=true
+
+[system.iobus.responder]
+type=BadAddr
+pio_addr=0
+pio_latency=0
+platform=system.tsunami
+system=system
[trace]
flags=
diff --git a/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-timing-dual/console.system.sim_console b/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-timing-dual/console.system.sim_console
index 27adebb82..ceae1faaf 100644
--- a/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-timing-dual/console.system.sim_console
+++ b/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-timing-dual/console.system.sim_console
@@ -17,8 +17,8 @@ M5 console: m5AlphaAccess @ 0xFFFFFD0200000000
unix_boot_mem ends at FFFFFC0000078000
k_argc = 0
jumping to kernel at 0xFFFFFC0000310000, (PCBB 0xFFFFFC0000018180 pfn 1067)
- CallbackFixup 0 18000, t7=FFFFFC000070C000
Entering slaveloop for cpu 1 my_rpb=FFFFFC0000018400
+ CallbackFixup 0 18000, t7=FFFFFC000070C000
Linux version 2.6.13 (hsul@zed.eecs.umich.edu) (gcc version 3.4.3) #1 SMP Sun Oct 8 19:52:07 EDT 2006
Booting GENERIC on Tsunami variation DP264 using machine vector DP264 from SRM
Major Options: SMP LEGACY_START VERBOSE_MCHECK
diff --git a/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-timing-dual/m5stats.txt b/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-timing-dual/m5stats.txt
index 3f540d0ea..64a8cd99b 100644
--- a/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-timing-dual/m5stats.txt
+++ b/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-timing-dual/m5stats.txt
@@ -1,225 +1,226 @@
---------- Begin Simulation Statistics ----------
-host_inst_rate 255147 # Simulator instruction rate (inst/s)
-host_mem_usage 198260 # Number of bytes of host memory used
-host_seconds 260.00 # Real time elapsed on the host
-host_tick_rate 14365182 # Simulator tick rate (ticks/s)
+host_inst_rate 341883 # Simulator instruction rate (inst/s)
+host_mem_usage 198856 # Number of bytes of host memory used
+host_seconds 195.81 # Real time elapsed on the host
+host_tick_rate 20274403 # Simulator tick rate (ticks/s)
sim_freq 2000000000 # Frequency of simulated ticks
-sim_insts 66337257 # Number of instructions simulated
-sim_seconds 1.867449 # Number of seconds simulated
-sim_ticks 3734898877 # Number of ticks simulated
-system.cpu0.dtb.accesses 828318 # DTB accesses
-system.cpu0.dtb.acv 315 # DTB access violations
-system.cpu0.dtb.hits 13264910 # DTB hits
-system.cpu0.dtb.misses 7094 # DTB misses
-system.cpu0.dtb.read_accesses 572336 # DTB read accesses
-system.cpu0.dtb.read_acv 200 # DTB read access violations
-system.cpu0.dtb.read_hits 8201218 # DTB read hits
-system.cpu0.dtb.read_misses 6394 # DTB read misses
-system.cpu0.dtb.write_accesses 255982 # DTB write accesses
+sim_insts 66945470 # Number of instructions simulated
+sim_seconds 1.985009 # Number of seconds simulated
+sim_ticks 3970017178 # Number of ticks simulated
+system.cpu0.dtb.accesses 1003481 # DTB accesses
+system.cpu0.dtb.acv 289 # DTB access violations
+system.cpu0.dtb.hits 13332675 # DTB hits
+system.cpu0.dtb.misses 8437 # DTB misses
+system.cpu0.dtb.read_accesses 695694 # DTB read accesses
+system.cpu0.dtb.read_acv 174 # DTB read access violations
+system.cpu0.dtb.read_hits 8285791 # DTB read hits
+system.cpu0.dtb.read_misses 7640 # DTB read misses
+system.cpu0.dtb.write_accesses 307787 # DTB write accesses
system.cpu0.dtb.write_acv 115 # DTB write access violations
-system.cpu0.dtb.write_hits 5063692 # DTB write hits
-system.cpu0.dtb.write_misses 700 # DTB write misses
-system.cpu0.idle_fraction 0.982517 # Percentage of idle cycles
-system.cpu0.itb.accesses 1888651 # ITB accesses
-system.cpu0.itb.acv 166 # ITB acv
-system.cpu0.itb.hits 1885318 # ITB hits
-system.cpu0.itb.misses 3333 # ITB misses
-system.cpu0.kern.callpal 146863 # number of callpals executed
+system.cpu0.dtb.write_hits 5046884 # DTB write hits
+system.cpu0.dtb.write_misses 797 # DTB write misses
+system.cpu0.idle_fraction 0.928150 # Percentage of idle cycles
+system.cpu0.itb.accesses 2398201 # ITB accesses
+system.cpu0.itb.acv 143 # ITB acv
+system.cpu0.itb.hits 2394377 # ITB hits
+system.cpu0.itb.misses 3824 # ITB misses
+system.cpu0.kern.callpal 144637 # number of callpals executed
system.cpu0.kern.callpal_cserve 1 0.00% 0.00% # number of callpals executed
-system.cpu0.kern.callpal_wripir 506 0.34% 0.35% # number of callpals executed
-system.cpu0.kern.callpal_wrmces 1 0.00% 0.35% # number of callpals executed
-system.cpu0.kern.callpal_wrfen 1 0.00% 0.35% # number of callpals executed
-system.cpu0.kern.callpal_wrvptptr 1 0.00% 0.35% # number of callpals executed
-system.cpu0.kern.callpal_swpctx 2962 2.02% 2.36% # number of callpals executed
-system.cpu0.kern.callpal_tbi 47 0.03% 2.40% # number of callpals executed
-system.cpu0.kern.callpal_wrent 7 0.00% 2.40% # number of callpals executed
-system.cpu0.kern.callpal_swpipl 132443 90.18% 92.58% # number of callpals executed
-system.cpu0.kern.callpal_rdps 6236 4.25% 96.83% # number of callpals executed
-system.cpu0.kern.callpal_wrkgp 1 0.00% 96.83% # number of callpals executed
-system.cpu0.kern.callpal_wrusp 2 0.00% 96.83% # number of callpals executed
-system.cpu0.kern.callpal_rdusp 8 0.01% 96.84% # number of callpals executed
-system.cpu0.kern.callpal_whami 2 0.00% 96.84% # number of callpals executed
-system.cpu0.kern.callpal_rti 4200 2.86% 99.70% # number of callpals executed
-system.cpu0.kern.callpal_callsys 317 0.22% 99.91% # number of callpals executed
-system.cpu0.kern.callpal_imb 128 0.09% 100.00% # number of callpals executed
+system.cpu0.kern.callpal_wripir 571 0.39% 0.40% # number of callpals executed
+system.cpu0.kern.callpal_wrmces 1 0.00% 0.40% # number of callpals executed
+system.cpu0.kern.callpal_wrfen 1 0.00% 0.40% # number of callpals executed
+system.cpu0.kern.callpal_wrvptptr 1 0.00% 0.40% # number of callpals executed
+system.cpu0.kern.callpal_swpctx 2907 2.01% 2.41% # number of callpals executed
+system.cpu0.kern.callpal_tbi 44 0.03% 2.44% # number of callpals executed
+system.cpu0.kern.callpal_wrent 7 0.00% 2.44% # number of callpals executed
+system.cpu0.kern.callpal_swpipl 129633 89.63% 92.07% # number of callpals executed
+system.cpu0.kern.callpal_rdps 6650 4.60% 96.67% # number of callpals executed
+system.cpu0.kern.callpal_wrkgp 1 0.00% 96.67% # number of callpals executed
+system.cpu0.kern.callpal_wrusp 4 0.00% 96.67% # number of callpals executed
+system.cpu0.kern.callpal_rdusp 7 0.00% 96.68% # number of callpals executed
+system.cpu0.kern.callpal_whami 2 0.00% 96.68% # number of callpals executed
+system.cpu0.kern.callpal_rti 4286 2.96% 99.64% # number of callpals executed
+system.cpu0.kern.callpal_callsys 372 0.26% 99.90% # number of callpals executed
+system.cpu0.kern.callpal_imb 149 0.10% 100.00% # number of callpals executed
system.cpu0.kern.inst.arm 0 # number of arm instructions executed
-system.cpu0.kern.inst.hwrei 160332 # number of hwrei instructions executed
+system.cpu0.kern.inst.hwrei 159963 # number of hwrei instructions executed
system.cpu0.kern.inst.ivlb 0 # number of ivlb instructions executed
system.cpu0.kern.inst.ivle 0 # number of ivle instructions executed
-system.cpu0.kern.inst.quiesce 6637 # number of quiesce instructions executed
-system.cpu0.kern.ipl_count 139203 # number of times we switched to this ipl
-system.cpu0.kern.ipl_count_0 55744 40.05% 40.05% # number of times we switched to this ipl
-system.cpu0.kern.ipl_count_21 245 0.18% 40.22% # number of times we switched to this ipl
-system.cpu0.kern.ipl_count_22 1904 1.37% 41.59% # number of times we switched to this ipl
-system.cpu0.kern.ipl_count_30 410 0.29% 41.88% # number of times we switched to this ipl
-system.cpu0.kern.ipl_count_31 80900 58.12% 100.00% # number of times we switched to this ipl
-system.cpu0.kern.ipl_good 112527 # number of times we switched to this ipl from a different ipl
-system.cpu0.kern.ipl_good_0 55189 49.05% 49.05% # number of times we switched to this ipl from a different ipl
-system.cpu0.kern.ipl_good_21 245 0.22% 49.26% # number of times we switched to this ipl from a different ipl
-system.cpu0.kern.ipl_good_22 1904 1.69% 50.95% # number of times we switched to this ipl from a different ipl
-system.cpu0.kern.ipl_good_30 410 0.36% 51.32% # number of times we switched to this ipl from a different ipl
-system.cpu0.kern.ipl_good_31 54779 48.68% 100.00% # number of times we switched to this ipl from a different ipl
-system.cpu0.kern.ipl_ticks 3734378988 # number of cycles we spent at this ipl
-system.cpu0.kern.ipl_ticks_0 3696326531 98.98% 98.98% # number of cycles we spent at this ipl
-system.cpu0.kern.ipl_ticks_21 53683 0.00% 98.98% # number of cycles we spent at this ipl
-system.cpu0.kern.ipl_ticks_22 224672 0.01% 98.99% # number of cycles we spent at this ipl
-system.cpu0.kern.ipl_ticks_30 128286 0.00% 98.99% # number of cycles we spent at this ipl
-system.cpu0.kern.ipl_ticks_31 37645816 1.01% 100.00% # number of cycles we spent at this ipl
-system.cpu0.kern.ipl_used 0.808366 # fraction of swpipl calls that actually changed the ipl
-system.cpu0.kern.ipl_used_0 0.990044 # fraction of swpipl calls that actually changed the ipl
+system.cpu0.kern.inst.quiesce 6648 # number of quiesce instructions executed
+system.cpu0.kern.ipl_count 136551 # number of times we switched to this ipl
+system.cpu0.kern.ipl_count_0 54497 39.91% 39.91% # number of times we switched to this ipl
+system.cpu0.kern.ipl_count_21 143 0.10% 40.01% # number of times we switched to this ipl
+system.cpu0.kern.ipl_count_22 2005 1.47% 41.48% # number of times we switched to this ipl
+system.cpu0.kern.ipl_count_30 483 0.35% 41.84% # number of times we switched to this ipl
+system.cpu0.kern.ipl_count_31 79423 58.16% 100.00% # number of times we switched to this ipl
+system.cpu0.kern.ipl_good 110306 # number of times we switched to this ipl from a different ipl
+system.cpu0.kern.ipl_good_0 54079 49.03% 49.03% # number of times we switched to this ipl from a different ipl
+system.cpu0.kern.ipl_good_21 143 0.13% 49.16% # number of times we switched to this ipl from a different ipl
+system.cpu0.kern.ipl_good_22 2005 1.82% 50.97% # number of times we switched to this ipl from a different ipl
+system.cpu0.kern.ipl_good_30 483 0.44% 51.41% # number of times we switched to this ipl from a different ipl
+system.cpu0.kern.ipl_good_31 53596 48.59% 100.00% # number of times we switched to this ipl from a different ipl
+system.cpu0.kern.ipl_ticks 3970015394 # number of cycles we spent at this ipl
+system.cpu0.kern.ipl_ticks_0 3836129328 96.63% 96.63% # number of cycles we spent at this ipl
+system.cpu0.kern.ipl_ticks_21 133000 0.00% 96.63% # number of cycles we spent at this ipl
+system.cpu0.kern.ipl_ticks_22 1870128 0.05% 96.68% # number of cycles we spent at this ipl
+system.cpu0.kern.ipl_ticks_30 1206048 0.03% 96.71% # number of cycles we spent at this ipl
+system.cpu0.kern.ipl_ticks_31 130676890 3.29% 100.00% # number of cycles we spent at this ipl
+system.cpu0.kern.ipl_used 0.807801 # fraction of swpipl calls that actually changed the ipl
+system.cpu0.kern.ipl_used_0 0.992330 # fraction of swpipl calls that actually changed the ipl
system.cpu0.kern.ipl_used_21 1 # fraction of swpipl calls that actually changed the ipl
system.cpu0.kern.ipl_used_22 1 # fraction of swpipl calls that actually changed the ipl
system.cpu0.kern.ipl_used_30 1 # fraction of swpipl calls that actually changed the ipl
-system.cpu0.kern.ipl_used_31 0.677120 # fraction of swpipl calls that actually changed the ipl
-system.cpu0.kern.mode_good_kernel 1095
-system.cpu0.kern.mode_good_user 1095
+system.cpu0.kern.ipl_used_31 0.674817 # fraction of swpipl calls that actually changed the ipl
+system.cpu0.kern.mode_good_kernel 1253
+system.cpu0.kern.mode_good_user 1254
system.cpu0.kern.mode_good_idle 0
-system.cpu0.kern.mode_switch_kernel 6628 # number of protection mode switches
-system.cpu0.kern.mode_switch_user 1095 # number of protection mode switches
+system.cpu0.kern.mode_switch_kernel 6799 # number of protection mode switches
+system.cpu0.kern.mode_switch_user 1254 # number of protection mode switches
system.cpu0.kern.mode_switch_idle 0 # number of protection mode switches
-system.cpu0.kern.mode_switch_good 0.283569 # fraction of useful protection mode switches
-system.cpu0.kern.mode_switch_good_kernel 0.165208 # fraction of useful protection mode switches
+system.cpu0.kern.mode_switch_good 0.311313 # fraction of useful protection mode switches
+system.cpu0.kern.mode_switch_good_kernel 0.184292 # fraction of useful protection mode switches
system.cpu0.kern.mode_switch_good_user 1 # fraction of useful protection mode switches
system.cpu0.kern.mode_switch_good_idle <err: div-0> # fraction of useful protection mode switches
-system.cpu0.kern.mode_ticks_kernel 3730042316 99.93% 99.93% # number of ticks spent at the given mode
-system.cpu0.kern.mode_ticks_user 2718822 0.07% 100.00% # number of ticks spent at the given mode
+system.cpu0.kern.mode_ticks_kernel 3956260432 99.65% 99.65% # number of ticks spent at the given mode
+system.cpu0.kern.mode_ticks_user 13754954 0.35% 100.00% # number of ticks spent at the given mode
system.cpu0.kern.mode_ticks_idle 0 0.00% 100.00% # number of ticks spent at the given mode
-system.cpu0.kern.swap_context 2963 # number of times the context was actually changed
-system.cpu0.kern.syscall 179 # number of syscalls executed
-system.cpu0.kern.syscall_fork 7 3.91% 3.91% # number of syscalls executed
-system.cpu0.kern.syscall_read 14 7.82% 11.73% # number of syscalls executed
-system.cpu0.kern.syscall_write 4 2.23% 13.97% # number of syscalls executed
-system.cpu0.kern.syscall_close 27 15.08% 29.05% # number of syscalls executed
-system.cpu0.kern.syscall_chdir 1 0.56% 29.61% # number of syscalls executed
-system.cpu0.kern.syscall_obreak 6 3.35% 32.96% # number of syscalls executed
-system.cpu0.kern.syscall_lseek 7 3.91% 36.87% # number of syscalls executed
-system.cpu0.kern.syscall_getpid 4 2.23% 39.11% # number of syscalls executed
-system.cpu0.kern.syscall_setuid 1 0.56% 39.66% # number of syscalls executed
-system.cpu0.kern.syscall_getuid 3 1.68% 41.34% # number of syscalls executed
-system.cpu0.kern.syscall_access 6 3.35% 44.69% # number of syscalls executed
-system.cpu0.kern.syscall_dup 2 1.12% 45.81% # number of syscalls executed
-system.cpu0.kern.syscall_open 30 16.76% 62.57% # number of syscalls executed
-system.cpu0.kern.syscall_getgid 3 1.68% 64.25% # number of syscalls executed
-system.cpu0.kern.syscall_sigprocmask 8 4.47% 68.72% # number of syscalls executed
-system.cpu0.kern.syscall_ioctl 8 4.47% 73.18% # number of syscalls executed
-system.cpu0.kern.syscall_execve 5 2.79% 75.98% # number of syscalls executed
-system.cpu0.kern.syscall_mmap 17 9.50% 85.47% # number of syscalls executed
-system.cpu0.kern.syscall_munmap 3 1.68% 87.15% # number of syscalls executed
-system.cpu0.kern.syscall_mprotect 4 2.23% 89.39% # number of syscalls executed
-system.cpu0.kern.syscall_gethostname 1 0.56% 89.94% # number of syscalls executed
-system.cpu0.kern.syscall_dup2 2 1.12% 91.06% # number of syscalls executed
-system.cpu0.kern.syscall_fcntl 8 4.47% 95.53% # number of syscalls executed
-system.cpu0.kern.syscall_socket 2 1.12% 96.65% # number of syscalls executed
-system.cpu0.kern.syscall_connect 2 1.12% 97.77% # number of syscalls executed
-system.cpu0.kern.syscall_setgid 1 0.56% 98.32% # number of syscalls executed
-system.cpu0.kern.syscall_getrlimit 1 0.56% 98.88% # number of syscalls executed
-system.cpu0.kern.syscall_setsid 2 1.12% 100.00% # number of syscalls executed
-system.cpu0.not_idle_fraction 0.017483 # Percentage of non-idle cycles
-system.cpu0.numCycles 3734379018 # number of cpu cycles simulated
-system.cpu0.num_insts 51973218 # Number of instructions executed
-system.cpu0.num_refs 13496062 # Number of memory references
-system.cpu1.dtb.accesses 477041 # DTB accesses
-system.cpu1.dtb.acv 52 # DTB access violations
-system.cpu1.dtb.hits 4561390 # DTB hits
-system.cpu1.dtb.misses 4359 # DTB misses
-system.cpu1.dtb.read_accesses 328551 # DTB read accesses
-system.cpu1.dtb.read_acv 10 # DTB read access violations
-system.cpu1.dtb.read_hits 2657400 # DTB read hits
-system.cpu1.dtb.read_misses 3911 # DTB read misses
-system.cpu1.dtb.write_accesses 148490 # DTB write accesses
-system.cpu1.dtb.write_acv 42 # DTB write access violations
-system.cpu1.dtb.write_hits 1903990 # DTB write hits
-system.cpu1.dtb.write_misses 448 # DTB write misses
-system.cpu1.idle_fraction 0.994927 # Percentage of idle cycles
-system.cpu1.itb.accesses 1392687 # ITB accesses
-system.cpu1.itb.acv 18 # ITB acv
-system.cpu1.itb.hits 1391015 # ITB hits
-system.cpu1.itb.misses 1672 # ITB misses
-system.cpu1.kern.callpal 74370 # number of callpals executed
+system.cpu0.kern.swap_context 2908 # number of times the context was actually changed
+system.cpu0.kern.syscall 227 # number of syscalls executed
+system.cpu0.kern.syscall_fork 6 2.64% 2.64% # number of syscalls executed
+system.cpu0.kern.syscall_read 19 8.37% 11.01% # number of syscalls executed
+system.cpu0.kern.syscall_write 3 1.32% 12.33% # number of syscalls executed
+system.cpu0.kern.syscall_close 31 13.66% 25.99% # number of syscalls executed
+system.cpu0.kern.syscall_chdir 1 0.44% 26.43% # number of syscalls executed
+system.cpu0.kern.syscall_chmod 1 0.44% 26.87% # number of syscalls executed
+system.cpu0.kern.syscall_obreak 10 4.41% 31.28% # number of syscalls executed
+system.cpu0.kern.syscall_lseek 6 2.64% 33.92% # number of syscalls executed
+system.cpu0.kern.syscall_getpid 4 1.76% 35.68% # number of syscalls executed
+system.cpu0.kern.syscall_setuid 2 0.88% 36.56% # number of syscalls executed
+system.cpu0.kern.syscall_getuid 4 1.76% 38.33% # number of syscalls executed
+system.cpu0.kern.syscall_access 8 3.52% 41.85% # number of syscalls executed
+system.cpu0.kern.syscall_dup 2 0.88% 42.73% # number of syscalls executed
+system.cpu0.kern.syscall_open 40 17.62% 60.35% # number of syscalls executed
+system.cpu0.kern.syscall_getgid 4 1.76% 62.11% # number of syscalls executed
+system.cpu0.kern.syscall_sigprocmask 7 3.08% 65.20% # number of syscalls executed
+system.cpu0.kern.syscall_ioctl 9 3.96% 69.16% # number of syscalls executed
+system.cpu0.kern.syscall_readlink 1 0.44% 69.60% # number of syscalls executed
+system.cpu0.kern.syscall_execve 5 2.20% 71.81% # number of syscalls executed
+system.cpu0.kern.syscall_mmap 32 14.10% 85.90% # number of syscalls executed
+system.cpu0.kern.syscall_munmap 3 1.32% 87.22% # number of syscalls executed
+system.cpu0.kern.syscall_mprotect 9 3.96% 91.19% # number of syscalls executed
+system.cpu0.kern.syscall_gethostname 1 0.44% 91.63% # number of syscalls executed
+system.cpu0.kern.syscall_dup2 2 0.88% 92.51% # number of syscalls executed
+system.cpu0.kern.syscall_fcntl 8 3.52% 96.04% # number of syscalls executed
+system.cpu0.kern.syscall_socket 2 0.88% 96.92% # number of syscalls executed
+system.cpu0.kern.syscall_connect 2 0.88% 97.80% # number of syscalls executed
+system.cpu0.kern.syscall_setgid 2 0.88% 98.68% # number of syscalls executed
+system.cpu0.kern.syscall_getrlimit 1 0.44% 99.12% # number of syscalls executed
+system.cpu0.kern.syscall_setsid 2 0.88% 100.00% # number of syscalls executed
+system.cpu0.not_idle_fraction 0.071850 # Percentage of non-idle cycles
+system.cpu0.numCycles 3970017178 # number of cpu cycles simulated
+system.cpu0.num_insts 52312134 # Number of instructions executed
+system.cpu0.num_refs 13564902 # Number of memory references
+system.cpu1.dtb.accesses 302962 # DTB accesses
+system.cpu1.dtb.acv 84 # DTB access violations
+system.cpu1.dtb.hits 4635665 # DTB hits
+system.cpu1.dtb.misses 3107 # DTB misses
+system.cpu1.dtb.read_accesses 205912 # DTB read accesses
+system.cpu1.dtb.read_acv 36 # DTB read access violations
+system.cpu1.dtb.read_hits 2664909 # DTB read hits
+system.cpu1.dtb.read_misses 2747 # DTB read misses
+system.cpu1.dtb.write_accesses 97050 # DTB write accesses
+system.cpu1.dtb.write_acv 48 # DTB write access violations
+system.cpu1.dtb.write_hits 1970756 # DTB write hits
+system.cpu1.dtb.write_misses 360 # DTB write misses
+system.cpu1.idle_fraction 0.974941 # Percentage of idle cycles
+system.cpu1.itb.accesses 885878 # ITB accesses
+system.cpu1.itb.acv 41 # ITB acv
+system.cpu1.itb.hits 884631 # ITB hits
+system.cpu1.itb.misses 1247 # ITB misses
+system.cpu1.kern.callpal 80664 # number of callpals executed
system.cpu1.kern.callpal_cserve 1 0.00% 0.00% # number of callpals executed
-system.cpu1.kern.callpal_wripir 410 0.55% 0.55% # number of callpals executed
-system.cpu1.kern.callpal_wrmces 1 0.00% 0.55% # number of callpals executed
-system.cpu1.kern.callpal_wrfen 1 0.00% 0.56% # number of callpals executed
-system.cpu1.kern.callpal_swpctx 2102 2.83% 3.38% # number of callpals executed
-system.cpu1.kern.callpal_tbi 6 0.01% 3.39% # number of callpals executed
-system.cpu1.kern.callpal_wrent 7 0.01% 3.40% # number of callpals executed
-system.cpu1.kern.callpal_swpipl 65072 87.50% 90.90% # number of callpals executed
-system.cpu1.kern.callpal_rdps 2603 3.50% 94.40% # number of callpals executed
-system.cpu1.kern.callpal_wrkgp 1 0.00% 94.40% # number of callpals executed
-system.cpu1.kern.callpal_wrusp 5 0.01% 94.41% # number of callpals executed
-system.cpu1.kern.callpal_rdusp 1 0.00% 94.41% # number of callpals executed
-system.cpu1.kern.callpal_whami 3 0.00% 94.41% # number of callpals executed
-system.cpu1.kern.callpal_rti 3890 5.23% 99.64% # number of callpals executed
-system.cpu1.kern.callpal_callsys 214 0.29% 99.93% # number of callpals executed
-system.cpu1.kern.callpal_imb 52 0.07% 100.00% # number of callpals executed
+system.cpu1.kern.callpal_wripir 483 0.60% 0.60% # number of callpals executed
+system.cpu1.kern.callpal_wrmces 1 0.00% 0.60% # number of callpals executed
+system.cpu1.kern.callpal_wrfen 1 0.00% 0.60% # number of callpals executed
+system.cpu1.kern.callpal_swpctx 2277 2.82% 3.43% # number of callpals executed
+system.cpu1.kern.callpal_tbi 10 0.01% 3.44% # number of callpals executed
+system.cpu1.kern.callpal_wrent 7 0.01% 3.45% # number of callpals executed
+system.cpu1.kern.callpal_swpipl 71260 88.34% 91.79% # number of callpals executed
+system.cpu1.kern.callpal_rdps 2378 2.95% 94.74% # number of callpals executed
+system.cpu1.kern.callpal_wrkgp 1 0.00% 94.74% # number of callpals executed
+system.cpu1.kern.callpal_wrusp 3 0.00% 94.74% # number of callpals executed
+system.cpu1.kern.callpal_rdusp 2 0.00% 94.74% # number of callpals executed
+system.cpu1.kern.callpal_whami 3 0.00% 94.75% # number of callpals executed
+system.cpu1.kern.callpal_rti 4044 5.01% 99.76% # number of callpals executed
+system.cpu1.kern.callpal_callsys 161 0.20% 99.96% # number of callpals executed
+system.cpu1.kern.callpal_imb 31 0.04% 100.00% # number of callpals executed
system.cpu1.kern.callpal_rdunique 1 0.00% 100.00% # number of callpals executed
system.cpu1.kern.inst.arm 0 # number of arm instructions executed
-system.cpu1.kern.inst.hwrei 82881 # number of hwrei instructions executed
+system.cpu1.kern.inst.hwrei 87713 # number of hwrei instructions executed
system.cpu1.kern.inst.ivlb 0 # number of ivlb instructions executed
system.cpu1.kern.inst.ivle 0 # number of ivle instructions executed
-system.cpu1.kern.inst.quiesce 2511 # number of quiesce instructions executed
-system.cpu1.kern.ipl_count 71371 # number of times we switched to this ipl
-system.cpu1.kern.ipl_count_0 27750 38.88% 38.88% # number of times we switched to this ipl
-system.cpu1.kern.ipl_count_22 1902 2.66% 41.55% # number of times we switched to this ipl
-system.cpu1.kern.ipl_count_30 506 0.71% 42.26% # number of times we switched to this ipl
-system.cpu1.kern.ipl_count_31 41213 57.74% 100.00% # number of times we switched to this ipl
-system.cpu1.kern.ipl_good 55758 # number of times we switched to this ipl from a different ipl
-system.cpu1.kern.ipl_good_0 26928 48.29% 48.29% # number of times we switched to this ipl from a different ipl
-system.cpu1.kern.ipl_good_22 1902 3.41% 51.71% # number of times we switched to this ipl from a different ipl
-system.cpu1.kern.ipl_good_30 506 0.91% 52.61% # number of times we switched to this ipl from a different ipl
-system.cpu1.kern.ipl_good_31 26422 47.39% 100.00% # number of times we switched to this ipl from a different ipl
-system.cpu1.kern.ipl_ticks 3734898431 # number of cycles we spent at this ipl
-system.cpu1.kern.ipl_ticks_0 3704872588 99.20% 99.20% # number of cycles we spent at this ipl
-system.cpu1.kern.ipl_ticks_22 224436 0.01% 99.20% # number of cycles we spent at this ipl
-system.cpu1.kern.ipl_ticks_30 162482 0.00% 99.21% # number of cycles we spent at this ipl
-system.cpu1.kern.ipl_ticks_31 29638925 0.79% 100.00% # number of cycles we spent at this ipl
-system.cpu1.kern.ipl_used 0.781242 # fraction of swpipl calls that actually changed the ipl
-system.cpu1.kern.ipl_used_0 0.970378 # fraction of swpipl calls that actually changed the ipl
+system.cpu1.kern.inst.quiesce 2740 # number of quiesce instructions executed
+system.cpu1.kern.ipl_count 77873 # number of times we switched to this ipl
+system.cpu1.kern.ipl_count_0 30259 38.86% 38.86% # number of times we switched to this ipl
+system.cpu1.kern.ipl_count_22 1997 2.56% 41.42% # number of times we switched to this ipl
+system.cpu1.kern.ipl_count_30 571 0.73% 42.15% # number of times we switched to this ipl
+system.cpu1.kern.ipl_count_31 45046 57.85% 100.00% # number of times we switched to this ipl
+system.cpu1.kern.ipl_good 60597 # number of times we switched to this ipl from a different ipl
+system.cpu1.kern.ipl_good_0 29300 48.35% 48.35% # number of times we switched to this ipl from a different ipl
+system.cpu1.kern.ipl_good_22 1997 3.30% 51.65% # number of times we switched to this ipl from a different ipl
+system.cpu1.kern.ipl_good_30 571 0.94% 52.59% # number of times we switched to this ipl from a different ipl
+system.cpu1.kern.ipl_good_31 28729 47.41% 100.00% # number of times we switched to this ipl from a different ipl
+system.cpu1.kern.ipl_ticks 3968771896 # number of cycles we spent at this ipl
+system.cpu1.kern.ipl_ticks_0 3847181696 96.94% 96.94% # number of cycles we spent at this ipl
+system.cpu1.kern.ipl_ticks_22 1867354 0.05% 96.98% # number of cycles we spent at this ipl
+system.cpu1.kern.ipl_ticks_30 1457952 0.04% 97.02% # number of cycles we spent at this ipl
+system.cpu1.kern.ipl_ticks_31 118264894 2.98% 100.00% # number of cycles we spent at this ipl
+system.cpu1.kern.ipl_used 0.778152 # fraction of swpipl calls that actually changed the ipl
+system.cpu1.kern.ipl_used_0 0.968307 # fraction of swpipl calls that actually changed the ipl
system.cpu1.kern.ipl_used_22 1 # fraction of swpipl calls that actually changed the ipl
system.cpu1.kern.ipl_used_30 1 # fraction of swpipl calls that actually changed the ipl
-system.cpu1.kern.ipl_used_31 0.641108 # fraction of swpipl calls that actually changed the ipl
-system.cpu1.kern.mode_good_kernel 1093
-system.cpu1.kern.mode_good_user 662
-system.cpu1.kern.mode_good_idle 431
-system.cpu1.kern.mode_switch_kernel 2354 # number of protection mode switches
-system.cpu1.kern.mode_switch_user 662 # number of protection mode switches
-system.cpu1.kern.mode_switch_idle 2830 # number of protection mode switches
-system.cpu1.kern.mode_switch_good 0.373931 # fraction of useful protection mode switches
-system.cpu1.kern.mode_switch_good_kernel 0.464316 # fraction of useful protection mode switches
+system.cpu1.kern.ipl_used_31 0.637770 # fraction of swpipl calls that actually changed the ipl
+system.cpu1.kern.mode_good_kernel 1013
+system.cpu1.kern.mode_good_user 518
+system.cpu1.kern.mode_good_idle 495
+system.cpu1.kern.mode_switch_kernel 2345 # number of protection mode switches
+system.cpu1.kern.mode_switch_user 518 # number of protection mode switches
+system.cpu1.kern.mode_switch_idle 3028 # number of protection mode switches
+system.cpu1.kern.mode_switch_good 0.343914 # fraction of useful protection mode switches
+system.cpu1.kern.mode_switch_good_kernel 0.431983 # fraction of useful protection mode switches
system.cpu1.kern.mode_switch_good_user 1 # fraction of useful protection mode switches
-system.cpu1.kern.mode_switch_good_idle 0.152297 # fraction of useful protection mode switches
-system.cpu1.kern.mode_ticks_kernel 13359666 0.36% 0.36% # number of ticks spent at the given mode
-system.cpu1.kern.mode_ticks_user 1967356 0.05% 0.41% # number of ticks spent at the given mode
-system.cpu1.kern.mode_ticks_idle 3719571407 99.59% 100.00% # number of ticks spent at the given mode
-system.cpu1.kern.swap_context 2103 # number of times the context was actually changed
-system.cpu1.kern.syscall 150 # number of syscalls executed
-system.cpu1.kern.syscall_fork 1 0.67% 0.67% # number of syscalls executed
-system.cpu1.kern.syscall_read 16 10.67% 11.33% # number of syscalls executed
-system.cpu1.kern.syscall_close 16 10.67% 22.00% # number of syscalls executed
-system.cpu1.kern.syscall_chmod 1 0.67% 22.67% # number of syscalls executed
-system.cpu1.kern.syscall_obreak 9 6.00% 28.67% # number of syscalls executed
-system.cpu1.kern.syscall_lseek 3 2.00% 30.67% # number of syscalls executed
-system.cpu1.kern.syscall_getpid 2 1.33% 32.00% # number of syscalls executed
-system.cpu1.kern.syscall_setuid 3 2.00% 34.00% # number of syscalls executed
-system.cpu1.kern.syscall_getuid 3 2.00% 36.00% # number of syscalls executed
-system.cpu1.kern.syscall_access 5 3.33% 39.33% # number of syscalls executed
-system.cpu1.kern.syscall_open 25 16.67% 56.00% # number of syscalls executed
-system.cpu1.kern.syscall_getgid 3 2.00% 58.00% # number of syscalls executed
-system.cpu1.kern.syscall_sigprocmask 2 1.33% 59.33% # number of syscalls executed
-system.cpu1.kern.syscall_ioctl 2 1.33% 60.67% # number of syscalls executed
-system.cpu1.kern.syscall_readlink 1 0.67% 61.33% # number of syscalls executed
-system.cpu1.kern.syscall_execve 2 1.33% 62.67% # number of syscalls executed
-system.cpu1.kern.syscall_mmap 37 24.67% 87.33% # number of syscalls executed
-system.cpu1.kern.syscall_mprotect 12 8.00% 95.33% # number of syscalls executed
-system.cpu1.kern.syscall_dup2 1 0.67% 96.00% # number of syscalls executed
-system.cpu1.kern.syscall_fcntl 2 1.33% 97.33% # number of syscalls executed
-system.cpu1.kern.syscall_setgid 3 2.00% 99.33% # number of syscalls executed
-system.cpu1.kern.syscall_getrlimit 1 0.67% 100.00% # number of syscalls executed
-system.cpu1.not_idle_fraction 0.005073 # Percentage of non-idle cycles
-system.cpu1.numCycles 3734898877 # number of cpu cycles simulated
-system.cpu1.num_insts 14364039 # Number of instructions executed
-system.cpu1.num_refs 4590544 # Number of memory references
+system.cpu1.kern.mode_switch_good_idle 0.163474 # fraction of useful protection mode switches
+system.cpu1.kern.mode_ticks_kernel 63013938 1.59% 1.59% # number of ticks spent at the given mode
+system.cpu1.kern.mode_ticks_user 5102326 0.13% 1.72% # number of ticks spent at the given mode
+system.cpu1.kern.mode_ticks_idle 3899442912 98.28% 100.00% # number of ticks spent at the given mode
+system.cpu1.kern.swap_context 2278 # number of times the context was actually changed
+system.cpu1.kern.syscall 102 # number of syscalls executed
+system.cpu1.kern.syscall_fork 2 1.96% 1.96% # number of syscalls executed
+system.cpu1.kern.syscall_read 11 10.78% 12.75% # number of syscalls executed
+system.cpu1.kern.syscall_write 1 0.98% 13.73% # number of syscalls executed
+system.cpu1.kern.syscall_close 12 11.76% 25.49% # number of syscalls executed
+system.cpu1.kern.syscall_obreak 5 4.90% 30.39% # number of syscalls executed
+system.cpu1.kern.syscall_lseek 4 3.92% 34.31% # number of syscalls executed
+system.cpu1.kern.syscall_getpid 2 1.96% 36.27% # number of syscalls executed
+system.cpu1.kern.syscall_setuid 2 1.96% 38.24% # number of syscalls executed
+system.cpu1.kern.syscall_getuid 2 1.96% 40.20% # number of syscalls executed
+system.cpu1.kern.syscall_access 3 2.94% 43.14% # number of syscalls executed
+system.cpu1.kern.syscall_open 15 14.71% 57.84% # number of syscalls executed
+system.cpu1.kern.syscall_getgid 2 1.96% 59.80% # number of syscalls executed
+system.cpu1.kern.syscall_sigprocmask 3 2.94% 62.75% # number of syscalls executed
+system.cpu1.kern.syscall_ioctl 1 0.98% 63.73% # number of syscalls executed
+system.cpu1.kern.syscall_execve 2 1.96% 65.69% # number of syscalls executed
+system.cpu1.kern.syscall_mmap 22 21.57% 87.25% # number of syscalls executed
+system.cpu1.kern.syscall_mprotect 7 6.86% 94.12% # number of syscalls executed
+system.cpu1.kern.syscall_dup2 1 0.98% 95.10% # number of syscalls executed
+system.cpu1.kern.syscall_fcntl 2 1.96% 97.06% # number of syscalls executed
+system.cpu1.kern.syscall_setgid 2 1.96% 99.02% # number of syscalls executed
+system.cpu1.kern.syscall_getrlimit 1 0.98% 100.00% # number of syscalls executed
+system.cpu1.not_idle_fraction 0.025059 # Percentage of non-idle cycles
+system.cpu1.numCycles 3968772136 # number of cpu cycles simulated
+system.cpu1.num_insts 14633336 # Number of instructions executed
+system.cpu1.num_refs 4665250 # Number of memory references
system.disk0.dma_read_bytes 1024 # Number of bytes transfered via DMA reads (not PRD).
system.disk0.dma_read_full_pages 0 # Number of full page size DMA reads (not PRD).
system.disk0.dma_read_txs 1 # Number of DMA read transactions (not PRD).
diff --git a/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-timing-dual/stderr b/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-timing-dual/stderr
index 64d80c0d2..bfd64fb2b 100644
--- a/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-timing-dual/stderr
+++ b/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-timing-dual/stderr
@@ -1,6 +1,10 @@
+Warning: rounding error > tolerance
+ 0.002000 rounded to 0
+Warning: rounding error > tolerance
+ 0.002000 rounded to 0
0: system.tsunami.io.rtc: Real-time clock set to Sun Jan 1 00:00:00 2006
Listening for console connection on port 3457
0: system.remote_gdb.listener: listening for remote gdb #0 on port 7001
0: system.remote_gdb.listener: listening for remote gdb #1 on port 7002
warn: Entering event queue @ 0. Starting simulation...
-warn: 271343: Trying to launch CPU number 1!
+warn: 1082476: Trying to launch CPU number 1!
diff --git a/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-timing-dual/stdout b/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-timing-dual/stdout
index 0e22ad636..c97d4fc44 100644
--- a/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-timing-dual/stdout
+++ b/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-timing-dual/stdout
@@ -5,8 +5,8 @@ The Regents of The University of Michigan
All Rights Reserved
-M5 compiled Oct 10 2006 01:59:16
-M5 started Tue Oct 10 02:09:13 2006
-M5 executing on zamp.eecs.umich.edu
+M5 compiled Nov 5 2006 19:41:29
+M5 started Sun Nov 5 20:04:42 2006
+M5 executing on zizzer.eecs.umich.edu
command line: build/ALPHA_FS/m5.opt -d build/ALPHA_FS/tests/opt/quick/10.linux-boot/alpha/linux/tsunami-simple-timing-dual tests/run.py quick/10.linux-boot/alpha/linux/tsunami-simple-timing-dual
-Exiting @ tick 3734898877 because m5_exit instruction encountered
+Exiting @ tick 3970017178 because m5_exit instruction encountered
diff --git a/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-timing/config.ini b/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-timing/config.ini
index 6514a6af7..17b05cd2b 100644
--- a/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-timing/config.ini
+++ b/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-timing/config.ini
@@ -85,7 +85,6 @@ max_insts_all_threads=0
max_insts_any_thread=0
max_loads_all_threads=0
max_loads_any_thread=0
-mem=system.physmem
profile=0
progress_interval=0
system=system
@@ -144,19 +143,39 @@ cpu=system.cpu
[system.iobus]
type=Bus
+children=responder
bus_id=0
clock=2
+responder_set=true
width=64
default=system.tsunami.pciconfig.pio
port=system.bridge.side_a system.tsunami.cchip.pio system.tsunami.pchip.pio system.tsunami.fake_sm_chip.pio system.tsunami.fake_uart1.pio system.tsunami.fake_uart2.pio system.tsunami.fake_uart3.pio system.tsunami.fake_uart4.pio system.tsunami.fake_ppc.pio system.tsunami.fake_OROM.pio system.tsunami.fake_pnp_addr.pio system.tsunami.fake_pnp_write.pio system.tsunami.fake_pnp_read0.pio system.tsunami.fake_pnp_read1.pio system.tsunami.fake_pnp_read2.pio system.tsunami.fake_pnp_read3.pio system.tsunami.fake_pnp_read4.pio system.tsunami.fake_pnp_read5.pio system.tsunami.fake_pnp_read6.pio system.tsunami.fake_pnp_read7.pio system.tsunami.fake_ata0.pio system.tsunami.fake_ata1.pio system.tsunami.fb.pio system.tsunami.io.pio system.tsunami.uart.pio system.tsunami.console.pio system.tsunami.ide.pio system.tsunami.ethernet.pio system.tsunami.ethernet.config system.tsunami.ethernet.dma system.tsunami.ide.config system.tsunami.ide.dma
+[system.iobus.responder]
+type=BadAddr
+pio_addr=0
+pio_latency=0
+platform=system.tsunami
+system=system
+
[system.membus]
type=Bus
+children=responder
bus_id=1
clock=2
+responder_set=false
width=64
+default=system.membus.responder.pio
port=system.bridge.side_b system.physmem.port system.cpu.icache_port system.cpu.dcache_port
+[system.membus.responder]
+type=BadAddr
+pio_addr=0
+pio_latency=0
+platform=system.tsunami
+system=system
+pio=system.membus.default
+
[system.physmem]
type=PhysicalMemory
file=
diff --git a/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-timing/config.out b/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-timing/config.out
index 173819299..6f0210a4c 100644
--- a/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-timing/config.out
+++ b/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-timing/config.out
@@ -31,6 +31,50 @@ type=Bus
bus_id=1
clock=2
width=64
+responder_set=false
+
+[system.cpu.itb]
+type=AlphaITB
+size=48
+
+[system.cpu.dtb]
+type=AlphaDTB
+size=64
+
+[system.cpu]
+type=TimingSimpleCPU
+max_insts_any_thread=0
+max_insts_all_threads=0
+max_loads_any_thread=0
+max_loads_all_threads=0
+progress_interval=0
+system=system
+cpu_id=0
+itb=system.cpu.itb
+dtb=system.cpu.dtb
+profile=0
+clock=1
+defer_registration=false
+// width not specified
+function_trace=false
+function_trace_start=0
+// simulate_stalls not specified
+
+[system.intrctrl]
+type=IntrControl
+cpu=system.cpu
+
+[system.tsunami]
+type=Tsunami
+system=system
+intrctrl=system.intrctrl
+
+[system.membus.responder]
+type=BadAddr
+pio_addr=0
+pio_latency=0
+platform=system.tsunami
+system=system
[system.bridge]
type=Bridge
@@ -75,38 +119,6 @@ image=system.disk2.image
driveID=master
delay=2000
-[system.cpu.itb]
-type=AlphaITB
-size=48
-
-[system.cpu.dtb]
-type=AlphaDTB
-size=64
-
-[system.cpu]
-type=TimingSimpleCPU
-max_insts_any_thread=0
-max_insts_all_threads=0
-max_loads_any_thread=0
-max_loads_all_threads=0
-progress_interval=0
-mem=system.physmem
-system=system
-cpu_id=0
-itb=system.cpu.itb
-dtb=system.cpu.dtb
-profile=0
-clock=1
-defer_registration=false
-// width not specified
-function_trace=false
-function_trace_start=0
-// simulate_stalls not specified
-
-[system.intrctrl]
-type=IntrControl
-cpu=system.cpu
-
[system.simple_disk.disk]
type=RawDiskImage
image_file=/dist/m5/system/disks/linux-latest.img
@@ -117,11 +129,6 @@ type=SimpleDisk
system=system
disk=system.simple_disk.disk
-[system.tsunami]
-type=Tsunami
-system=system
-intrctrl=system.intrctrl
-
[system.tsunami.fake_uart1]
type=IsaFake
pio_addr=8804615848696
@@ -467,6 +474,14 @@ type=Bus
bus_id=0
clock=2
width=64
+responder_set=true
+
+[system.iobus.responder]
+type=BadAddr
+pio_addr=0
+pio_latency=0
+platform=system.tsunami
+system=system
[trace]
flags=
diff --git a/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-timing/m5stats.txt b/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-timing/m5stats.txt
index c126b03a3..6c7f8faed 100644
--- a/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-timing/m5stats.txt
+++ b/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-timing/m5stats.txt
@@ -1,86 +1,86 @@
---------- Begin Simulation Statistics ----------
-host_inst_rate 244619 # Simulator instruction rate (inst/s)
-host_mem_usage 197804 # Number of bytes of host memory used
-host_seconds 252.48 # Real time elapsed on the host
-host_tick_rate 14464234 # Simulator tick rate (ticks/s)
+host_inst_rate 351787 # Simulator instruction rate (inst/s)
+host_mem_usage 198432 # Number of bytes of host memory used
+host_seconds 175.87 # Real time elapsed on the host
+host_tick_rate 22032614 # Simulator tick rate (ticks/s)
sim_freq 2000000000 # Frequency of simulated ticks
-sim_insts 61760478 # Number of instructions simulated
-sim_seconds 1.825937 # Number of seconds simulated
-sim_ticks 3651873858 # Number of ticks simulated
-system.cpu.dtb.accesses 1304494 # DTB accesses
+sim_insts 61868161 # Number of instructions simulated
+sim_seconds 1.937422 # Number of seconds simulated
+sim_ticks 3874844018 # Number of ticks simulated
+system.cpu.dtb.accesses 1304554 # DTB accesses
system.cpu.dtb.acv 367 # DTB access violations
-system.cpu.dtb.hits 16545335 # DTB hits
-system.cpu.dtb.misses 11425 # DTB misses
-system.cpu.dtb.read_accesses 900425 # DTB read accesses
+system.cpu.dtb.hits 16566194 # DTB hits
+system.cpu.dtb.misses 11447 # DTB misses
+system.cpu.dtb.read_accesses 900486 # DTB read accesses
system.cpu.dtb.read_acv 210 # DTB read access violations
-system.cpu.dtb.read_hits 10034117 # DTB read hits
-system.cpu.dtb.read_misses 10280 # DTB read misses
-system.cpu.dtb.write_accesses 404069 # DTB write accesses
+system.cpu.dtb.read_hits 10048141 # DTB read hits
+system.cpu.dtb.read_misses 10303 # DTB read misses
+system.cpu.dtb.write_accesses 404068 # DTB write accesses
system.cpu.dtb.write_acv 157 # DTB write access violations
-system.cpu.dtb.write_hits 6511218 # DTB write hits
-system.cpu.dtb.write_misses 1145 # DTB write misses
-system.cpu.idle_fraction 0.978539 # Percentage of idle cycles
-system.cpu.itb.accesses 3281311 # ITB accesses
+system.cpu.dtb.write_hits 6518053 # DTB write hits
+system.cpu.dtb.write_misses 1144 # DTB write misses
+system.cpu.idle_fraction 0.918945 # Percentage of idle cycles
+system.cpu.itb.accesses 3281349 # ITB accesses
system.cpu.itb.acv 184 # ITB acv
-system.cpu.itb.hits 3276321 # ITB hits
-system.cpu.itb.misses 4990 # ITB misses
-system.cpu.kern.callpal 193987 # number of callpals executed
+system.cpu.itb.hits 3276346 # ITB hits
+system.cpu.itb.misses 5003 # ITB misses
+system.cpu.kern.callpal 195242 # number of callpals executed
system.cpu.kern.callpal_cserve 1 0.00% 0.00% # number of callpals executed
system.cpu.kern.callpal_wrmces 1 0.00% 0.00% # number of callpals executed
system.cpu.kern.callpal_wrfen 1 0.00% 0.00% # number of callpals executed
system.cpu.kern.callpal_wrvptptr 1 0.00% 0.00% # number of callpals executed
-system.cpu.kern.callpal_swpctx 4203 2.17% 2.17% # number of callpals executed
-system.cpu.kern.callpal_tbi 54 0.03% 2.20% # number of callpals executed
-system.cpu.kern.callpal_wrent 7 0.00% 2.20% # number of callpals executed
-system.cpu.kern.callpal_swpipl 176881 91.18% 93.38% # number of callpals executed
-system.cpu.kern.callpal_rdps 6888 3.55% 96.93% # number of callpals executed
-system.cpu.kern.callpal_wrkgp 1 0.00% 96.93% # number of callpals executed
-system.cpu.kern.callpal_wrusp 7 0.00% 96.94% # number of callpals executed
-system.cpu.kern.callpal_rdusp 9 0.00% 96.94% # number of callpals executed
-system.cpu.kern.callpal_whami 2 0.00% 96.94% # number of callpals executed
-system.cpu.kern.callpal_rti 5219 2.69% 99.63% # number of callpals executed
+system.cpu.kern.callpal_swpctx 4161 2.13% 2.13% # number of callpals executed
+system.cpu.kern.callpal_tbi 54 0.03% 2.16% # number of callpals executed
+system.cpu.kern.callpal_wrent 7 0.00% 2.16% # number of callpals executed
+system.cpu.kern.callpal_swpipl 178096 91.22% 93.38% # number of callpals executed
+system.cpu.kern.callpal_rdps 6977 3.57% 96.96% # number of callpals executed
+system.cpu.kern.callpal_wrkgp 1 0.00% 96.96% # number of callpals executed
+system.cpu.kern.callpal_wrusp 7 0.00% 96.96% # number of callpals executed
+system.cpu.kern.callpal_rdusp 9 0.00% 96.96% # number of callpals executed
+system.cpu.kern.callpal_whami 2 0.00% 96.97% # number of callpals executed
+system.cpu.kern.callpal_rti 5212 2.67% 99.64% # number of callpals executed
system.cpu.kern.callpal_callsys 531 0.27% 99.91% # number of callpals executed
system.cpu.kern.callpal_imb 181 0.09% 100.00% # number of callpals executed
system.cpu.kern.inst.arm 0 # number of arm instructions executed
-system.cpu.kern.inst.hwrei 213061 # number of hwrei instructions executed
+system.cpu.kern.inst.hwrei 214344 # number of hwrei instructions executed
system.cpu.kern.inst.ivlb 0 # number of ivlb instructions executed
system.cpu.kern.inst.ivle 0 # number of ivle instructions executed
-system.cpu.kern.inst.quiesce 6207 # number of quiesce instructions executed
-system.cpu.kern.ipl_count 184207 # number of times we switched to this ipl
-system.cpu.kern.ipl_count_0 75390 40.93% 40.93% # number of times we switched to this ipl
-system.cpu.kern.ipl_count_21 245 0.13% 41.06% # number of times we switched to this ipl
-system.cpu.kern.ipl_count_22 1861 1.01% 42.07% # number of times we switched to this ipl
-system.cpu.kern.ipl_count_31 106711 57.93% 100.00% # number of times we switched to this ipl
-system.cpu.kern.ipl_good 150152 # number of times we switched to this ipl from a different ipl
-system.cpu.kern.ipl_good_0 74023 49.30% 49.30% # number of times we switched to this ipl from a different ipl
-system.cpu.kern.ipl_good_21 245 0.16% 49.46% # number of times we switched to this ipl from a different ipl
-system.cpu.kern.ipl_good_22 1861 1.24% 50.70% # number of times we switched to this ipl from a different ipl
-system.cpu.kern.ipl_good_31 74023 49.30% 100.00% # number of times we switched to this ipl from a different ipl
-system.cpu.kern.ipl_ticks 3651873412 # number of cycles we spent at this ipl
-system.cpu.kern.ipl_ticks_0 3611240657 98.89% 98.89% # number of cycles we spent at this ipl
-system.cpu.kern.ipl_ticks_21 53683 0.00% 98.89% # number of cycles we spent at this ipl
-system.cpu.kern.ipl_ticks_22 219598 0.01% 98.89% # number of cycles we spent at this ipl
-system.cpu.kern.ipl_ticks_31 40359474 1.11% 100.00% # number of cycles we spent at this ipl
-system.cpu.kern.ipl_used 0.815126 # fraction of swpipl calls that actually changed the ipl
-system.cpu.kern.ipl_used_0 0.981868 # fraction of swpipl calls that actually changed the ipl
+system.cpu.kern.inst.quiesce 6112 # number of quiesce instructions executed
+system.cpu.kern.ipl_count 185408 # number of times we switched to this ipl
+system.cpu.kern.ipl_count_0 75624 40.79% 40.79% # number of times we switched to this ipl
+system.cpu.kern.ipl_count_21 143 0.08% 40.87% # number of times we switched to this ipl
+system.cpu.kern.ipl_count_22 1956 1.05% 41.92% # number of times we switched to this ipl
+system.cpu.kern.ipl_count_31 107685 58.08% 100.00% # number of times we switched to this ipl
+system.cpu.kern.ipl_good 150613 # number of times we switched to this ipl from a different ipl
+system.cpu.kern.ipl_good_0 74257 49.30% 49.30% # number of times we switched to this ipl from a different ipl
+system.cpu.kern.ipl_good_21 143 0.09% 49.40% # number of times we switched to this ipl from a different ipl
+system.cpu.kern.ipl_good_22 1956 1.30% 50.70% # number of times we switched to this ipl from a different ipl
+system.cpu.kern.ipl_good_31 74257 49.30% 100.00% # number of times we switched to this ipl from a different ipl
+system.cpu.kern.ipl_ticks 3874842234 # number of cycles we spent at this ipl
+system.cpu.kern.ipl_ticks_0 3747190106 96.71% 96.71% # number of cycles we spent at this ipl
+system.cpu.kern.ipl_ticks_21 122728 0.00% 96.71% # number of cycles we spent at this ipl
+system.cpu.kern.ipl_ticks_22 915408 0.02% 96.73% # number of cycles we spent at this ipl
+system.cpu.kern.ipl_ticks_31 126613992 3.27% 100.00% # number of cycles we spent at this ipl
+system.cpu.kern.ipl_used 0.812333 # fraction of swpipl calls that actually changed the ipl
+system.cpu.kern.ipl_used_0 0.981924 # fraction of swpipl calls that actually changed the ipl
system.cpu.kern.ipl_used_21 1 # fraction of swpipl calls that actually changed the ipl
system.cpu.kern.ipl_used_22 1 # fraction of swpipl calls that actually changed the ipl
-system.cpu.kern.ipl_used_31 0.693677 # fraction of swpipl calls that actually changed the ipl
-system.cpu.kern.mode_good_kernel 1934
-system.cpu.kern.mode_good_user 1754
-system.cpu.kern.mode_good_idle 180
-system.cpu.kern.mode_switch_kernel 5984 # number of protection mode switches
-system.cpu.kern.mode_switch_user 1754 # number of protection mode switches
-system.cpu.kern.mode_switch_idle 2104 # number of protection mode switches
-system.cpu.kern.mode_switch_good 0.393010 # fraction of useful protection mode switches
-system.cpu.kern.mode_switch_good_kernel 0.323195 # fraction of useful protection mode switches
+system.cpu.kern.ipl_used_31 0.689576 # fraction of swpipl calls that actually changed the ipl
+system.cpu.kern.mode_good_kernel 1923
+system.cpu.kern.mode_good_user 1762
+system.cpu.kern.mode_good_idle 161
+system.cpu.kern.mode_switch_kernel 5967 # number of protection mode switches
+system.cpu.kern.mode_switch_user 1762 # number of protection mode switches
+system.cpu.kern.mode_switch_idle 2072 # number of protection mode switches
+system.cpu.kern.mode_switch_good 0.392409 # fraction of useful protection mode switches
+system.cpu.kern.mode_switch_good_kernel 0.322272 # fraction of useful protection mode switches
system.cpu.kern.mode_switch_good_user 1 # fraction of useful protection mode switches
-system.cpu.kern.mode_switch_good_idle 0.085551 # fraction of useful protection mode switches
-system.cpu.kern.mode_ticks_kernel 58926919 1.61% 1.61% # number of ticks spent at the given mode
-system.cpu.kern.mode_ticks_user 4685602 0.13% 1.74% # number of ticks spent at the given mode
-system.cpu.kern.mode_ticks_idle 3588260889 98.26% 100.00% # number of ticks spent at the given mode
-system.cpu.kern.swap_context 4204 # number of times the context was actually changed
+system.cpu.kern.mode_switch_good_idle 0.077703 # fraction of useful protection mode switches
+system.cpu.kern.mode_ticks_kernel 118227580 3.05% 3.05% # number of ticks spent at the given mode
+system.cpu.kern.mode_ticks_user 18744852 0.48% 3.53% # number of ticks spent at the given mode
+system.cpu.kern.mode_ticks_idle 3737869794 96.47% 100.00% # number of ticks spent at the given mode
+system.cpu.kern.swap_context 4162 # number of times the context was actually changed
system.cpu.kern.syscall 329 # number of syscalls executed
system.cpu.kern.syscall_fork 8 2.43% 2.43% # number of syscalls executed
system.cpu.kern.syscall_read 30 9.12% 11.55% # number of syscalls executed
@@ -112,10 +112,10 @@ system.cpu.kern.syscall_connect 2 0.61% 97.57% # nu
system.cpu.kern.syscall_setgid 4 1.22% 98.78% # number of syscalls executed
system.cpu.kern.syscall_getrlimit 2 0.61% 99.39% # number of syscalls executed
system.cpu.kern.syscall_setsid 2 0.61% 100.00% # number of syscalls executed
-system.cpu.not_idle_fraction 0.021461 # Percentage of non-idle cycles
-system.cpu.numCycles 3651873858 # number of cpu cycles simulated
-system.cpu.num_insts 61760478 # Number of instructions executed
-system.cpu.num_refs 16793874 # Number of memory references
+system.cpu.not_idle_fraction 0.081055 # Percentage of non-idle cycles
+system.cpu.numCycles 3874844018 # number of cpu cycles simulated
+system.cpu.num_insts 61868161 # Number of instructions executed
+system.cpu.num_refs 16814275 # Number of memory references
system.disk0.dma_read_bytes 1024 # Number of bytes transfered via DMA reads (not PRD).
system.disk0.dma_read_full_pages 0 # Number of full page size DMA reads (not PRD).
system.disk0.dma_read_txs 1 # Number of DMA read transactions (not PRD).
diff --git a/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-timing/stderr b/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-timing/stderr
index 4741dd710..dbafd6309 100644
--- a/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-timing/stderr
+++ b/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-timing/stderr
@@ -1,4 +1,8 @@
+Warning: rounding error > tolerance
+ 0.002000 rounded to 0
+Warning: rounding error > tolerance
+ 0.002000 rounded to 0
0: system.tsunami.io.rtc: Real-time clock set to Sun Jan 1 00:00:00 2006
-Listening for console connection on port 3457
-0: system.remote_gdb.listener: listening for remote gdb #0 on port 7001
+Listening for console connection on port 3456
+0: system.remote_gdb.listener: listening for remote gdb #0 on port 7000
warn: Entering event queue @ 0. Starting simulation...
diff --git a/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-timing/stdout b/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-timing/stdout
index 2ffd4c8b9..9ae43c290 100644
--- a/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-timing/stdout
+++ b/tests/quick/10.linux-boot/ref/alpha/linux/tsunami-simple-timing/stdout
@@ -5,8 +5,8 @@ The Regents of The University of Michigan
All Rights Reserved
-M5 compiled Oct 10 2006 01:59:16
-M5 started Tue Oct 10 02:04:59 2006
-M5 executing on zamp.eecs.umich.edu
+M5 compiled Nov 5 2006 19:41:29
+M5 started Sun Nov 5 20:04:39 2006
+M5 executing on zizzer.eecs.umich.edu
command line: build/ALPHA_FS/m5.opt -d build/ALPHA_FS/tests/opt/quick/10.linux-boot/alpha/linux/tsunami-simple-timing tests/run.py quick/10.linux-boot/alpha/linux/tsunami-simple-timing
-Exiting @ tick 3651873858 because m5_exit instruction encountered
+Exiting @ tick 3874844018 because m5_exit instruction encountered
diff --git a/tests/quick/20.eio-short/ref/alpha/eio/simple-atomic/config.ini b/tests/quick/20.eio-short/ref/alpha/eio/simple-atomic/config.ini
index 95cccfbf2..8fd60d527 100644
--- a/tests/quick/20.eio-short/ref/alpha/eio/simple-atomic/config.ini
+++ b/tests/quick/20.eio-short/ref/alpha/eio/simple-atomic/config.ini
@@ -64,7 +64,6 @@ max_insts_all_threads=0
max_insts_any_thread=500000
max_loads_all_threads=0
max_loads_any_thread=0
-mem=system.physmem
progress_interval=0
simulate_stalls=false
system=system
@@ -83,6 +82,9 @@ system=system
[system.membus]
type=Bus
bus_id=0
+clock=1000
+responder_set=false
+width=64
port=system.physmem.port system.cpu.icache_port system.cpu.dcache_port
[system.physmem]
diff --git a/tests/quick/20.eio-short/ref/alpha/eio/simple-atomic/config.out b/tests/quick/20.eio-short/ref/alpha/eio/simple-atomic/config.out
index 1138f2dbe..fe1ff652e 100644
--- a/tests/quick/20.eio-short/ref/alpha/eio/simple-atomic/config.out
+++ b/tests/quick/20.eio-short/ref/alpha/eio/simple-atomic/config.out
@@ -19,6 +19,9 @@ mem_mode=atomic
[system.membus]
type=Bus
bus_id=0
+clock=1000
+width=64
+responder_set=false
[system.cpu.workload]
type=EioProcess
@@ -34,7 +37,6 @@ max_insts_all_threads=0
max_loads_any_thread=0
max_loads_all_threads=0
progress_interval=0
-mem=system.physmem
system=system
cpu_id=0
workload=system.cpu.workload
diff --git a/tests/quick/20.eio-short/ref/alpha/eio/simple-atomic/m5stats.txt b/tests/quick/20.eio-short/ref/alpha/eio/simple-atomic/m5stats.txt
index bbc6e55b5..50d3a76c7 100644
--- a/tests/quick/20.eio-short/ref/alpha/eio/simple-atomic/m5stats.txt
+++ b/tests/quick/20.eio-short/ref/alpha/eio/simple-atomic/m5stats.txt
@@ -1,9 +1,9 @@
---------- Begin Simulation Statistics ----------
-host_inst_rate 1432213 # Simulator instruction rate (inst/s)
-host_mem_usage 147652 # Number of bytes of host memory used
-host_seconds 0.35 # Real time elapsed on the host
-host_tick_rate 1430432 # Simulator tick rate (ticks/s)
+host_inst_rate 1281059 # Simulator instruction rate (inst/s)
+host_mem_usage 147756 # Number of bytes of host memory used
+host_seconds 0.39 # Real time elapsed on the host
+host_tick_rate 1279755 # Simulator tick rate (ticks/s)
sim_freq 1000000000000 # Frequency of simulated ticks
sim_insts 500000 # Number of instructions simulated
sim_seconds 0.000000 # Number of seconds simulated
diff --git a/tests/quick/20.eio-short/ref/alpha/eio/simple-atomic/stdout b/tests/quick/20.eio-short/ref/alpha/eio/simple-atomic/stdout
index de2559c1c..18a78c936 100644
--- a/tests/quick/20.eio-short/ref/alpha/eio/simple-atomic/stdout
+++ b/tests/quick/20.eio-short/ref/alpha/eio/simple-atomic/stdout
@@ -7,8 +7,8 @@ The Regents of The University of Michigan
All Rights Reserved


-M5 compiled Oct 8 2006 14:00:39
-M5 started Sun Oct 8 14:00:58 2006
+M5 compiled Nov 3 2006 17:10:27
+M5 started Fri Nov 3 17:10:57 2006
M5 executing on zizzer.eecs.umich.edu
command line: build/ALPHA_SE/m5.opt -d build/ALPHA_SE/tests/opt/quick/20.eio-short/alpha/eio/simple-atomic tests/run.py quick/20.eio-short/alpha/eio/simple-atomic
Exiting @ tick 499999 because a thread reached the max instruction count
diff --git a/tests/quick/20.eio-short/ref/alpha/eio/simple-timing/config.ini b/tests/quick/20.eio-short/ref/alpha/eio/simple-timing/config.ini
index a3e69e540..ed47bcbe5 100644
--- a/tests/quick/20.eio-short/ref/alpha/eio/simple-timing/config.ini
+++ b/tests/quick/20.eio-short/ref/alpha/eio/simple-timing/config.ini
@@ -64,7 +64,6 @@ max_insts_all_threads=0
max_insts_any_thread=500000
max_loads_all_threads=0
max_loads_any_thread=0
-mem=system.cpu.dcache
progress_interval=0
system=system
workload=system.cpu.workload
@@ -78,7 +77,6 @@ assoc=2
block_size=64
compressed_bus=false
compression_latency=0
-do_copy=false
hash_delay=1
hit_latency=1
latency=1
@@ -118,7 +116,6 @@ assoc=2
block_size=64
compressed_bus=false
compression_latency=0
-do_copy=false
hash_delay=1
hit_latency=1
latency=1
@@ -158,7 +155,6 @@ assoc=2
block_size=64
compressed_bus=false
compression_latency=0
-do_copy=false
hash_delay=1
hit_latency=1
latency=1
@@ -195,6 +191,7 @@ mem_side=system.membus.port[1]
type=Bus
bus_id=0
clock=1000
+responder_set=false
width=64
port=system.cpu.icache.mem_side system.cpu.dcache.mem_side system.cpu.l2cache.cpu_side

@@ -209,6 +206,7 @@ system=system
type=Bus
bus_id=0
clock=1000
+responder_set=false
width=64
port=system.physmem.port system.cpu.l2cache.mem_side

diff --git a/tests/quick/20.eio-short/ref/alpha/eio/simple-timing/config.out b/tests/quick/20.eio-short/ref/alpha/eio/simple-timing/config.out
index 3d64b3547..2dc04ff04 100644
--- a/tests/quick/20.eio-short/ref/alpha/eio/simple-timing/config.out
+++ b/tests/quick/20.eio-short/ref/alpha/eio/simple-timing/config.out
@@ -21,10 +21,42 @@ type=Bus
bus_id=0
clock=1000
width=64
+responder_set=false

-[system.cpu.dcache]
+[system.cpu.workload]
+type=EioProcess
+file=tests/test-progs/anagram/bin/alpha/eio/anagram-vshort.eio.gz
+chkpt=
+output=cout
+system=system
+
+[system.cpu]
+type=TimingSimpleCPU
+max_insts_any_thread=500000
+max_insts_all_threads=0
+max_loads_any_thread=0
+max_loads_all_threads=0
+progress_interval=0
+system=system
+cpu_id=0
+workload=system.cpu.workload
+clock=1
+defer_registration=false
+// width not specified
+function_trace=false
+function_trace_start=0
+// simulate_stalls not specified
+
+[system.cpu.toL2Bus]
+type=Bus
+bus_id=0
+clock=1000
+width=64
+responder_set=false
+
+[system.cpu.icache]
type=BaseCache
-size=262144
+size=131072
assoc=2
block_size=64
latency=1
@@ -32,7 +64,6 @@ mshrs=10
tgts_per_mshr=5
write_buffers=8
prioritizeRequests=false
-do_copy=false
protocol=null
trace_addr=0
hash_delay=1
@@ -61,40 +92,9 @@ prefetch_use_cpu_id=true
prefetch_data_accesses_only=false
hit_latency=1

-[system.cpu.workload]
-type=EioProcess
-file=tests/test-progs/anagram/bin/alpha/eio/anagram-vshort.eio.gz
-chkpt=
-output=cout
-system=system
-
-[system.cpu]
-type=TimingSimpleCPU
-max_insts_any_thread=500000
-max_insts_all_threads=0
-max_loads_any_thread=0
-max_loads_all_threads=0
-progress_interval=0
-mem=system.cpu.dcache
-system=system
-cpu_id=0
-workload=system.cpu.workload
-clock=1
-defer_registration=false
-// width not specified
-function_trace=false
-function_trace_start=0
-// simulate_stalls not specified
-
-[system.cpu.toL2Bus]
-type=Bus
-bus_id=0
-clock=1000
-width=64
-
-[system.cpu.icache]
+[system.cpu.dcache]
type=BaseCache
-size=131072
+size=262144
assoc=2
block_size=64
latency=1
@@ -102,7 +102,6 @@ mshrs=10
tgts_per_mshr=5
write_buffers=8
prioritizeRequests=false
-do_copy=false
protocol=null
trace_addr=0
hash_delay=1
@@ -141,7 +140,6 @@ mshrs=10
tgts_per_mshr=5
write_buffers=8
prioritizeRequests=false
-do_copy=false
protocol=null
trace_addr=0
hash_delay=1
diff --git a/tests/quick/20.eio-short/ref/alpha/eio/simple-timing/m5stats.txt b/tests/quick/20.eio-short/ref/alpha/eio/simple-timing/m5stats.txt
index a786f3201..d8d06877e 100644
--- a/tests/quick/20.eio-short/ref/alpha/eio/simple-timing/m5stats.txt
+++ b/tests/quick/20.eio-short/ref/alpha/eio/simple-timing/m5stats.txt
@@ -1,9 +1,9 @@
---------- Begin Simulation Statistics ----------
-host_inst_rate 66568 # Simulator instruction rate (inst/s)
-host_mem_usage 179344 # Number of bytes of host memory used
-host_seconds 7.51 # Real time elapsed on the host
-host_tick_rate 530155 # Simulator tick rate (ticks/s)
+host_inst_rate 542626 # Simulator instruction rate (inst/s)
+host_mem_usage 178896 # Number of bytes of host memory used
+host_seconds 0.92 # Real time elapsed on the host
+host_tick_rate 4319791 # Simulator tick rate (ticks/s)
sim_freq 1000000000000 # Frequency of simulated ticks
sim_insts 500000 # Number of instructions simulated
sim_seconds 0.000004 # Number of seconds simulated
@@ -53,7 +53,7 @@ system.cpu.dcache.no_allocate_misses 0 # Nu
system.cpu.dcache.overall_accesses 180775 # number of overall (read+write) accesses
system.cpu.dcache.overall_avg_miss_latency 3743.121145 # average overall miss latency
system.cpu.dcache.overall_avg_mshr_miss_latency 2743.121145 # average overall mshr miss latency
-system.cpu.dcache.overall_avg_mshr_uncacheable_latency no value # average overall mshr uncacheable latency
+system.cpu.dcache.overall_avg_mshr_uncacheable_latency <err: div-0> # average overall mshr uncacheable latency
system.cpu.dcache.overall_hits 180321 # number of overall hits
system.cpu.dcache.overall_miss_latency 1699377 # number of overall miss cycles
system.cpu.dcache.overall_miss_rate 0.002511 # miss rate for overall accesses
@@ -115,7 +115,7 @@ system.cpu.icache.no_allocate_misses 0 # Nu
system.cpu.icache.overall_accesses 500000 # number of overall (read+write) accesses
system.cpu.icache.overall_avg_miss_latency 3977.722084 # average overall miss latency
system.cpu.icache.overall_avg_mshr_miss_latency 2977.722084 # average overall mshr miss latency
-system.cpu.icache.overall_avg_mshr_uncacheable_latency no value # average overall mshr uncacheable latency
+system.cpu.icache.overall_avg_mshr_uncacheable_latency <err: div-0> # average overall mshr uncacheable latency
system.cpu.icache.overall_hits 499597 # number of overall hits
system.cpu.icache.overall_miss_latency 1603022 # number of overall miss cycles
system.cpu.icache.overall_miss_rate 0.000806 # miss rate for overall accesses
@@ -177,7 +177,7 @@ system.cpu.l2cache.no_allocate_misses 0 # Nu
system.cpu.l2cache.overall_accesses 857 # number of overall (read+write) accesses
system.cpu.l2cache.overall_avg_miss_latency 2853.441074 # average overall miss latency
system.cpu.l2cache.overall_avg_mshr_miss_latency 1852.441074 # average overall mshr miss latency
-system.cpu.l2cache.overall_avg_mshr_uncacheable_latency no value # average overall mshr uncacheable latency
+system.cpu.l2cache.overall_avg_mshr_uncacheable_latency <err: div-0> # average overall mshr uncacheable latency
system.cpu.l2cache.overall_hits 0 # number of overall hits
system.cpu.l2cache.overall_miss_latency 2445399 # number of overall miss cycles
system.cpu.l2cache.overall_miss_rate 1 # miss rate for overall accesses
diff --git a/tests/quick/20.eio-short/ref/alpha/eio/simple-timing/stdout b/tests/quick/20.eio-short/ref/alpha/eio/simple-timing/stdout
index 2f704cddb..787ea041d 100644
--- a/tests/quick/20.eio-short/ref/alpha/eio/simple-timing/stdout
+++ b/tests/quick/20.eio-short/ref/alpha/eio/simple-timing/stdout
@@ -7,8 +7,8 @@ The Regents of The University of Michigan
All Rights Reserved


-M5 compiled Oct 13 2006 16:07:10
-M5 started Fri Oct 13 16:09:55 2006
+M5 compiled Nov 3 2006 17:10:27
+M5 started Fri Nov 3 17:10:58 2006
M5 executing on zizzer.eecs.umich.edu
-command line: build/ALPHA_SE/m5.debug -d build/ALPHA_SE/tests/debug/quick/20.eio-short/alpha/eio/simple-timing tests/run.py quick/20.eio-short/alpha/eio/simple-timing
+command line: build/ALPHA_SE/m5.opt -d build/ALPHA_SE/tests/opt/quick/20.eio-short/alpha/eio/simple-timing tests/run.py quick/20.eio-short/alpha/eio/simple-timing
Exiting @ tick 3982316 because a thread reached the max instruction count