Diffstat (limited to 'src/cpu/simple')
-rw-r--r--  src/cpu/simple/atomic.cc  | 560
-rw-r--r--  src/cpu/simple/atomic.hh  | 139
-rw-r--r--  src/cpu/simple/base.cc    | 478
-rw-r--r--  src/cpu/simple/base.hh    | 316
-rw-r--r--  src/cpu/simple/timing.cc  | 570
-rw-r--r--  src/cpu/simple/timing.hh  | 150
6 files changed, 2213 insertions(+), 0 deletions(-)
diff --git a/src/cpu/simple/atomic.cc b/src/cpu/simple/atomic.cc
new file mode 100644
index 000000000..e9422b9c0
--- /dev/null
+++ b/src/cpu/simple/atomic.cc
@@ -0,0 +1,560 @@
+/*
+ * Copyright (c) 2002-2005 The Regents of The University of Michigan
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "arch/utility.hh"
+#include "cpu/exetrace.hh"
+#include "cpu/simple/atomic.hh"
+#include "mem/packet_impl.hh"
+#include "sim/builder.hh"
+
+using namespace std;
+using namespace TheISA;
+
+AtomicSimpleCPU::TickEvent::TickEvent(AtomicSimpleCPU *c)
+ : Event(&mainEventQueue, CPU_Tick_Pri), cpu(c)
+{
+}
+
+
+void
+AtomicSimpleCPU::TickEvent::process()
+{
+ cpu->tick();
+}
+
+const char *
+AtomicSimpleCPU::TickEvent::description()
+{
+ return "AtomicSimpleCPU tick event";
+}
+
+
+void
+AtomicSimpleCPU::init()
+{
+    // Create memory ports (connect them up)
+ Port *mem_dport = mem->getPort("");
+ dcachePort.setPeer(mem_dport);
+ mem_dport->setPeer(&dcachePort);
+
+ Port *mem_iport = mem->getPort("");
+ icachePort.setPeer(mem_iport);
+ mem_iport->setPeer(&icachePort);
+
+ BaseCPU::init();
+#if FULL_SYSTEM
+ for (int i = 0; i < execContexts.size(); ++i) {
+ ExecContext *xc = execContexts[i];
+
+ // initialize CPU, including PC
+ TheISA::initCPU(xc, xc->readCpuId());
+ }
+#endif
+}
+
+bool
+AtomicSimpleCPU::CpuPort::recvTiming(Packet *pkt)
+{
+ panic("AtomicSimpleCPU doesn't expect recvAtomic callback!");
+ return true;
+}
+
+Tick
+AtomicSimpleCPU::CpuPort::recvAtomic(Packet *pkt)
+{
+ panic("AtomicSimpleCPU doesn't expect recvAtomic callback!");
+ return curTick;
+}
+
+void
+AtomicSimpleCPU::CpuPort::recvFunctional(Packet *pkt)
+{
+ panic("AtomicSimpleCPU doesn't expect recvFunctional callback!");
+}
+
+void
+AtomicSimpleCPU::CpuPort::recvStatusChange(Status status)
+{
+ if (status == RangeChange)
+ return;
+
+ panic("AtomicSimpleCPU doesn't expect recvStatusChange callback!");
+}
+
+Packet *
+AtomicSimpleCPU::CpuPort::recvRetry()
+{
+ panic("AtomicSimpleCPU doesn't expect recvRetry callback!");
+ return NULL;
+}
+
+
+AtomicSimpleCPU::AtomicSimpleCPU(Params *p)
+ : BaseSimpleCPU(p), tickEvent(this),
+ width(p->width), simulate_stalls(p->simulate_stalls),
+ icachePort(this), dcachePort(this)
+{
+ _status = Idle;
+
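+    // The atomic CPU allocates one Request/Packet pair per access type up
+    // front and reuses them for every access (tick(), read(), and write()
+    // just reset and re-fill them), rather than allocating a fresh packet
+    // per access as TimingSimpleCPU does.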
+ ifetch_req = new Request(true);
+ ifetch_req->setAsid(0);
+    // @todo fix me and get the real CPU ID!!!
+ ifetch_req->setCpuNum(0);
+ ifetch_req->setSize(sizeof(MachInst));
+ ifetch_pkt = new Packet;
+ ifetch_pkt->cmd = Read;
+ ifetch_pkt->dataStatic(&inst);
+ ifetch_pkt->req = ifetch_req;
+ ifetch_pkt->size = sizeof(MachInst);
+ ifetch_pkt->dest = Packet::Broadcast;
+
+ data_read_req = new Request(true);
+    // @todo fix me and get the real CPU ID!!!
+ data_read_req->setCpuNum(0);
+ data_read_req->setAsid(0);
+ data_read_pkt = new Packet;
+ data_read_pkt->cmd = Read;
+ data_read_pkt->dataStatic(&dataReg);
+ data_read_pkt->req = data_read_req;
+ data_read_pkt->dest = Packet::Broadcast;
+
+ data_write_req = new Request(true);
+    // @todo fix me and get the real CPU ID!!!
+ data_write_req->setCpuNum(0);
+ data_write_req->setAsid(0);
+ data_write_pkt = new Packet;
+ data_write_pkt->cmd = Write;
+ data_write_pkt->req = data_write_req;
+ data_write_pkt->dest = Packet::Broadcast;
+}
+
+
+AtomicSimpleCPU::~AtomicSimpleCPU()
+{
+}
+
+void
+AtomicSimpleCPU::serialize(ostream &os)
+{
+ BaseSimpleCPU::serialize(os);
+ SERIALIZE_ENUM(_status);
+ nameOut(os, csprintf("%s.tickEvent", name()));
+ tickEvent.serialize(os);
+}
+
+void
+AtomicSimpleCPU::unserialize(Checkpoint *cp, const string &section)
+{
+ BaseSimpleCPU::unserialize(cp, section);
+ UNSERIALIZE_ENUM(_status);
+ tickEvent.unserialize(cp, csprintf("%s.tickEvent", section));
+}
+
+void
+AtomicSimpleCPU::switchOut(Sampler *s)
+{
+ sampler = s;
+ if (status() == Running) {
+ _status = SwitchedOut;
+
+ tickEvent.squash();
+ }
+ sampler->signalSwitched();
+}
+
+
+void
+AtomicSimpleCPU::takeOverFrom(BaseCPU *oldCPU)
+{
+ BaseCPU::takeOverFrom(oldCPU);
+
+ assert(!tickEvent.scheduled());
+
+ // if any of this CPU's ExecContexts are active, mark the CPU as
+ // running and schedule its tick event.
+ for (int i = 0; i < execContexts.size(); ++i) {
+ ExecContext *xc = execContexts[i];
+ if (xc->status() == ExecContext::Active && _status != Running) {
+ _status = Running;
+ tickEvent.schedule(curTick);
+ break;
+ }
+ }
+}
+
+
+void
+AtomicSimpleCPU::activateContext(int thread_num, int delay)
+{
+ assert(thread_num == 0);
+ assert(cpuXC);
+
+ assert(_status == Idle);
+ assert(!tickEvent.scheduled());
+
+ notIdleFraction++;
+ tickEvent.schedule(curTick + cycles(delay));
+ _status = Running;
+}
+
+
+void
+AtomicSimpleCPU::suspendContext(int thread_num)
+{
+ assert(thread_num == 0);
+ assert(cpuXC);
+
+ assert(_status == Running);
+
+ // tick event may not be scheduled if this gets called from inside
+ // an instruction's execution, e.g. "quiesce"
+ if (tickEvent.scheduled())
+ tickEvent.deschedule();
+
+ notIdleFraction--;
+ _status = Idle;
+}
+
+
+template <class T>
+Fault
+AtomicSimpleCPU::read(Addr addr, T &data, unsigned flags)
+{
+ data_read_req->setVaddr(addr);
+ data_read_req->setSize(sizeof(T));
+ data_read_req->setFlags(flags);
+ data_read_req->setTime(curTick);
+
+ if (traceData) {
+ traceData->setAddr(addr);
+ }
+
+ // translate to physical address
+ Fault fault = cpuXC->translateDataReadReq(data_read_req);
+
+ // Now do the access.
+ if (fault == NoFault) {
+ data_read_pkt->reset();
+ data_read_pkt->addr = data_read_req->getPaddr();
+ data_read_pkt->size = sizeof(T);
+
+ dcache_complete = dcachePort.sendAtomic(data_read_pkt);
+ dcache_access = true;
+
+ assert(data_read_pkt->result == Success);
+ data = data_read_pkt->get<T>();
+
+ }
+
+ // This will need a new way to tell if it has a dcache attached.
+ if (data_read_req->getFlags() & UNCACHEABLE)
+ recordEvent("Uncached Read");
+
+ return fault;
+}
+
+#ifndef DOXYGEN_SHOULD_SKIP_THIS
+
+template
+Fault
+AtomicSimpleCPU::read(Addr addr, uint64_t &data, unsigned flags);
+
+template
+Fault
+AtomicSimpleCPU::read(Addr addr, uint32_t &data, unsigned flags);
+
+template
+Fault
+AtomicSimpleCPU::read(Addr addr, uint16_t &data, unsigned flags);
+
+template
+Fault
+AtomicSimpleCPU::read(Addr addr, uint8_t &data, unsigned flags);
+
+#endif //DOXYGEN_SHOULD_SKIP_THIS
+
+template<>
+Fault
+AtomicSimpleCPU::read(Addr addr, double &data, unsigned flags)
+{
+ return read(addr, *(uint64_t*)&data, flags);
+}
+
+template<>
+Fault
+AtomicSimpleCPU::read(Addr addr, float &data, unsigned flags)
+{
+ return read(addr, *(uint32_t*)&data, flags);
+}
+
+
+template<>
+Fault
+AtomicSimpleCPU::read(Addr addr, int32_t &data, unsigned flags)
+{
+ return read(addr, (uint32_t&)data, flags);
+}
+
+
+template <class T>
+Fault
+AtomicSimpleCPU::write(T data, Addr addr, unsigned flags, uint64_t *res)
+{
+ data_write_req->setVaddr(addr);
+ data_write_req->setTime(curTick);
+ data_write_req->setSize(sizeof(T));
+ data_write_req->setFlags(flags);
+
+ if (traceData) {
+ traceData->setAddr(addr);
+ }
+
+ // translate to physical address
+ Fault fault = cpuXC->translateDataWriteReq(data_write_req);
+
+ // Now do the access.
+ if (fault == NoFault) {
+ data_write_pkt->reset();
+ data = htog(data);
+ data_write_pkt->dataStatic(&data);
+ data_write_pkt->addr = data_write_req->getPaddr();
+ data_write_pkt->size = sizeof(T);
+
+ dcache_complete = dcachePort.sendAtomic(data_write_pkt);
+ dcache_access = true;
+
+ assert(data_write_pkt->result == Success);
+
+ if (res && data_write_req->getFlags() & LOCKED) {
+ *res = data_write_req->getScResult();
+ }
+ }
+
+ // This will need a new way to tell if it's hooked up to a cache or not.
+ if (data_write_req->getFlags() & UNCACHEABLE)
+ recordEvent("Uncached Write");
+
+ // If the write needs to have a fault on the access, consider calling
+ // changeStatus() and changing it to "bad addr write" or something.
+ return fault;
+}
+
+
+#ifndef DOXYGEN_SHOULD_SKIP_THIS
+template
+Fault
+AtomicSimpleCPU::write(uint64_t data, Addr addr,
+ unsigned flags, uint64_t *res);
+
+template
+Fault
+AtomicSimpleCPU::write(uint32_t data, Addr addr,
+ unsigned flags, uint64_t *res);
+
+template
+Fault
+AtomicSimpleCPU::write(uint16_t data, Addr addr,
+ unsigned flags, uint64_t *res);
+
+template
+Fault
+AtomicSimpleCPU::write(uint8_t data, Addr addr,
+ unsigned flags, uint64_t *res);
+
+#endif //DOXYGEN_SHOULD_SKIP_THIS
+
+template<>
+Fault
+AtomicSimpleCPU::write(double data, Addr addr, unsigned flags, uint64_t *res)
+{
+ return write(*(uint64_t*)&data, addr, flags, res);
+}
+
+template<>
+Fault
+AtomicSimpleCPU::write(float data, Addr addr, unsigned flags, uint64_t *res)
+{
+ return write(*(uint32_t*)&data, addr, flags, res);
+}
+
+
+template<>
+Fault
+AtomicSimpleCPU::write(int32_t data, Addr addr, unsigned flags, uint64_t *res)
+{
+ return write((uint32_t)data, addr, flags, res);
+}
+
+
+void
+AtomicSimpleCPU::tick()
+{
+ Tick latency = cycles(1); // instruction takes one cycle by default
+
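+    // execute up to 'width' instructions back to back within this cycle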
+ for (int i = 0; i < width; ++i) {
+ numCycles++;
+
+ checkForInterrupts();
+
+ ifetch_req->resetMin();
+ ifetch_pkt->reset();
+ Fault fault = setupFetchPacket(ifetch_pkt);
+
+ if (fault == NoFault) {
+ Tick icache_complete = icachePort.sendAtomic(ifetch_pkt);
+            // ifetch_pkt was set up (via dataStatic(&inst) in the
+            // constructor) to deposit the fetched instruction directly
+            // into the CPU object's inst field, so no copy is needed here.
+
+ dcache_access = false; // assume no dcache access
+ preExecute();
+ fault = curStaticInst->execute(this, traceData);
+ postExecute();
+
+ if (traceData) {
+ traceData->finalize();
+ }
+
+ if (simulate_stalls) {
+ // This calculation assumes that the icache and dcache
+ // access latencies are always a multiple of the CPU's
+ // cycle time. If not, the next tick event may get
+ // scheduled at a non-integer multiple of the CPU
+ // cycle time.
+ Tick icache_stall = icache_complete - curTick - cycles(1);
+ Tick dcache_stall =
+ dcache_access ? dcache_complete - curTick - cycles(1) : 0;
+ latency += icache_stall + dcache_stall;
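+            // Example: if the icache access reports completion three
+            // cycles from now (icache_complete == curTick + 3*cycles(1)),
+            // the first cycle is covered by the base latency and the
+            // remaining 2*cycles(1) ticks are added here as stall time.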
+ }
+
+ }
+
+ advancePC(fault);
+ }
+
+ if (_status != Idle)
+ tickEvent.schedule(curTick + latency);
+}
+
+
+////////////////////////////////////////////////////////////////////////
+//
+// AtomicSimpleCPU Simulation Object
+//
+BEGIN_DECLARE_SIM_OBJECT_PARAMS(AtomicSimpleCPU)
+
+ Param<Counter> max_insts_any_thread;
+ Param<Counter> max_insts_all_threads;
+ Param<Counter> max_loads_any_thread;
+ Param<Counter> max_loads_all_threads;
+ SimObjectParam<MemObject *> mem;
+
+#if FULL_SYSTEM
+ SimObjectParam<AlphaITB *> itb;
+ SimObjectParam<AlphaDTB *> dtb;
+ SimObjectParam<System *> system;
+ Param<int> cpu_id;
+ Param<Tick> profile;
+#else
+ SimObjectParam<Process *> workload;
+#endif // FULL_SYSTEM
+
+ Param<int> clock;
+
+ Param<bool> defer_registration;
+ Param<int> width;
+ Param<bool> function_trace;
+ Param<Tick> function_trace_start;
+ Param<bool> simulate_stalls;
+
+END_DECLARE_SIM_OBJECT_PARAMS(AtomicSimpleCPU)
+
+BEGIN_INIT_SIM_OBJECT_PARAMS(AtomicSimpleCPU)
+
+ INIT_PARAM(max_insts_any_thread,
+ "terminate when any thread reaches this inst count"),
+ INIT_PARAM(max_insts_all_threads,
+ "terminate when all threads have reached this inst count"),
+ INIT_PARAM(max_loads_any_thread,
+ "terminate when any thread reaches this load count"),
+ INIT_PARAM(max_loads_all_threads,
+ "terminate when all threads have reached this load count"),
+ INIT_PARAM(mem, "memory"),
+
+#if FULL_SYSTEM
+ INIT_PARAM(itb, "Instruction TLB"),
+ INIT_PARAM(dtb, "Data TLB"),
+ INIT_PARAM(system, "system object"),
+ INIT_PARAM(cpu_id, "processor ID"),
+ INIT_PARAM(profile, ""),
+#else
+ INIT_PARAM(workload, "processes to run"),
+#endif // FULL_SYSTEM
+
+ INIT_PARAM(clock, "clock speed"),
+ INIT_PARAM(defer_registration, "defer system registration (for sampling)"),
+ INIT_PARAM(width, "cpu width"),
+ INIT_PARAM(function_trace, "Enable function trace"),
+ INIT_PARAM(function_trace_start, "Cycle to start function trace"),
+ INIT_PARAM(simulate_stalls, "Simulate cache stall cycles")
+
+END_INIT_SIM_OBJECT_PARAMS(AtomicSimpleCPU)
+
+
+CREATE_SIM_OBJECT(AtomicSimpleCPU)
+{
+ AtomicSimpleCPU::Params *params = new AtomicSimpleCPU::Params();
+ params->name = getInstanceName();
+ params->numberOfThreads = 1;
+ params->max_insts_any_thread = max_insts_any_thread;
+ params->max_insts_all_threads = max_insts_all_threads;
+ params->max_loads_any_thread = max_loads_any_thread;
+ params->max_loads_all_threads = max_loads_all_threads;
+ params->deferRegistration = defer_registration;
+ params->clock = clock;
+ params->functionTrace = function_trace;
+ params->functionTraceStart = function_trace_start;
+ params->width = width;
+ params->simulate_stalls = simulate_stalls;
+ params->mem = mem;
+
+#if FULL_SYSTEM
+ params->itb = itb;
+ params->dtb = dtb;
+ params->system = system;
+ params->cpu_id = cpu_id;
+ params->profile = profile;
+#else
+ params->process = workload;
+#endif
+
+ AtomicSimpleCPU *cpu = new AtomicSimpleCPU(params);
+ return cpu;
+}
+
+REGISTER_SIM_OBJECT("AtomicSimpleCPU", AtomicSimpleCPU)
+
diff --git a/src/cpu/simple/atomic.hh b/src/cpu/simple/atomic.hh
new file mode 100644
index 000000000..d0ba085f0
--- /dev/null
+++ b/src/cpu/simple/atomic.hh
@@ -0,0 +1,139 @@
+/*
+ * Copyright (c) 2002-2005 The Regents of The University of Michigan
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __CPU_SIMPLE_ATOMIC_HH__
+#define __CPU_SIMPLE_ATOMIC_HH__
+
+#include "cpu/simple/base.hh"
+
+class AtomicSimpleCPU : public BaseSimpleCPU
+{
+ public:
+
+ struct Params : public BaseSimpleCPU::Params {
+ int width;
+ bool simulate_stalls;
+ };
+
+ AtomicSimpleCPU(Params *params);
+ virtual ~AtomicSimpleCPU();
+
+ virtual void init();
+
+ public:
+ //
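+    // Running/Idle are the usual states; SwitchedOut is entered from
+    // switchOut() when a Sampler hands execution over to another CPU
+    // model (see takeOverFrom()).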
+ enum Status {
+ Running,
+ Idle,
+ SwitchedOut
+ };
+
+ protected:
+ Status _status;
+
+ Status status() const { return _status; }
+
+ private:
+
+ struct TickEvent : public Event
+ {
+ AtomicSimpleCPU *cpu;
+
+ TickEvent(AtomicSimpleCPU *c);
+ void process();
+ const char *description();
+ };
+
+ TickEvent tickEvent;
+
+ const int width;
+ const bool simulate_stalls;
+
+ // main simulation loop (one cycle)
+ void tick();
+
+ class CpuPort : public Port
+ {
+
+ AtomicSimpleCPU *cpu;
+
+ public:
+
+ CpuPort(AtomicSimpleCPU *_cpu)
+ : cpu(_cpu)
+ { }
+
+ protected:
+
+ virtual bool recvTiming(Packet *pkt);
+
+ virtual Tick recvAtomic(Packet *pkt);
+
+ virtual void recvFunctional(Packet *pkt);
+
+ virtual void recvStatusChange(Status status);
+
+ virtual Packet *recvRetry();
+
+ virtual void getDeviceAddressRanges(AddrRangeList &resp,
+ AddrRangeList &snoop)
+ { resp.clear(); snoop.clear(); }
+ };
+
+ CpuPort icachePort;
+ CpuPort dcachePort;
+
+ Request *ifetch_req;
+ Packet *ifetch_pkt;
+ Request *data_read_req;
+ Packet *data_read_pkt;
+ Request *data_write_req;
+ Packet *data_write_pkt;
+
+ bool dcache_access;
+ Tick dcache_complete;
+
+ public:
+
+ virtual void serialize(std::ostream &os);
+ virtual void unserialize(Checkpoint *cp, const std::string &section);
+
+ void switchOut(Sampler *s);
+ void takeOverFrom(BaseCPU *oldCPU);
+
+ virtual void activateContext(int thread_num, int delay);
+ virtual void suspendContext(int thread_num);
+
+ template <class T>
+ Fault read(Addr addr, T &data, unsigned flags);
+
+ template <class T>
+ Fault write(T data, Addr addr, unsigned flags, uint64_t *res);
+};
+
+#endif // __CPU_SIMPLE_ATOMIC_HH__
diff --git a/src/cpu/simple/base.cc b/src/cpu/simple/base.cc
new file mode 100644
index 000000000..30c002ed5
--- /dev/null
+++ b/src/cpu/simple/base.cc
@@ -0,0 +1,478 @@
+/*
+ * Copyright (c) 2002-2005 The Regents of The University of Michigan
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "arch/utility.hh"
+#include "base/cprintf.hh"
+#include "base/inifile.hh"
+#include "base/loader/symtab.hh"
+#include "base/misc.hh"
+#include "base/pollevent.hh"
+#include "base/range.hh"
+#include "base/stats/events.hh"
+#include "base/trace.hh"
+#include "cpu/base.hh"
+#include "cpu/cpu_exec_context.hh"
+#include "cpu/exec_context.hh"
+#include "cpu/exetrace.hh"
+#include "cpu/profile.hh"
+#include "cpu/sampler/sampler.hh"
+#include "cpu/simple/base.hh"
+#include "cpu/smt.hh"
+#include "cpu/static_inst.hh"
+#include "kern/kernel_stats.hh"
+#include "mem/packet_impl.hh"
+#include "sim/byteswap.hh"
+#include "sim/builder.hh"
+#include "sim/debug.hh"
+#include "sim/host.hh"
+#include "sim/sim_events.hh"
+#include "sim/sim_object.hh"
+#include "sim/stats.hh"
+
+#if FULL_SYSTEM
+#include "base/remote_gdb.hh"
+#include "sim/system.hh"
+#include "arch/tlb.hh"
+#include "arch/stacktrace.hh"
+#include "arch/vtophys.hh"
+#else // !FULL_SYSTEM
+#include "mem/mem_object.hh"
+#endif // FULL_SYSTEM
+
+using namespace std;
+using namespace TheISA;
+
+BaseSimpleCPU::BaseSimpleCPU(Params *p)
+ : BaseCPU(p), mem(p->mem), cpuXC(NULL)
+{
+#if FULL_SYSTEM
+ cpuXC = new CPUExecContext(this, 0, p->system, p->itb, p->dtb);
+#else
+ cpuXC = new CPUExecContext(this, /* thread_num */ 0, p->process,
+ /* asid */ 0, mem);
+#endif // !FULL_SYSTEM
+
+ xcProxy = cpuXC->getProxy();
+
+ numInst = 0;
+ startNumInst = 0;
+ numLoad = 0;
+ startNumLoad = 0;
+ lastIcacheStall = 0;
+ lastDcacheStall = 0;
+
+ execContexts.push_back(xcProxy);
+}
+
+BaseSimpleCPU::~BaseSimpleCPU()
+{
+}
+
+void
+BaseSimpleCPU::deallocateContext(int thread_num)
+{
+ // for now, these are equivalent
+ suspendContext(thread_num);
+}
+
+
+void
+BaseSimpleCPU::haltContext(int thread_num)
+{
+ // for now, these are equivalent
+ suspendContext(thread_num);
+}
+
+
+void
+BaseSimpleCPU::regStats()
+{
+ using namespace Stats;
+
+ BaseCPU::regStats();
+
+ numInsts
+ .name(name() + ".num_insts")
+ .desc("Number of instructions executed")
+ ;
+
+ numMemRefs
+ .name(name() + ".num_refs")
+ .desc("Number of memory references")
+ ;
+
+ notIdleFraction
+ .name(name() + ".not_idle_fraction")
+ .desc("Percentage of non-idle cycles")
+ ;
+
+ idleFraction
+ .name(name() + ".idle_fraction")
+ .desc("Percentage of idle cycles")
+ ;
+
+ icacheStallCycles
+ .name(name() + ".icache_stall_cycles")
+ .desc("ICache total stall cycles")
+ .prereq(icacheStallCycles)
+ ;
+
+ dcacheStallCycles
+ .name(name() + ".dcache_stall_cycles")
+ .desc("DCache total stall cycles")
+ .prereq(dcacheStallCycles)
+ ;
+
+ icacheRetryCycles
+ .name(name() + ".icache_retry_cycles")
+ .desc("ICache total retry cycles")
+ .prereq(icacheRetryCycles)
+ ;
+
+ dcacheRetryCycles
+ .name(name() + ".dcache_retry_cycles")
+ .desc("DCache total retry cycles")
+ .prereq(dcacheRetryCycles)
+ ;
+
+ idleFraction = constant(1.0) - notIdleFraction;
+}
+
+void
+BaseSimpleCPU::resetStats()
+{
+ startNumInst = numInst;
+ // notIdleFraction = (_status != Idle);
+}
+
+void
+BaseSimpleCPU::serialize(ostream &os)
+{
+ BaseCPU::serialize(os);
+ SERIALIZE_SCALAR(inst);
+ nameOut(os, csprintf("%s.xc", name()));
+ cpuXC->serialize(os);
+}
+
+void
+BaseSimpleCPU::unserialize(Checkpoint *cp, const string &section)
+{
+ BaseCPU::unserialize(cp, section);
+ UNSERIALIZE_SCALAR(inst);
+ cpuXC->unserialize(cp, csprintf("%s.xc", section));
+}
+
+void
+change_thread_state(int thread_number, int activate, int priority)
+{
+}
+
+Fault
+BaseSimpleCPU::copySrcTranslate(Addr src)
+{
+#if 0
+ static bool no_warn = true;
+ int blk_size = (dcacheInterface) ? dcacheInterface->getBlockSize() : 64;
+ // Only support block sizes of 64 atm.
+ assert(blk_size == 64);
+ int offset = src & (blk_size - 1);
+
+ // Make sure block doesn't span page
+ if (no_warn &&
+ (src & PageMask) != ((src + blk_size) & PageMask) &&
+ (src >> 40) != 0xfffffc) {
+ warn("Copied block source spans pages %x.", src);
+ no_warn = false;
+ }
+
+ memReq->reset(src & ~(blk_size - 1), blk_size);
+
+ // translate to physical address
+ Fault fault = cpuXC->translateDataReadReq(req);
+
+ if (fault == NoFault) {
+ cpuXC->copySrcAddr = src;
+ cpuXC->copySrcPhysAddr = memReq->paddr + offset;
+ } else {
+ assert(!fault->isAlignmentFault());
+
+ cpuXC->copySrcAddr = 0;
+ cpuXC->copySrcPhysAddr = 0;
+ }
+ return fault;
+#else
+ return NoFault;
+#endif
+}
+
+Fault
+BaseSimpleCPU::copy(Addr dest)
+{
+#if 0
+ static bool no_warn = true;
+ int blk_size = (dcacheInterface) ? dcacheInterface->getBlockSize() : 64;
+ // Only support block sizes of 64 atm.
+ assert(blk_size == 64);
+ uint8_t data[blk_size];
+ //assert(cpuXC->copySrcAddr);
+ int offset = dest & (blk_size - 1);
+
+ // Make sure block doesn't span page
+ if (no_warn &&
+ (dest & PageMask) != ((dest + blk_size) & PageMask) &&
+ (dest >> 40) != 0xfffffc) {
+ no_warn = false;
+ warn("Copied block destination spans pages %x. ", dest);
+ }
+
+ memReq->reset(dest & ~(blk_size -1), blk_size);
+ // translate to physical address
+ Fault fault = cpuXC->translateDataWriteReq(req);
+
+ if (fault == NoFault) {
+ Addr dest_addr = memReq->paddr + offset;
+ // Need to read straight from memory since we have more than 8 bytes.
+ memReq->paddr = cpuXC->copySrcPhysAddr;
+ cpuXC->mem->read(memReq, data);
+ memReq->paddr = dest_addr;
+ cpuXC->mem->write(memReq, data);
+ if (dcacheInterface) {
+ memReq->cmd = Copy;
+ memReq->completionEvent = NULL;
+ memReq->paddr = cpuXC->copySrcPhysAddr;
+ memReq->dest = dest_addr;
+ memReq->size = 64;
+ memReq->time = curTick;
+ memReq->flags &= ~INST_READ;
+ dcacheInterface->access(memReq);
+ }
+ }
+ else
+ assert(!fault->isAlignmentFault());
+
+ return fault;
+#else
+ panic("copy not implemented");
+ return NoFault;
+#endif
+}
+
+#if FULL_SYSTEM
+Addr
+BaseSimpleCPU::dbg_vtophys(Addr addr)
+{
+ return vtophys(xcProxy, addr);
+}
+#endif // FULL_SYSTEM
+
+#if FULL_SYSTEM
+void
+BaseSimpleCPU::post_interrupt(int int_num, int index)
+{
+ BaseCPU::post_interrupt(int_num, index);
+
+ if (cpuXC->status() == ExecContext::Suspended) {
+ DPRINTF(IPI,"Suspended Processor awoke\n");
+ cpuXC->activate();
+ }
+}
+#endif // FULL_SYSTEM
+
+void
+BaseSimpleCPU::checkForInterrupts()
+{
+#if FULL_SYSTEM
+ if (checkInterrupts && check_interrupts() && !cpuXC->inPalMode()) {
+ int ipl = 0;
+ int summary = 0;
+ checkInterrupts = false;
+
+ if (cpuXC->readMiscReg(IPR_SIRR)) {
+ for (int i = INTLEVEL_SOFTWARE_MIN;
+ i < INTLEVEL_SOFTWARE_MAX; i++) {
+ if (cpuXC->readMiscReg(IPR_SIRR) & (ULL(1) << i)) {
+ // See table 4-19 of 21164 hardware reference
+ ipl = (i - INTLEVEL_SOFTWARE_MIN) + 1;
+ summary |= (ULL(1) << i);
+ }
+ }
+ }
+
+ uint64_t interrupts = cpuXC->cpu->intr_status();
+ for (int i = INTLEVEL_EXTERNAL_MIN;
+ i < INTLEVEL_EXTERNAL_MAX; i++) {
+ if (interrupts & (ULL(1) << i)) {
+ // See table 4-19 of 21164 hardware reference
+ ipl = i;
+ summary |= (ULL(1) << i);
+ }
+ }
+
+ if (cpuXC->readMiscReg(IPR_ASTRR))
+ panic("asynchronous traps not implemented\n");
+
+ if (ipl && ipl > cpuXC->readMiscReg(IPR_IPLR)) {
+ cpuXC->setMiscReg(IPR_ISR, summary);
+ cpuXC->setMiscReg(IPR_INTID, ipl);
+
+ Fault(new InterruptFault)->invoke(xcProxy);
+
+ DPRINTF(Flow, "Interrupt! IPLR=%d ipl=%d summary=%x\n",
+ cpuXC->readMiscReg(IPR_IPLR), ipl, summary);
+ }
+ }
+#endif
+}
+
+
+Fault
+BaseSimpleCPU::setupFetchPacket(Packet *ifetch_pkt)
+{
+ // Try to fetch an instruction
+
+ // set up memory request for instruction fetch
+
+ DPRINTF(Fetch,"Fetch: PC:%08p NPC:%08p NNPC:%08p\n",cpuXC->readPC(),
+ cpuXC->readNextPC(),cpuXC->readNextNPC());
+
+ Request *ifetch_req = ifetch_pkt->req;
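+    // mask off the low PC bits to get an aligned fetch address; on Alpha
+    // bit 0 of the PC doubles as a flag (see the PHYSICAL check below)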
+ ifetch_req->setVaddr(cpuXC->readPC() & ~3);
+ ifetch_req->setTime(curTick);
+#if FULL_SYSTEM
+ ifetch_req->setFlags((cpuXC->readPC() & 1) ? PHYSICAL : 0);
+#else
+ ifetch_req->setFlags(0);
+#endif
+
+ Fault fault = cpuXC->translateInstReq(ifetch_req);
+
+ if (fault == NoFault) {
+ ifetch_pkt->addr = ifetch_req->getPaddr();
+ }
+
+ return fault;
+}
+
+
+void
+BaseSimpleCPU::preExecute()
+{
+ // maintain $r0 semantics
+ cpuXC->setIntReg(ZeroReg, 0);
+#if THE_ISA == ALPHA_ISA
+ cpuXC->setFloatReg(ZeroReg, 0.0);
+#endif // ALPHA_ISA
+
+ // keep an instruction count
+ numInst++;
+ numInsts++;
+
+ cpuXC->func_exe_inst++;
+
+ // check for instruction-count-based events
+ comInstEventQueue[0]->serviceEvents(numInst);
+
+ // decode the instruction
+ inst = gtoh(inst);
+ curStaticInst = StaticInst::decode(makeExtMI(inst, cpuXC->readPC()));
+
+ traceData = Trace::getInstRecord(curTick, xcProxy, this, curStaticInst,
+ cpuXC->readPC());
+
+ DPRINTF(Decode,"Decode: Decoded %s instruction (opcode: 0x%x): 0x%x\n",
+ curStaticInst->getName(), curStaticInst->getOpcode(),
+ curStaticInst->machInst);
+
+#if FULL_SYSTEM
+ cpuXC->setInst(inst);
+#endif // FULL_SYSTEM
+}
+
+void
+BaseSimpleCPU::postExecute()
+{
+#if FULL_SYSTEM
+ if (system->kernelBinning->fnbin) {
+ assert(kernelStats);
+ system->kernelBinning->execute(xcProxy, inst);
+ }
+
+ if (cpuXC->profile) {
+ bool usermode =
+ (cpuXC->readMiscReg(AlphaISA::IPR_DTB_CM) & 0x18) != 0;
+ cpuXC->profilePC = usermode ? 1 : cpuXC->readPC();
+ ProfileNode *node = cpuXC->profile->consume(xcProxy, inst);
+ if (node)
+ cpuXC->profileNode = node;
+ }
+#endif
+
+ if (curStaticInst->isMemRef()) {
+ numMemRefs++;
+ }
+
+ if (curStaticInst->isLoad()) {
+ ++numLoad;
+ comLoadEventQueue[0]->serviceEvents(numLoad);
+ }
+
+ traceFunctions(cpuXC->readPC());
+}
+
+
+void
+BaseSimpleCPU::advancePC(Fault fault)
+{
+ if (fault != NoFault) {
+#if FULL_SYSTEM
+ fault->invoke(xcProxy);
+#else // !FULL_SYSTEM
+ fatal("fault (%s) detected @ PC %08p", fault->name(), cpuXC->readPC());
+#endif // FULL_SYSTEM
+ }
+ else {
+ // go to the next instruction
+ cpuXC->setPC(cpuXC->readNextPC());
+#if THE_ISA == ALPHA_ISA
+ cpuXC->setNextPC(cpuXC->readNextPC() + sizeof(MachInst));
+#else
+ cpuXC->setNextPC(cpuXC->readNextNPC());
+ cpuXC->setNextNPC(cpuXC->readNextNPC() + sizeof(MachInst));
+#endif
+
+ }
+
+#if FULL_SYSTEM
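+    // service any PC-based events (e.g. breakpoints or function hooks)
+    // at the new PC; a handler may itself redirect the PC, so repeat
+    // until the PC stops changing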
+ Addr oldpc;
+ do {
+ oldpc = cpuXC->readPC();
+ system->pcEventQueue.service(xcProxy);
+ } while (oldpc != cpuXC->readPC());
+#endif
+}
+
diff --git a/src/cpu/simple/base.hh b/src/cpu/simple/base.hh
new file mode 100644
index 000000000..4c0e6f3c7
--- /dev/null
+++ b/src/cpu/simple/base.hh
@@ -0,0 +1,316 @@
+/*
+ * Copyright (c) 2002-2005 The Regents of The University of Michigan
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __CPU_SIMPLE_BASE_HH__
+#define __CPU_SIMPLE_BASE_HH__
+
+#include "base/statistics.hh"
+#include "config/full_system.hh"
+#include "cpu/base.hh"
+#include "cpu/cpu_exec_context.hh"
+#include "cpu/pc_event.hh"
+#include "cpu/sampler/sampler.hh"
+#include "cpu/static_inst.hh"
+#include "mem/packet.hh"
+#include "mem/port.hh"
+#include "mem/request.hh"
+#include "sim/eventq.hh"
+
+// forward declarations
+#if FULL_SYSTEM
+class Processor;
+class AlphaITB;
+class AlphaDTB;
+class MemObject;
+
+class RemoteGDB;
+class GDBListener;
+
+#else
+
+class Process;
+
+#endif // FULL_SYSTEM
+
+class ExecContext;
+class Checkpoint;
+
+namespace Trace {
+ class InstRecord;
+}
+
+
+class BaseSimpleCPU : public BaseCPU
+{
+ protected:
+ typedef TheISA::MachInst MachInst;
+ typedef TheISA::MiscReg MiscReg;
+ typedef TheISA::FloatReg FloatReg;
+ typedef TheISA::FloatRegBits FloatRegBits;
+
+ MemObject *mem;
+
+ protected:
+ Trace::InstRecord *traceData;
+
+ public:
+ void post_interrupt(int int_num, int index);
+
+ void zero_fill_64(Addr addr) {
+ static int warned = 0;
+ if (!warned) {
+ warn ("WH64 is not implemented");
+ warned = 1;
+ }
+ };
+
+ public:
+ struct Params : public BaseCPU::Params
+ {
+ MemObject *mem;
+#if FULL_SYSTEM
+ AlphaITB *itb;
+ AlphaDTB *dtb;
+#else
+ Process *process;
+#endif
+ };
+ BaseSimpleCPU(Params *params);
+ virtual ~BaseSimpleCPU();
+
+ public:
+ // execution context
+ CPUExecContext *cpuXC;
+
+ ExecContext *xcProxy;
+
+#if FULL_SYSTEM
+ Addr dbg_vtophys(Addr addr);
+
+ bool interval_stats;
+#endif
+
+ // current instruction
+ MachInst inst;
+
+ // Static data storage
+ TheISA::IntReg dataReg;
+
+ // Pointer to the sampler that is telling us to switchover.
+ // Used to signal the completion of the pipe drain and schedule
+ // the next switchover
+ Sampler *sampler;
+
+ StaticInstPtr curStaticInst;
+
+ void checkForInterrupts();
+ Fault setupFetchPacket(Packet *ifetch_pkt);
+ void preExecute();
+ void postExecute();
+ void advancePC(Fault fault);
+
+ virtual void deallocateContext(int thread_num);
+ virtual void haltContext(int thread_num);
+
+ // statistics
+ virtual void regStats();
+ virtual void resetStats();
+
+ // number of simulated instructions
+ Counter numInst;
+ Counter startNumInst;
+ Stats::Scalar<> numInsts;
+
+ virtual Counter totalInstructions() const
+ {
+ return numInst - startNumInst;
+ }
+
+ // number of simulated memory references
+ Stats::Scalar<> numMemRefs;
+
+ // number of simulated loads
+ Counter numLoad;
+ Counter startNumLoad;
+
+ // number of idle cycles
+ Stats::Average<> notIdleFraction;
+ Stats::Formula idleFraction;
+
+ // number of cycles stalled for I-cache responses
+ Stats::Scalar<> icacheStallCycles;
+ Counter lastIcacheStall;
+
+ // number of cycles stalled for I-cache retries
+ Stats::Scalar<> icacheRetryCycles;
+ Counter lastIcacheRetry;
+
+ // number of cycles stalled for D-cache responses
+ Stats::Scalar<> dcacheStallCycles;
+ Counter lastDcacheStall;
+
+ // number of cycles stalled for D-cache retries
+ Stats::Scalar<> dcacheRetryCycles;
+ Counter lastDcacheRetry;
+
+ virtual void serialize(std::ostream &os);
+ virtual void unserialize(Checkpoint *cp, const std::string &section);
+
+ // These functions are only used in CPU models that split
+ // effective address computation from the actual memory access.
+ void setEA(Addr EA) { panic("BaseSimpleCPU::setEA() not implemented\n"); }
+ Addr getEA() { panic("BaseSimpleCPU::getEA() not implemented\n"); }
+
+ void prefetch(Addr addr, unsigned flags)
+ {
+ // need to do this...
+ }
+
+ void writeHint(Addr addr, int size, unsigned flags)
+ {
+ // need to do this...
+ }
+
+ Fault copySrcTranslate(Addr src);
+
+ Fault copy(Addr dest);
+
+ // The register accessor methods provide the index of the
+ // instruction's operand (e.g., 0 or 1), not the architectural
+ // register index, to simplify the implementation of register
+ // renaming. We find the architectural register index by indexing
+ // into the instruction's own operand index table. Note that a
+ // raw pointer to the StaticInst is provided instead of a
+    // ref-counted StaticInstPtr to reduce overhead. This is fine as
+ // long as these methods don't copy the pointer into any long-term
+ // storage (which is pretty hard to imagine they would have reason
+ // to do).
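+    // For example, for an Alpha "addq r1,r2,r3", readIntReg(si, 0) reads
+    // architectural register r1 (si->srcRegIdx(0) == 1) and
+    // setIntReg(si, 0, val) writes r3 (si->destRegIdx(0) == 3).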
+
+ uint64_t readIntReg(const StaticInst *si, int idx)
+ {
+ return cpuXC->readIntReg(si->srcRegIdx(idx));
+ }
+
+ FloatReg readFloatReg(const StaticInst *si, int idx, int width)
+ {
+ int reg_idx = si->srcRegIdx(idx) - TheISA::FP_Base_DepTag;
+ return cpuXC->readFloatReg(reg_idx, width);
+ }
+
+ FloatReg readFloatReg(const StaticInst *si, int idx)
+ {
+ int reg_idx = si->srcRegIdx(idx) - TheISA::FP_Base_DepTag;
+ return cpuXC->readFloatReg(reg_idx);
+ }
+
+ FloatRegBits readFloatRegBits(const StaticInst *si, int idx, int width)
+ {
+ int reg_idx = si->srcRegIdx(idx) - TheISA::FP_Base_DepTag;
+ return cpuXC->readFloatRegBits(reg_idx, width);
+ }
+
+ FloatRegBits readFloatRegBits(const StaticInst *si, int idx)
+ {
+ int reg_idx = si->srcRegIdx(idx) - TheISA::FP_Base_DepTag;
+ return cpuXC->readFloatRegBits(reg_idx);
+ }
+
+ void setIntReg(const StaticInst *si, int idx, uint64_t val)
+ {
+ cpuXC->setIntReg(si->destRegIdx(idx), val);
+ }
+
+ void setFloatReg(const StaticInst *si, int idx, FloatReg val, int width)
+ {
+ int reg_idx = si->destRegIdx(idx) - TheISA::FP_Base_DepTag;
+ cpuXC->setFloatReg(reg_idx, val, width);
+ }
+
+ void setFloatReg(const StaticInst *si, int idx, FloatReg val)
+ {
+ int reg_idx = si->destRegIdx(idx) - TheISA::FP_Base_DepTag;
+ cpuXC->setFloatReg(reg_idx, val);
+ }
+
+ void setFloatRegBits(const StaticInst *si, int idx,
+ FloatRegBits val, int width)
+ {
+ int reg_idx = si->destRegIdx(idx) - TheISA::FP_Base_DepTag;
+ cpuXC->setFloatRegBits(reg_idx, val, width);
+ }
+
+ void setFloatRegBits(const StaticInst *si, int idx, FloatRegBits val)
+ {
+ int reg_idx = si->destRegIdx(idx) - TheISA::FP_Base_DepTag;
+ cpuXC->setFloatRegBits(reg_idx, val);
+ }
+
+ uint64_t readPC() { return cpuXC->readPC(); }
+ uint64_t readNextPC() { return cpuXC->readNextPC(); }
+ uint64_t readNextNPC() { return cpuXC->readNextNPC(); }
+
+ void setPC(uint64_t val) { cpuXC->setPC(val); }
+ void setNextPC(uint64_t val) { cpuXC->setNextPC(val); }
+ void setNextNPC(uint64_t val) { cpuXC->setNextNPC(val); }
+
+ MiscReg readMiscReg(int misc_reg)
+ {
+ return cpuXC->readMiscReg(misc_reg);
+ }
+
+ MiscReg readMiscRegWithEffect(int misc_reg, Fault &fault)
+ {
+ return cpuXC->readMiscRegWithEffect(misc_reg, fault);
+ }
+
+ Fault setMiscReg(int misc_reg, const MiscReg &val)
+ {
+ return cpuXC->setMiscReg(misc_reg, val);
+ }
+
+ Fault setMiscRegWithEffect(int misc_reg, const MiscReg &val)
+ {
+ return cpuXC->setMiscRegWithEffect(misc_reg, val);
+ }
+
+#if FULL_SYSTEM
+ Fault hwrei() { return cpuXC->hwrei(); }
+ int readIntrFlag() { return cpuXC->readIntrFlag(); }
+ void setIntrFlag(int val) { cpuXC->setIntrFlag(val); }
+ bool inPalMode() { return cpuXC->inPalMode(); }
+ void ev5_trap(Fault fault) { fault->invoke(xcProxy); }
+ bool simPalCheck(int palFunc) { return cpuXC->simPalCheck(palFunc); }
+#else
+ void syscall(int64_t callnum) { cpuXC->syscall(callnum); }
+#endif
+
+ bool misspeculating() { return cpuXC->misspeculating(); }
+ ExecContext *xcBase() { return xcProxy; }
+};
+
+#endif // __CPU_SIMPLE_BASE_HH__
diff --git a/src/cpu/simple/timing.cc b/src/cpu/simple/timing.cc
new file mode 100644
index 000000000..70b88c4b1
--- /dev/null
+++ b/src/cpu/simple/timing.cc
@@ -0,0 +1,570 @@
+/*
+ * Copyright (c) 2002-2005 The Regents of The University of Michigan
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "arch/utility.hh"
+#include "cpu/exetrace.hh"
+#include "cpu/simple/timing.hh"
+#include "mem/packet_impl.hh"
+#include "sim/builder.hh"
+
+using namespace std;
+using namespace TheISA;
+
+
+void
+TimingSimpleCPU::init()
+{
+    // Create memory ports (connect them up)
+ Port *mem_dport = mem->getPort("");
+ dcachePort.setPeer(mem_dport);
+ mem_dport->setPeer(&dcachePort);
+
+ Port *mem_iport = mem->getPort("");
+ icachePort.setPeer(mem_iport);
+ mem_iport->setPeer(&icachePort);
+
+ BaseCPU::init();
+#if FULL_SYSTEM
+ for (int i = 0; i < execContexts.size(); ++i) {
+ ExecContext *xc = execContexts[i];
+
+ // initialize CPU, including PC
+ TheISA::initCPU(xc, xc->readCpuId());
+ }
+#endif
+}
+
+Tick
+TimingSimpleCPU::CpuPort::recvAtomic(Packet *pkt)
+{
+ panic("TimingSimpleCPU doesn't expect recvAtomic callback!");
+ return curTick;
+}
+
+void
+TimingSimpleCPU::CpuPort::recvFunctional(Packet *pkt)
+{
+ panic("TimingSimpleCPU doesn't expect recvFunctional callback!");
+}
+
+void
+TimingSimpleCPU::CpuPort::recvStatusChange(Status status)
+{
+ if (status == RangeChange)
+ return;
+
+ panic("TimingSimpleCPU doesn't expect recvStatusChange callback!");
+}
+
+TimingSimpleCPU::TimingSimpleCPU(Params *p)
+ : BaseSimpleCPU(p), icachePort(this), dcachePort(this)
+{
+ _status = Idle;
+ ifetch_pkt = dcache_pkt = NULL;
+}
+
+
+TimingSimpleCPU::~TimingSimpleCPU()
+{
+}
+
+void
+TimingSimpleCPU::serialize(ostream &os)
+{
+ BaseSimpleCPU::serialize(os);
+ SERIALIZE_ENUM(_status);
+}
+
+void
+TimingSimpleCPU::unserialize(Checkpoint *cp, const string &section)
+{
+ BaseSimpleCPU::unserialize(cp, section);
+ UNSERIALIZE_ENUM(_status);
+}
+
+void
+TimingSimpleCPU::switchOut(Sampler *s)
+{
+ sampler = s;
+ if (status() == Running) {
+ _status = SwitchedOut;
+ }
+ sampler->signalSwitched();
+}
+
+
+void
+TimingSimpleCPU::takeOverFrom(BaseCPU *oldCPU)
+{
+ BaseCPU::takeOverFrom(oldCPU);
+
+ // if any of this CPU's ExecContexts are active, mark the CPU as
+ // running and schedule its tick event.
+ for (int i = 0; i < execContexts.size(); ++i) {
+ ExecContext *xc = execContexts[i];
+ if (xc->status() == ExecContext::Active && _status != Running) {
+ _status = Running;
+ break;
+ }
+ }
+}
+
+
+void
+TimingSimpleCPU::activateContext(int thread_num, int delay)
+{
+ assert(thread_num == 0);
+ assert(cpuXC);
+
+ assert(_status == Idle);
+
+ notIdleFraction++;
+ _status = Running;
+ // kick things off by initiating the fetch of the next instruction
+ Event *e =
+ new EventWrapper<TimingSimpleCPU, &TimingSimpleCPU::fetch>(this, true);
+ e->schedule(curTick + cycles(delay));
+}
+
+
+void
+TimingSimpleCPU::suspendContext(int thread_num)
+{
+ assert(thread_num == 0);
+ assert(cpuXC);
+
+ panic("TimingSimpleCPU::suspendContext not implemented");
+
+ assert(_status == Running);
+
+ notIdleFraction--;
+ _status = Idle;
+}
+
+
+template <class T>
+Fault
+TimingSimpleCPU::read(Addr addr, T &data, unsigned flags)
+{
+ Request *data_read_req = new Request(true);
+
+ data_read_req->setVaddr(addr);
+ data_read_req->setSize(sizeof(T));
+ data_read_req->setFlags(flags);
+ data_read_req->setTime(curTick);
+
+ if (traceData) {
+ traceData->setAddr(data_read_req->getVaddr());
+ }
+
+ // translate to physical address
+ Fault fault = cpuXC->translateDataReadReq(data_read_req);
+
+ // Now do the access.
+ if (fault == NoFault) {
+ Packet *data_read_pkt = new Packet;
+ data_read_pkt->cmd = Read;
+ data_read_pkt->req = data_read_req;
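+        // dataDynamic: the packet owns the newly allocated buffer and is
+        // responsible for freeing it (the CPU keeps no pointer to it)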
+ data_read_pkt->dataDynamic<T>(new T);
+ data_read_pkt->addr = data_read_req->getPaddr();
+ data_read_pkt->size = sizeof(T);
+ data_read_pkt->dest = Packet::Broadcast;
+
+ if (!dcachePort.sendTiming(data_read_pkt)) {
+ _status = DcacheRetry;
+ dcache_pkt = data_read_pkt;
+ } else {
+ _status = DcacheWaitResponse;
+ dcache_pkt = NULL;
+ }
+ }
+
+ // This will need a new way to tell if it has a dcache attached.
+ if (data_read_req->getFlags() & UNCACHEABLE)
+ recordEvent("Uncached Read");
+
+ return fault;
+}
+
+#ifndef DOXYGEN_SHOULD_SKIP_THIS
+
+template
+Fault
+TimingSimpleCPU::read(Addr addr, uint64_t &data, unsigned flags);
+
+template
+Fault
+TimingSimpleCPU::read(Addr addr, uint32_t &data, unsigned flags);
+
+template
+Fault
+TimingSimpleCPU::read(Addr addr, uint16_t &data, unsigned flags);
+
+template
+Fault
+TimingSimpleCPU::read(Addr addr, uint8_t &data, unsigned flags);
+
+#endif //DOXYGEN_SHOULD_SKIP_THIS
+
+template<>
+Fault
+TimingSimpleCPU::read(Addr addr, double &data, unsigned flags)
+{
+ return read(addr, *(uint64_t*)&data, flags);
+}
+
+template<>
+Fault
+TimingSimpleCPU::read(Addr addr, float &data, unsigned flags)
+{
+ return read(addr, *(uint32_t*)&data, flags);
+}
+
+
+template<>
+Fault
+TimingSimpleCPU::read(Addr addr, int32_t &data, unsigned flags)
+{
+ return read(addr, (uint32_t&)data, flags);
+}
+
+
+template <class T>
+Fault
+TimingSimpleCPU::write(T data, Addr addr, unsigned flags, uint64_t *res)
+{
+ Request *data_write_req = new Request(true);
+ data_write_req->setVaddr(addr);
+ data_write_req->setTime(curTick);
+ data_write_req->setSize(sizeof(T));
+ data_write_req->setFlags(flags);
+
+ // translate to physical address
+ Fault fault = cpuXC->translateDataWriteReq(data_write_req);
+ // Now do the access.
+ if (fault == NoFault) {
+ Packet *data_write_pkt = new Packet;
+ data_write_pkt->cmd = Write;
+ data_write_pkt->req = data_write_req;
+ data_write_pkt->allocate();
+ data_write_pkt->size = sizeof(T);
+ data_write_pkt->set(data);
+ data_write_pkt->addr = data_write_req->getPaddr();
+ data_write_pkt->dest = Packet::Broadcast;
+
+ if (!dcachePort.sendTiming(data_write_pkt)) {
+ _status = DcacheRetry;
+ dcache_pkt = data_write_pkt;
+ } else {
+ _status = DcacheWaitResponse;
+ dcache_pkt = NULL;
+ }
+ }
+
+ // This will need a new way to tell if it's hooked up to a cache or not.
+ if (data_write_req->getFlags() & UNCACHEABLE)
+ recordEvent("Uncached Write");
+
+ // If the write needs to have a fault on the access, consider calling
+ // changeStatus() and changing it to "bad addr write" or something.
+ return fault;
+}
+
+
+#ifndef DOXYGEN_SHOULD_SKIP_THIS
+template
+Fault
+TimingSimpleCPU::write(uint64_t data, Addr addr,
+ unsigned flags, uint64_t *res);
+
+template
+Fault
+TimingSimpleCPU::write(uint32_t data, Addr addr,
+ unsigned flags, uint64_t *res);
+
+template
+Fault
+TimingSimpleCPU::write(uint16_t data, Addr addr,
+ unsigned flags, uint64_t *res);
+
+template
+Fault
+TimingSimpleCPU::write(uint8_t data, Addr addr,
+ unsigned flags, uint64_t *res);
+
+#endif //DOXYGEN_SHOULD_SKIP_THIS
+
+template<>
+Fault
+TimingSimpleCPU::write(double data, Addr addr, unsigned flags, uint64_t *res)
+{
+ return write(*(uint64_t*)&data, addr, flags, res);
+}
+
+template<>
+Fault
+TimingSimpleCPU::write(float data, Addr addr, unsigned flags, uint64_t *res)
+{
+ return write(*(uint32_t*)&data, addr, flags, res);
+}
+
+
+template<>
+Fault
+TimingSimpleCPU::write(int32_t data, Addr addr, unsigned flags, uint64_t *res)
+{
+ return write((uint32_t)data, addr, flags, res);
+}
+
+
+void
+TimingSimpleCPU::fetch()
+{
+ checkForInterrupts();
+
+ Request *ifetch_req = new Request(true);
+ ifetch_req->setSize(sizeof(MachInst));
+
+ ifetch_pkt = new Packet;
+ ifetch_pkt->cmd = Read;
+ ifetch_pkt->dataStatic(&inst);
+ ifetch_pkt->req = ifetch_req;
+ ifetch_pkt->size = sizeof(MachInst);
+ ifetch_pkt->dest = Packet::Broadcast;
+
+ Fault fault = setupFetchPacket(ifetch_pkt);
+ if (fault == NoFault) {
+ if (!icachePort.sendTiming(ifetch_pkt)) {
+ // Need to wait for retry
+ _status = IcacheRetry;
+ } else {
+ // Need to wait for cache to respond
+ _status = IcacheWaitResponse;
+ // ownership of packet transferred to memory system
+ ifetch_pkt = NULL;
+ }
+ } else {
+ panic("TimingSimpleCPU fetch fault handling not implemented");
+ }
+}
+
+
+void
+TimingSimpleCPU::completeInst(Fault fault)
+{
+ postExecute();
+
+ if (traceData) {
+ traceData->finalize();
+ }
+
+ advancePC(fault);
+
+ if (_status == Running) {
+ // kick off fetch of next instruction... callback from icache
+ // response will cause that instruction to be executed,
+ // keeping the CPU running.
+ fetch();
+ }
+}
+
+
+void
+TimingSimpleCPU::completeIfetch()
+{
+ // received a response from the icache: execute the received
+ // instruction
+ assert(_status == IcacheWaitResponse);
+ _status = Running;
+ preExecute();
+ if (curStaticInst->isMemRef()) {
+ // load or store: just send to dcache
+ Fault fault = curStaticInst->initiateAcc(this, traceData);
+ assert(fault == NoFault);
+ assert(_status == DcacheWaitResponse);
+ // instruction will complete in dcache response callback
+ } else {
+ // non-memory instruction: execute completely now
+ Fault fault = curStaticInst->execute(this, traceData);
+ completeInst(fault);
+ }
+}
+
+
+bool
+TimingSimpleCPU::IcachePort::recvTiming(Packet *pkt)
+{
+ cpu->completeIfetch();
+ return true;
+}
+
+Packet *
+TimingSimpleCPU::IcachePort::recvRetry()
+{
+ // we shouldn't get a retry unless we have a packet that we're
+ // waiting to transmit
+ assert(cpu->ifetch_pkt != NULL);
+ assert(cpu->_status == IcacheRetry);
+ cpu->_status = IcacheWaitResponse;
+ Packet *tmp = cpu->ifetch_pkt;
+ cpu->ifetch_pkt = NULL;
+ return tmp;
+}
+
+void
+TimingSimpleCPU::completeDataAccess(Packet *pkt)
+{
+ // received a response from the dcache: complete the load or store
+ // instruction
+ assert(pkt->result == Success);
+ assert(_status == DcacheWaitResponse);
+ _status = Running;
+
+ Fault fault = curStaticInst->completeAcc(pkt, this, traceData);
+
+ completeInst(fault);
+}
+
+
+
+bool
+TimingSimpleCPU::DcachePort::recvTiming(Packet *pkt)
+{
+ cpu->completeDataAccess(pkt);
+ return true;
+}
+
+Packet *
+TimingSimpleCPU::DcachePort::recvRetry()
+{
+ // we shouldn't get a retry unless we have a packet that we're
+ // waiting to transmit
+ assert(cpu->dcache_pkt != NULL);
+ assert(cpu->_status == DcacheRetry);
+ cpu->_status = DcacheWaitResponse;
+ Packet *tmp = cpu->dcache_pkt;
+ cpu->dcache_pkt = NULL;
+ return tmp;
+}
+
+
+////////////////////////////////////////////////////////////////////////
+//
+// TimingSimpleCPU Simulation Object
+//
+BEGIN_DECLARE_SIM_OBJECT_PARAMS(TimingSimpleCPU)
+
+ Param<Counter> max_insts_any_thread;
+ Param<Counter> max_insts_all_threads;
+ Param<Counter> max_loads_any_thread;
+ Param<Counter> max_loads_all_threads;
+ SimObjectParam<MemObject *> mem;
+
+#if FULL_SYSTEM
+ SimObjectParam<AlphaITB *> itb;
+ SimObjectParam<AlphaDTB *> dtb;
+ SimObjectParam<System *> system;
+ Param<int> cpu_id;
+ Param<Tick> profile;
+#else
+ SimObjectParam<Process *> workload;
+#endif // FULL_SYSTEM
+
+ Param<int> clock;
+
+ Param<bool> defer_registration;
+ Param<int> width;
+ Param<bool> function_trace;
+ Param<Tick> function_trace_start;
+ Param<bool> simulate_stalls;
+
+END_DECLARE_SIM_OBJECT_PARAMS(TimingSimpleCPU)
+
+BEGIN_INIT_SIM_OBJECT_PARAMS(TimingSimpleCPU)
+
+ INIT_PARAM(max_insts_any_thread,
+ "terminate when any thread reaches this inst count"),
+ INIT_PARAM(max_insts_all_threads,
+ "terminate when all threads have reached this inst count"),
+ INIT_PARAM(max_loads_any_thread,
+ "terminate when any thread reaches this load count"),
+ INIT_PARAM(max_loads_all_threads,
+ "terminate when all threads have reached this load count"),
+ INIT_PARAM(mem, "memory"),
+
+#if FULL_SYSTEM
+ INIT_PARAM(itb, "Instruction TLB"),
+ INIT_PARAM(dtb, "Data TLB"),
+ INIT_PARAM(system, "system object"),
+ INIT_PARAM(cpu_id, "processor ID"),
+ INIT_PARAM(profile, ""),
+#else
+ INIT_PARAM(workload, "processes to run"),
+#endif // FULL_SYSTEM
+
+ INIT_PARAM(clock, "clock speed"),
+ INIT_PARAM(defer_registration, "defer system registration (for sampling)"),
+ INIT_PARAM(width, "cpu width"),
+ INIT_PARAM(function_trace, "Enable function trace"),
+ INIT_PARAM(function_trace_start, "Cycle to start function trace"),
+ INIT_PARAM(simulate_stalls, "Simulate cache stall cycles")
+
+END_INIT_SIM_OBJECT_PARAMS(TimingSimpleCPU)
+
+
+CREATE_SIM_OBJECT(TimingSimpleCPU)
+{
+ TimingSimpleCPU::Params *params = new TimingSimpleCPU::Params();
+ params->name = getInstanceName();
+ params->numberOfThreads = 1;
+ params->max_insts_any_thread = max_insts_any_thread;
+ params->max_insts_all_threads = max_insts_all_threads;
+ params->max_loads_any_thread = max_loads_any_thread;
+ params->max_loads_all_threads = max_loads_all_threads;
+ params->deferRegistration = defer_registration;
+ params->clock = clock;
+ params->functionTrace = function_trace;
+ params->functionTraceStart = function_trace_start;
+ params->mem = mem;
+
+#if FULL_SYSTEM
+ params->itb = itb;
+ params->dtb = dtb;
+ params->system = system;
+ params->cpu_id = cpu_id;
+ params->profile = profile;
+#else
+ params->process = workload;
+#endif
+
+ TimingSimpleCPU *cpu = new TimingSimpleCPU(params);
+ return cpu;
+}
+
+REGISTER_SIM_OBJECT("TimingSimpleCPU", TimingSimpleCPU)
+
diff --git a/src/cpu/simple/timing.hh b/src/cpu/simple/timing.hh
new file mode 100644
index 000000000..83be025d9
--- /dev/null
+++ b/src/cpu/simple/timing.hh
@@ -0,0 +1,150 @@
+/*
+ * Copyright (c) 2002-2005 The Regents of The University of Michigan
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __CPU_SIMPLE_TIMING_HH__
+#define __CPU_SIMPLE_TIMING_HH__
+
+#include "cpu/simple/base.hh"
+
+class TimingSimpleCPU : public BaseSimpleCPU
+{
+ public:
+
+ struct Params : public BaseSimpleCPU::Params {
+ };
+
+ TimingSimpleCPU(Params *params);
+ virtual ~TimingSimpleCPU();
+
+ virtual void init();
+
+ public:
+ //
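+    // Idle/Running are the usual states.  The *Retry states mean a
+    // request was rejected by the port and is waiting for recvRetry();
+    // the *WaitResponse states mean a request is outstanding and the
+    // CPU is stalled until the corresponding response arrives.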
+ enum Status {
+ Idle,
+ Running,
+ IcacheRetry,
+ IcacheWaitResponse,
+ IcacheWaitSwitch,
+ DcacheRetry,
+ DcacheWaitResponse,
+ DcacheWaitSwitch,
+ SwitchedOut
+ };
+
+ protected:
+ Status _status;
+
+ Status status() const { return _status; }
+
+ private:
+
+ class CpuPort : public Port
+ {
+ protected:
+ TimingSimpleCPU *cpu;
+
+ public:
+
+ CpuPort(TimingSimpleCPU *_cpu)
+ : cpu(_cpu)
+ { }
+
+ protected:
+
+ virtual Tick recvAtomic(Packet *pkt);
+
+ virtual void recvFunctional(Packet *pkt);
+
+ virtual void recvStatusChange(Status status);
+
+ virtual void getDeviceAddressRanges(AddrRangeList &resp,
+ AddrRangeList &snoop)
+ { resp.clear(); snoop.clear(); }
+ };
+
+ class IcachePort : public CpuPort
+ {
+ public:
+
+ IcachePort(TimingSimpleCPU *_cpu)
+ : CpuPort(_cpu)
+ { }
+
+ protected:
+
+ virtual bool recvTiming(Packet *pkt);
+
+ virtual Packet *recvRetry();
+ };
+
+ class DcachePort : public CpuPort
+ {
+ public:
+
+ DcachePort(TimingSimpleCPU *_cpu)
+ : CpuPort(_cpu)
+ { }
+
+ protected:
+
+ virtual bool recvTiming(Packet *pkt);
+
+ virtual Packet *recvRetry();
+ };
+
+ IcachePort icachePort;
+ DcachePort dcachePort;
+
+ Packet *ifetch_pkt;
+ Packet *dcache_pkt;
+
+ public:
+
+ virtual void serialize(std::ostream &os);
+ virtual void unserialize(Checkpoint *cp, const std::string &section);
+
+ void switchOut(Sampler *s);
+ void takeOverFrom(BaseCPU *oldCPU);
+
+ virtual void activateContext(int thread_num, int delay);
+ virtual void suspendContext(int thread_num);
+
+ template <class T>
+ Fault read(Addr addr, T &data, unsigned flags);
+
+ template <class T>
+ Fault write(T data, Addr addr, unsigned flags, uint64_t *res);
+
+ void fetch();
+ void completeInst(Fault fault);
+ void completeIfetch();
+ void completeDataAccess(Packet *);
+};
+
+#endif // __CPU_SIMPLE_TIMING_HH__