commit e06321091d4e931ff1a4d753e56d76f9746c3cd2 (patch)
tree   75e2049ca5ffc65cbfaefa73804571aa933f015b /src/cpu
parent 8291d9db0a0bdeecb2a13f28962893ed3659230e (diff)
author    Nathan Binkert <nate@binkert.org>  2008-10-09 04:58:24 -0700
committer Nathan Binkert <nate@binkert.org>  2008-10-09 04:58:24 -0700
eventq: convert all usage of events to use the new API.
For now, there is still a single global event queue, but this conversion is a necessary step towards a parallelized m5.
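
The conversion pattern repeats throughout this diff: an Event no longer receives an EventQueue in its constructor, and call sites stop invoking event->schedule(when) on the event itself; instead the owning object (a BaseCPU or other SimObject acting as an event manager) is asked to schedule the event, so each owner can later route its events to its own queue. A minimal sketch of that shape, written against simplified toy classes rather than the real m5 headers:

    #include <cstdint>
    #include <map>

    typedef uint64_t Tick;

    // Toy Event: after this commit it stores only a priority, never a queue.
    class Event {
      public:
        explicit Event(int prio = 0) : priority(prio) {}
        virtual ~Event() {}
        virtual void process() = 0;
        int priority;
    };

    // Toy queue keyed by tick; the real EventQueue is considerably richer.
    class EventQueue {
      public:
        void schedule(Event *ev, Tick when) {
            events.insert(std::make_pair(when, ev));
        }
        void deschedule(Event *ev) {
            std::multimap<Tick, Event *>::iterator it = events.begin();
            for (; it != events.end(); ++it)
                if (it->second == ev) { events.erase(it); break; }
        }
      private:
        std::multimap<Tick, Event *> events;
    };

    // The new API's key move: objects that own events forward schedule(),
    // deschedule(), and reschedule() to *their* queue, so a call site like
    // cpu->schedule(event, when) never names a queue. With a single global
    // queue these all land in the same place; a parallel m5 can later give
    // each object its own queue without touching any call site.
    class EventManager {
      public:
        explicit EventManager(EventQueue *q) : eventq(q) {}
        void schedule(Event *ev, Tick when) { eventq->schedule(ev, when); }
        void deschedule(Event *ev) { eventq->deschedule(ev); }
        void reschedule(Event *ev, Tick when) {
            eventq->deschedule(ev);
            eventq->schedule(ev, when);
        }
      private:
        EventQueue *eventq;
    };
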
Diffstat (limited to 'src/cpu')
 src/cpu/base.cc               | 76
 src/cpu/base.hh               |  2
 src/cpu/cpuevent.hh           |  9
 src/cpu/memtest/memtest.cc    |  4
 src/cpu/memtest/memtest.hh    |  6
 src/cpu/o3/commit_impl.hh     |  4
 src/cpu/o3/cpu.cc             | 17
 src/cpu/o3/cpu.hh             | 20
 src/cpu/o3/fetch.hh           |  2
 src/cpu/o3/inst_queue_impl.hh |  9
 src/cpu/o3/lsq.hh             |  2
 src/cpu/o3/lsq_unit.hh        |  2
 src/cpu/o3/lsq_unit_impl.hh   |  4
 src/cpu/quiesce_event.cc      |  2
 src/cpu/simple/atomic.cc      | 15
 src/cpu/simple/timing.cc      | 16
 src/cpu/simple/timing.hh      |  3
 src/cpu/simple_thread.cc      |  4
 src/cpu/thread_state.cc       |  2
 19 files changed, 101 insertions(+), 98 deletions(-)
diff --git a/src/cpu/base.cc b/src/cpu/base.cc
index 7b6404419..5a0359e53 100644
--- a/src/cpu/base.cc
+++ b/src/cpu/base.cc
@@ -60,13 +60,12 @@ vector<BaseCPU *> BaseCPU::cpuList;
// been initialized
int maxThreadsPerCPU = 1;
-CPUProgressEvent::CPUProgressEvent(EventQueue *q, Tick ival,
- BaseCPU *_cpu)
- : Event(q, Event::Progress_Event_Pri), interval(ival),
- lastNumInst(0), cpu(_cpu)
+CPUProgressEvent::CPUProgressEvent(BaseCPU *_cpu, Tick ival)
+ : Event(Event::Progress_Event_Pri), interval(ival), lastNumInst(0),
+ cpu(_cpu)
{
if (interval)
- schedule(curTick + interval);
+ cpu->schedule(this, curTick + interval);
}
void
@@ -84,7 +83,7 @@ CPUProgressEvent::process()
curTick, cpu->name(), temp - lastNumInst);
#endif
lastNumInst = temp;
- schedule(curTick + interval);
+ cpu->schedule(this, curTick + interval);
}
const char *
@@ -121,22 +120,26 @@ BaseCPU::BaseCPU(Params *p)
//
// set up instruction-count-based termination events, if any
//
- if (p->max_insts_any_thread != 0)
- for (int i = 0; i < number_of_threads; ++i)
- schedExitSimLoop("a thread reached the max instruction count",
- p->max_insts_any_thread, 0,
- comInstEventQueue[i]);
+ if (p->max_insts_any_thread != 0) {
+ const char *cause = "a thread reached the max instruction count";
+ for (int i = 0; i < number_of_threads; ++i) {
+ Event *event = new SimLoopExitEvent(cause, 0);
+ comInstEventQueue[i]->schedule(event, p->max_insts_any_thread);
+ }
+ }
if (p->max_insts_all_threads != 0) {
+ const char *cause = "all threads reached the max instruction count";
+
// allocate & initialize shared downcounter: each event will
// decrement this when triggered; simulation will terminate
// when counter reaches 0
int *counter = new int;
*counter = number_of_threads;
- for (int i = 0; i < number_of_threads; ++i)
- new CountedExitEvent(comInstEventQueue[i],
- "all threads reached the max instruction count",
- p->max_insts_all_threads, *counter);
+ for (int i = 0; i < number_of_threads; ++i) {
+ Event *event = new CountedExitEvent(cause, *counter);
+ comInstEventQueue[i]->schedule(event, p->max_insts_all_threads);
+ }
}
// allocate per-thread load-based event queues
@@ -147,22 +150,25 @@ BaseCPU::BaseCPU(Params *p)
//
// set up load-count-based termination events, if any
//
- if (p->max_loads_any_thread != 0)
- for (int i = 0; i < number_of_threads; ++i)
- schedExitSimLoop("a thread reached the max load count",
- p->max_loads_any_thread, 0,
- comLoadEventQueue[i]);
+ if (p->max_loads_any_thread != 0) {
+ const char *cause = "a thread reached the max load count";
+ for (int i = 0; i < number_of_threads; ++i) {
+ Event *event = new SimLoopExitEvent(cause, 0);
+ comLoadEventQueue[i]->schedule(event, p->max_loads_any_thread);
+ }
+ }
if (p->max_loads_all_threads != 0) {
+ const char *cause = "all threads reached the max load count";
// allocate & initialize shared downcounter: each event will
// decrement this when triggered; simulation will terminate
// when counter reaches 0
int *counter = new int;
*counter = number_of_threads;
- for (int i = 0; i < number_of_threads; ++i)
- new CountedExitEvent(comLoadEventQueue[i],
- "all threads reached the max load count",
- p->max_loads_all_threads, *counter);
+ for (int i = 0; i < number_of_threads; ++i) {
+ Event *event = new CountedExitEvent(cause, *counter);
+ comLoadEventQueue[i]->schedule(event, p->max_loads_all_threads);
+ }
}
functionTracingEnabled = false;
@@ -174,9 +180,9 @@ BaseCPU::BaseCPU(Params *p)
if (p->function_trace_start == 0) {
functionTracingEnabled = true;
} else {
- new EventWrapper<BaseCPU,
- &BaseCPU::enableFunctionTrace>(
- this, p->function_trace_start, true);
+ typedef EventWrapper<BaseCPU, &BaseCPU::enableFunctionTrace> wrap;
+ Event *event = new wrap(this, true);
+ schedule(event, p->function_trace_start);
}
}
#if FULL_SYSTEM
@@ -209,13 +215,13 @@ BaseCPU::startup()
{
#if FULL_SYSTEM
if (!params()->defer_registration && profileEvent)
- profileEvent->schedule(curTick);
+ schedule(profileEvent, curTick);
#endif
if (params()->progress_interval) {
- new CPUProgressEvent(&mainEventQueue,
- ticks(params()->progress_interval),
- this);
+ Tick num_ticks = ticks(params()->progress_interval);
+ Event *event = new CPUProgressEvent(this, num_ticks);
+ schedule(event, curTick + num_ticks);
}
}
@@ -300,7 +306,7 @@ BaseCPU::switchOut()
// panic("This CPU doesn't support sampling!");
#if FULL_SYSTEM
if (profileEvent && profileEvent->scheduled())
- profileEvent->deschedule();
+ deschedule(profileEvent);
#endif
}
@@ -336,7 +342,7 @@ BaseCPU::takeOverFrom(BaseCPU *oldCPU, Port *ic, Port *dc)
threadContexts[i]->profileClear();
if (profileEvent)
- profileEvent->schedule(curTick);
+ schedule(profileEvent, curTick);
#endif
// Connect new CPU to old CPU's memory only if new CPU isn't
@@ -358,7 +364,7 @@ BaseCPU::takeOverFrom(BaseCPU *oldCPU, Port *ic, Port *dc)
#if FULL_SYSTEM
BaseCPU::ProfileEvent::ProfileEvent(BaseCPU *_cpu, Tick _interval)
- : Event(&mainEventQueue), cpu(_cpu), interval(_interval)
+ : cpu(_cpu), interval(_interval)
{ }
void
@@ -369,7 +375,7 @@ BaseCPU::ProfileEvent::process()
tc->profileSample();
}
- schedule(curTick + interval);
+ cpu->schedule(this, curTick + interval);
}
void
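
The function_trace hunk above shows the new EventWrapper shape: the wrapper binds a member function to an object, and the second constructor argument asks for the event to be deleted after it fires (the old form also took the schedule tick, which now goes through schedule() instead). A hedged sketch of such a wrapper, written against the toy Event above rather than the real m5 template:

    // Sketch of an EventWrapper-style template under the new constructor
    // shape: bind obj + member function F; 'del' requests self-deletion
    // after process(), standing in for m5's AutoDelete flag handling.
    template <class T, void (T::*F)()>
    class EventWrapper : public Event {
      public:
        EventWrapper(T *obj, bool del = false)
            : object(obj), autoDelete(del) {}
        virtual void process() {
            (object->*F)();
            if (autoDelete)
                delete this;   // one-shot: the event cleans itself up
        }
      private:
        T *object;
        bool autoDelete;
    };

Usage then reads exactly as in the hunk: typedef the instantiation, new one up, and hand it to the owner's schedule().
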
diff --git a/src/cpu/base.hh b/src/cpu/base.hh
index 251adc1b7..c2b78a675 100644
--- a/src/cpu/base.hh
+++ b/src/cpu/base.hh
@@ -65,7 +65,7 @@ class CPUProgressEvent : public Event
BaseCPU *cpu;
public:
- CPUProgressEvent(EventQueue *q, Tick ival, BaseCPU *_cpu);
+ CPUProgressEvent(BaseCPU *_cpu, Tick ival);
void process();
diff --git a/src/cpu/cpuevent.hh b/src/cpu/cpuevent.hh
index 5816c6ca1..65f0e87e1 100644
--- a/src/cpu/cpuevent.hh
+++ b/src/cpu/cpuevent.hh
@@ -58,8 +58,8 @@ class CpuEvent : public Event
ThreadContext *tc;
public:
- CpuEvent(EventQueue *q, ThreadContext *_tc, Priority p = Default_Pri)
- : Event(q, p), tc(_tc)
+ CpuEvent(ThreadContext *_tc, Priority p = Default_Pri)
+ : Event(p), tc(_tc)
{ cpuEventList.push_back(this); }
/** delete the cpu event from the global list. */
@@ -81,9 +81,8 @@ class CpuEventWrapper : public CpuEvent
T *object;
public:
- CpuEventWrapper(T *obj, ThreadContext *_tc,
- EventQueue *q = &mainEventQueue, Priority p = Default_Pri)
- : CpuEvent(q, _tc, p), object(obj)
+ CpuEventWrapper(T *obj, ThreadContext *_tc, Priority p = Default_Pri)
+ : CpuEvent(_tc, p), object(obj)
{ }
void process() { (object->*F)(tc); }
};
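
CpuEventWrapper keeps the same binding idiom but passes the stored ThreadContext to the member function when the event fires. A small usage sketch; Widget and onQuiesce are hypothetical names invented for illustration, and EventManager is the toy stand-in from above:

    // Hypothetical consumer of CpuEventWrapper; F receives the
    // ThreadContext that CpuEvent stored at construction time.
    class Widget {
      public:
        void onQuiesce(ThreadContext *tc) { /* react to the thread */ }
    };

    void
    armWidget(Widget *w, ThreadContext *tc, EventManager *owner, Tick when)
    {
        typedef CpuEventWrapper<Widget, &Widget::onQuiesce> WidgetEvent;
        Event *ev = new WidgetEvent(w, tc);   // default priority, per the diff
        owner->schedule(ev, when);            // the owner picks the queue
    }
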
diff --git a/src/cpu/memtest/memtest.cc b/src/cpu/memtest/memtest.cc
index 42889163a..9e5b9d099 100644
--- a/src/cpu/memtest/memtest.cc
+++ b/src/cpu/memtest/memtest.cc
@@ -152,7 +152,7 @@ MemTest::MemTest(const Params *p)
// set up counters
noResponseCycles = 0;
numReads = 0;
- tickEvent.schedule(0);
+ schedule(tickEvent, 0);
id = TESTER_ALLOCATOR++;
@@ -262,7 +262,7 @@ void
MemTest::tick()
{
if (!tickEvent.scheduled())
- tickEvent.schedule(curTick + ticks(1));
+ schedule(tickEvent, curTick + ticks(1));
if (++noResponseCycles >= 500000) {
cerr << name() << ": deadlocked at cycle " << curTick << endl;
diff --git a/src/cpu/memtest/memtest.hh b/src/cpu/memtest/memtest.hh
index 5a7e0b9ae..82438b41f 100644
--- a/src/cpu/memtest/memtest.hh
+++ b/src/cpu/memtest/memtest.hh
@@ -74,10 +74,10 @@ class MemTest : public MemObject
{
private:
MemTest *cpu;
+
public:
- TickEvent(MemTest *c)
- : Event(&mainEventQueue, CPU_Tick_Pri), cpu(c) {}
- void process() {cpu->tick();}
+ TickEvent(MemTest *c) : Event(CPU_Tick_Pri), cpu(c) {}
+ void process() { cpu->tick(); }
virtual const char *description() const { return "MemTest tick"; }
};
diff --git a/src/cpu/o3/commit_impl.hh b/src/cpu/o3/commit_impl.hh
index b16955691..68fc6ef3b 100644
--- a/src/cpu/o3/commit_impl.hh
+++ b/src/cpu/o3/commit_impl.hh
@@ -51,7 +51,7 @@
template <class Impl>
DefaultCommit<Impl>::TrapEvent::TrapEvent(DefaultCommit<Impl> *_commit,
unsigned _tid)
- : Event(&mainEventQueue, CPU_Tick_Pri), commit(_commit), tid(_tid)
+ : Event(CPU_Tick_Pri), commit(_commit), tid(_tid)
{
this->setFlags(Event::AutoDelete);
}
@@ -462,7 +462,7 @@ DefaultCommit<Impl>::generateTrapEvent(unsigned tid)
TrapEvent *trap = new TrapEvent(this, tid);
- trap->schedule(curTick + trapLatency);
+ cpu->schedule(trap, curTick + trapLatency);
trapInFlight[tid] = true;
}
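
TrapEvent above is the commit's standard one-shot recipe: heap-allocate, set Event::AutoDelete in the constructor, hand the event to the owning CPU's schedule(), and never keep a pointer to it. A condensed sketch of that recipe using a callback in place of the commit stage; the std::function plumbing is illustrative, not how m5 itself writes these events:

    #include <functional>

    // One-shot event: deletes itself at the end of process(), standing
    // in for Event::AutoDelete, so call sites can fire and forget.
    class OneShotEvent : public Event {
      public:
        explicit OneShotEvent(std::function<void()> fn)
            : Event(0), callback(fn) {}
        virtual void process() {
            callback();
            delete this;
        }
      private:
        std::function<void()> callback;
    };

    // Mirrors generateTrapEvent(): allocate, schedule via the owner, done.
    //   Event *trap = new OneShotEvent(trapCallbackForTid);
    //   cpu->schedule(trap, curTick + trapLatency);
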
diff --git a/src/cpu/o3/cpu.cc b/src/cpu/o3/cpu.cc
index 18b417141..eb1115565 100644
--- a/src/cpu/o3/cpu.cc
+++ b/src/cpu/o3/cpu.cc
@@ -74,7 +74,7 @@ BaseO3CPU::regStats()
template <class Impl>
FullO3CPU<Impl>::TickEvent::TickEvent(FullO3CPU<Impl> *c)
- : Event(&mainEventQueue, CPU_Tick_Pri), cpu(c)
+ : Event(CPU_Tick_Pri), cpu(c)
{
}
@@ -94,7 +94,7 @@ FullO3CPU<Impl>::TickEvent::description() const
template <class Impl>
FullO3CPU<Impl>::ActivateThreadEvent::ActivateThreadEvent()
- : Event(&mainEventQueue, CPU_Switch_Pri)
+ : Event(CPU_Switch_Pri)
{
}
@@ -123,7 +123,7 @@ FullO3CPU<Impl>::ActivateThreadEvent::description() const
template <class Impl>
FullO3CPU<Impl>::DeallocateContextEvent::DeallocateContextEvent()
- : Event(&mainEventQueue, CPU_Tick_Pri), tid(0), remove(false), cpu(NULL)
+ : Event(CPU_Tick_Pri), tid(0), remove(false), cpu(NULL)
{
}
@@ -576,7 +576,7 @@ FullO3CPU<Impl>::tick()
lastRunningCycle = curTick;
timesIdled++;
} else {
- tickEvent.schedule(nextCycle(curTick + ticks(1)));
+ schedule(tickEvent, nextCycle(curTick + ticks(1)));
DPRINTF(O3CPU, "Scheduling next tick!\n");
}
}
@@ -584,7 +584,6 @@ FullO3CPU<Impl>::tick()
#if !FULL_SYSTEM
updateThreadPriority();
#endif
-
}
template <class Impl>
@@ -1121,7 +1120,7 @@ FullO3CPU<Impl>::resume()
#endif
if (!tickEvent.scheduled())
- tickEvent.schedule(nextCycle());
+ schedule(tickEvent, nextCycle());
_status = Running;
}
@@ -1214,11 +1213,11 @@ FullO3CPU<Impl>::takeOverFrom(BaseCPU *oldCPU)
ThreadContext *tc = threadContexts[i];
if (tc->status() == ThreadContext::Active && _status != Running) {
_status = Running;
- tickEvent.schedule(nextCycle());
+ schedule(tickEvent, nextCycle());
}
}
if (!tickEvent.scheduled())
- tickEvent.schedule(nextCycle());
+ schedule(tickEvent, nextCycle());
}
template <class Impl>
@@ -1687,7 +1686,7 @@ FullO3CPU<Impl>::wakeCPU()
idleCycles += tickToCycles((curTick - 1) - lastRunningCycle);
numCycles += tickToCycles((curTick - 1) - lastRunningCycle);
- tickEvent.schedule(nextCycle());
+ schedule(tickEvent, nextCycle());
}
template <class Impl>
diff --git a/src/cpu/o3/cpu.hh b/src/cpu/o3/cpu.hh
index 065dd10a0..406d965be 100644
--- a/src/cpu/o3/cpu.hh
+++ b/src/cpu/o3/cpu.hh
@@ -148,9 +148,9 @@ class FullO3CPU : public BaseO3CPU
void scheduleTickEvent(int delay)
{
if (tickEvent.squashed())
- tickEvent.reschedule(nextCycle(curTick + ticks(delay)));
+ reschedule(tickEvent, nextCycle(curTick + ticks(delay)));
else if (!tickEvent.scheduled())
- tickEvent.schedule(nextCycle(curTick + ticks(delay)));
+ schedule(tickEvent, nextCycle(curTick + ticks(delay)));
}
/** Unschedule tick event, regardless of its current state. */
@@ -188,11 +188,11 @@ class FullO3CPU : public BaseO3CPU
{
// Schedule thread to activate, regardless of its current state.
if (activateThreadEvent[tid].squashed())
- activateThreadEvent[tid].
- reschedule(nextCycle(curTick + ticks(delay)));
+ reschedule(activateThreadEvent[tid],
+ nextCycle(curTick + ticks(delay)));
else if (!activateThreadEvent[tid].scheduled())
- activateThreadEvent[tid].
- schedule(nextCycle(curTick + ticks(delay)));
+ schedule(activateThreadEvent[tid],
+ nextCycle(curTick + ticks(delay)));
}
/** Unschedule activate thread event, regardless of its current state. */
@@ -246,11 +246,11 @@ class FullO3CPU : public BaseO3CPU
{
// Schedule thread to be deallocated, regardless of its current state.
if (deallocateContextEvent[tid].squashed())
- deallocateContextEvent[tid].
- reschedule(nextCycle(curTick + ticks(delay)));
+ reschedule(deallocateContextEvent[tid],
+ nextCycle(curTick + ticks(delay)));
else if (!deallocateContextEvent[tid].scheduled())
- deallocateContextEvent[tid].
- schedule(nextCycle(curTick + ticks(delay)));
+ schedule(deallocateContextEvent[tid],
+ nextCycle(curTick + ticks(delay)));
}
/** Unschedule thread deallocation in CPU */
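
All three helpers in cpu.hh repeat one guard: a squashed event (cancelled but still sitting in the queue) must be reschedule()d, an absent event schedule()d, and an already-pending event left alone. A sketch condensing that guard into a helper; squashed() and scheduled() exist on the real m5 Event, not on the toy Event above:

    // Condenses the guard used by scheduleTickEvent() and friends.
    // Assumes the real m5 Event, which exposes squashed()/scheduled().
    inline void
    scheduleOrReschedule(EventManager &mgr, Event *ev, Tick when)
    {
        if (ev->squashed())          // cancelled but still queued
            mgr.reschedule(ev, when);
        else if (!ev->scheduled())   // not in any queue
            mgr.schedule(ev, when);
        // otherwise: already pending; keep the earlier tick
    }
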
diff --git a/src/cpu/o3/fetch.hh b/src/cpu/o3/fetch.hh
index f12228ff9..9886b6675 100644
--- a/src/cpu/o3/fetch.hh
+++ b/src/cpu/o3/fetch.hh
@@ -82,7 +82,7 @@ class DefaultFetch
public:
/** Default constructor. */
IcachePort(DefaultFetch<Impl> *_fetch)
- : Port(_fetch->name() + "-iport"), fetch(_fetch)
+ : Port(_fetch->name() + "-iport", _fetch->cpu), fetch(_fetch)
{ }
bool snoopRangeSent;
diff --git a/src/cpu/o3/inst_queue_impl.hh b/src/cpu/o3/inst_queue_impl.hh
index f3ce770fa..1d0f4b9f6 100644
--- a/src/cpu/o3/inst_queue_impl.hh
+++ b/src/cpu/o3/inst_queue_impl.hh
@@ -41,10 +41,9 @@
template <class Impl>
InstructionQueue<Impl>::FUCompletion::FUCompletion(DynInstPtr &_inst,
- int fu_idx,
- InstructionQueue<Impl> *iq_ptr)
- : Event(&mainEventQueue, Stat_Event_Pri),
- inst(_inst), fuIdx(fu_idx), iqPtr(iq_ptr), freeFU(false)
+ int fu_idx, InstructionQueue<Impl> *iq_ptr)
+ : Event(Stat_Event_Pri), inst(_inst), fuIdx(fu_idx), iqPtr(iq_ptr),
+ freeFU(false)
{
this->setFlags(Event::AutoDelete);
}
@@ -754,7 +753,7 @@ InstructionQueue<Impl>::scheduleReadyInsts()
FUCompletion *execution = new FUCompletion(issuing_inst,
idx, this);
- execution->schedule(curTick + cpu->ticks(op_latency - 1));
+ cpu->schedule(execution, curTick + cpu->ticks(op_latency - 1));
// @todo: Enforce that issue_latency == 1 or op_latency
if (issue_latency > 1) {
diff --git a/src/cpu/o3/lsq.hh b/src/cpu/o3/lsq.hh
index 44b69ab40..f8a825726 100644
--- a/src/cpu/o3/lsq.hh
+++ b/src/cpu/o3/lsq.hh
@@ -298,7 +298,7 @@ class LSQ {
public:
/** Default constructor. */
DcachePort(LSQ *_lsq)
- : Port(_lsq->name() + "-dport"), lsq(_lsq)
+ : Port(_lsq->name() + "-dport", _lsq->cpu), lsq(_lsq)
{ }
bool snoopRangeSent;
diff --git a/src/cpu/o3/lsq_unit.hh b/src/cpu/o3/lsq_unit.hh
index a82ba3ad5..8af8f18e6 100644
--- a/src/cpu/o3/lsq_unit.hh
+++ b/src/cpu/o3/lsq_unit.hh
@@ -584,7 +584,7 @@ LSQUnit<Impl>::read(Request *req, T &data, int load_idx)
// We'll say this has a 1 cycle load-store forwarding latency
// for now.
// @todo: Need to make this a parameter.
- wb->schedule(curTick);
+ cpu->schedule(wb, curTick);
++lsqForwLoads;
return NoFault;
diff --git a/src/cpu/o3/lsq_unit_impl.hh b/src/cpu/o3/lsq_unit_impl.hh
index 4b8d693a6..85662d496 100644
--- a/src/cpu/o3/lsq_unit_impl.hh
+++ b/src/cpu/o3/lsq_unit_impl.hh
@@ -45,7 +45,7 @@
template<class Impl>
LSQUnit<Impl>::WritebackEvent::WritebackEvent(DynInstPtr &_inst, PacketPtr _pkt,
LSQUnit *lsq_ptr)
- : Event(&mainEventQueue), inst(_inst), pkt(_pkt), lsqPtr(lsq_ptr)
+ : inst(_inst), pkt(_pkt), lsqPtr(lsq_ptr)
{
this->setFlags(Event::AutoDelete);
}
@@ -684,7 +684,7 @@ LSQUnit<Impl>::writebackStores()
"Instantly completing it.\n",
inst->seqNum);
WritebackEvent *wb = new WritebackEvent(inst, data_pkt, this);
- wb->schedule(curTick + 1);
+ cpu->schedule(wb, curTick + 1);
completeStore(storeWBIdx);
incrStIdx(storeWBIdx);
continue;
diff --git a/src/cpu/quiesce_event.cc b/src/cpu/quiesce_event.cc
index 81384d529..38ffb74e4 100644
--- a/src/cpu/quiesce_event.cc
+++ b/src/cpu/quiesce_event.cc
@@ -33,7 +33,7 @@
#include "cpu/quiesce_event.hh"
EndQuiesceEvent::EndQuiesceEvent(ThreadContext *_tc)
- : Event(&mainEventQueue), tc(_tc)
+ : tc(_tc)
{
}
diff --git a/src/cpu/simple/atomic.cc b/src/cpu/simple/atomic.cc
index 7ed1ee0c3..154a66162 100644
--- a/src/cpu/simple/atomic.cc
+++ b/src/cpu/simple/atomic.cc
@@ -43,7 +43,7 @@ using namespace std;
using namespace TheISA;
AtomicSimpleCPU::TickEvent::TickEvent(AtomicSimpleCPU *c)
- : Event(&mainEventQueue, CPU_Tick_Pri), cpu(c)
+ : Event(CPU_Tick_Pri), cpu(c)
{
}
@@ -201,9 +201,8 @@ AtomicSimpleCPU::resume()
changeState(SimObject::Running);
if (thread->status() == ThreadContext::Active) {
- if (!tickEvent.scheduled()) {
- tickEvent.schedule(nextCycle());
- }
+ if (!tickEvent.scheduled())
+ schedule(tickEvent, nextCycle());
}
}
@@ -230,7 +229,7 @@ AtomicSimpleCPU::takeOverFrom(BaseCPU *oldCPU)
ThreadContext *tc = threadContexts[i];
if (tc->status() == ThreadContext::Active && _status != Running) {
_status = Running;
- tickEvent.schedule(nextCycle());
+ schedule(tickEvent, nextCycle());
break;
}
}
@@ -260,7 +259,7 @@ AtomicSimpleCPU::activateContext(int thread_num, int delay)
numCycles += tickToCycles(thread->lastActivate - thread->lastSuspend);
//Make sure ticks are still on multiples of cycles
- tickEvent.schedule(nextCycle(curTick + ticks(delay)));
+ schedule(tickEvent, nextCycle(curTick + ticks(delay)));
_status = Running;
}
@@ -278,7 +277,7 @@ AtomicSimpleCPU::suspendContext(int thread_num)
// tick event may not be scheduled if this gets called from inside
// an instruction's execution, e.g. "quiesce"
if (tickEvent.scheduled())
- tickEvent.deschedule();
+ deschedule(tickEvent);
notIdleFraction--;
_status = Idle;
@@ -794,7 +793,7 @@ AtomicSimpleCPU::tick()
latency = ticks(1);
if (_status != Idle)
- tickEvent.schedule(curTick + latency);
+ schedule(tickEvent, curTick + latency);
}
diff --git a/src/cpu/simple/timing.cc b/src/cpu/simple/timing.cc
index ac67341ff..c4635d6a3 100644
--- a/src/cpu/simple/timing.cc
+++ b/src/cpu/simple/timing.cc
@@ -101,7 +101,7 @@ void
TimingSimpleCPU::CpuPort::TickEvent::schedule(PacketPtr _pkt, Tick t)
{
pkt = _pkt;
- Event::schedule(t);
+ cpu->schedule(this, t);
}
TimingSimpleCPU::TimingSimpleCPU(TimingSimpleCPUParams *p)
@@ -165,7 +165,7 @@ TimingSimpleCPU::resume()
// Delete the old event if it existed.
if (fetchEvent) {
if (fetchEvent->scheduled())
- fetchEvent->deschedule();
+ deschedule(fetchEvent);
delete fetchEvent;
}
@@ -186,7 +186,7 @@ TimingSimpleCPU::switchOut()
// If we've been scheduled to resume but are then told to switch out,
// we'll need to cancel it.
if (fetchEvent && fetchEvent->scheduled())
- fetchEvent->deschedule();
+ deschedule(fetchEvent);
}
@@ -228,7 +228,8 @@ TimingSimpleCPU::activateContext(int thread_num, int delay)
_status = Running;
// kick things off by initiating the fetch of the next instruction
- fetchEvent = new FetchEvent(this, nextCycle(curTick + ticks(delay)));
+ fetchEvent = new FetchEvent(this);
+ schedule(fetchEvent, nextCycle(curTick + ticks(delay)));
}
@@ -819,10 +820,11 @@ TimingSimpleCPU::DcachePort::recvRetry()
}
}
-TimingSimpleCPU::IprEvent::IprEvent(Packet *_pkt, TimingSimpleCPU *_cpu, Tick t)
- : Event(&mainEventQueue), pkt(_pkt), cpu(_cpu)
+TimingSimpleCPU::IprEvent::IprEvent(Packet *_pkt, TimingSimpleCPU *_cpu,
+ Tick t)
+ : pkt(_pkt), cpu(_cpu)
{
- schedule(t);
+ cpu->schedule(this, t);
}
void
diff --git a/src/cpu/simple/timing.hh b/src/cpu/simple/timing.hh
index e405f6a41..081051ea7 100644
--- a/src/cpu/simple/timing.hh
+++ b/src/cpu/simple/timing.hh
@@ -80,8 +80,7 @@ class TimingSimpleCPU : public BaseSimpleCPU
PacketPtr pkt;
TimingSimpleCPU *cpu;
- TickEvent(TimingSimpleCPU *_cpu)
- :Event(&mainEventQueue), cpu(_cpu) {}
+ TickEvent(TimingSimpleCPU *_cpu) : cpu(_cpu) {}
const char *description() const { return "Timing CPU tick"; }
void schedule(PacketPtr _pkt, Tick t);
};
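
The TickEvent here carries a packet along with the event: its own schedule(pkt, t) stores the payload and then defers to the CPU's generic schedule(), which is exactly the delegation the timing.cc hunk above adds. A minimal sketch of that shape, with FakePacket standing in for m5's Packet and EventManager for the owning TimingSimpleCPU:

    // Sketch of a payload-carrying event: callers hand it the packet
    // and a tick; queue insertion is delegated to the owning object.
    struct FakePacket { };
    typedef FakePacket *PacketPtr;

    class PortTickEvent : public Event {
      public:
        explicit PortTickEvent(EventManager *c) : cpu(c), pkt(0) {}
        void schedule(PacketPtr p, Tick t) {
            pkt = p;                  // stash the payload...
            cpu->schedule(this, t);   // ...then let the owner queue us
        }
        virtual void process() { /* hand pkt back to the CPU */ }
      private:
        EventManager *cpu;
        PacketPtr pkt;
    };
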
diff --git a/src/cpu/simple_thread.cc b/src/cpu/simple_thread.cc
index 0124184e0..42da659f2 100644
--- a/src/cpu/simple_thread.cc
+++ b/src/cpu/simple_thread.cc
@@ -189,7 +189,7 @@ void
SimpleThread::serialize(ostream &os)
{
ThreadState::serialize(os);
- regs.serialize(os);
+ regs.serialize(cpu, os);
// thread_num and cpu_id are deterministic from the config
}
@@ -198,7 +198,7 @@ void
SimpleThread::unserialize(Checkpoint *cp, const std::string &section)
{
ThreadState::unserialize(cp, section);
- regs.unserialize(cp, section);
+ regs.unserialize(cpu, cp, section);
// thread_num and cpu_id are deterministic from the config
}
diff --git a/src/cpu/thread_state.cc b/src/cpu/thread_state.cc
index 18845baf7..8d519c563 100644
--- a/src/cpu/thread_state.cc
+++ b/src/cpu/thread_state.cc
@@ -105,7 +105,7 @@ ThreadState::unserialize(Checkpoint *cp, const std::string &section)
Tick quiesceEndTick;
UNSERIALIZE_SCALAR(quiesceEndTick);
if (quiesceEndTick)
- quiesceEvent->schedule(quiesceEndTick);
+ baseCpu->schedule(quiesceEvent, quiesceEndTick);
if (kernelStats)
kernelStats->unserialize(cp, section);
#endif