Diffstat (limited to 'src')
-rw-r--r--  src/arch/arm/table_walker.cc                 |  2
-rw-r--r--  src/arch/x86/mmapped_ipr.hh                  |  4
-rw-r--r--  src/cpu/base.cc                              |  6
-rw-r--r--  src/cpu/inorder/cpu.cc                       | 31
-rw-r--r--  src/cpu/inorder/cpu.hh                       |  2
-rw-r--r--  src/cpu/inorder/resource.cc                  |  2
-rw-r--r--  src/cpu/inorder/resource_pool.cc             |  4
-rw-r--r--  src/cpu/o3/O3CPU.py                          |  4
-rw-r--r--  src/cpu/o3/commit.hh                         |  2
-rw-r--r--  src/cpu/o3/commit_impl.hh                    |  3
-rw-r--r--  src/cpu/o3/cpu.cc                            | 24
-rw-r--r--  src/cpu/o3/cpu.hh                            | 12
-rw-r--r--  src/cpu/o3/fetch_impl.hh                     |  2
-rw-r--r--  src/cpu/o3/inst_queue_impl.hh                |  2
-rw-r--r--  src/cpu/o3/lsq_unit.hh                       |  2
-rw-r--r--  src/cpu/simple/atomic.cc                     | 10
-rw-r--r--  src/cpu/simple/timing.cc                     | 42
-rw-r--r--  src/cpu/simple/timing.hh                     |  2
-rw-r--r--  src/cpu/testers/memtest/memtest.cc           |  2
-rw-r--r--  src/cpu/testers/networktest/networktest.cc   |  2
-rw-r--r--  src/dev/arm/pl111.cc                         | 16
-rw-r--r--  src/dev/i8254xGBe.cc                         |  4
-rw-r--r--  src/dev/ns_gige.cc                           |  6
-rw-r--r--  src/sim/clocked_object.hh                    | 89
24 files changed, 165 insertions, 110 deletions
diff --git a/src/arch/arm/table_walker.cc b/src/arch/arm/table_walker.cc
index ea71e6f1c..ffa193fbe 100644
--- a/src/arch/arm/table_walker.cc
+++ b/src/arch/arm/table_walker.cc
@@ -758,7 +758,7 @@ void
TableWalker::nextWalk(ThreadContext *tc)
{
if (pendingQueue.size())
- schedule(doProcessEvent, tc->getCpuPtr()->nextCycle(curTick()+1));
+ schedule(doProcessEvent, tc->getCpuPtr()->clockEdge(1));
}
diff --git a/src/arch/x86/mmapped_ipr.hh b/src/arch/x86/mmapped_ipr.hh
index 4c3292388..f17b64cad 100644
--- a/src/arch/x86/mmapped_ipr.hh
+++ b/src/arch/x86/mmapped_ipr.hh
@@ -62,7 +62,7 @@ namespace X86ISA
// Make sure we don't trot off the end of data.
assert(offset + pkt->getSize() <= sizeof(MiscReg));
pkt->setData(((uint8_t *)&data) + offset);
- return xc->getCpuPtr()->ticks(1);
+ return 1;
}
inline Tick
@@ -76,7 +76,7 @@ namespace X86ISA
assert(offset + pkt->getSize() <= sizeof(MiscReg));
pkt->writeData(((uint8_t *)&data) + offset);
xc->setMiscReg(index, gtoh(data));
- return xc->getCpuPtr()->ticks(1);
+ return 1;
}
}
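Note that handleIprRead() and handleIprWrite() now report their latency as a plain cycle count rather than in ticks: with an illustrative 500-tick CPU clock, the old return value ticks(1) was 500 ticks, whereas the new return value is simply 1 cycle. The caller is expected to convert back to an absolute tick, which is what the clockEdge(delay) calls in the src/cpu/simple/timing.cc hunks further down do.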
diff --git a/src/cpu/base.cc b/src/cpu/base.cc
index ff832f562..98ddde293 100644
--- a/src/cpu/base.cc
+++ b/src/cpu/base.cc
@@ -91,7 +91,7 @@ CPUProgressEvent::process()
{
Counter temp = cpu->totalOps();
#ifndef NDEBUG
- double ipc = double(temp - lastNumInst) / (_interval / cpu->ticks(1));
+ double ipc = double(temp - lastNumInst) / (_interval / cpu->clockPeriod());
DPRINTFN("%s progress event, total committed:%i, progress insts committed: "
"%lli, IPC: %0.8d\n", cpu->name(), temp, temp - lastNumInst,
@@ -261,9 +261,7 @@ BaseCPU::startup()
}
if (params()->progress_interval) {
- Tick num_ticks = ticks(params()->progress_interval);
-
- new CPUProgressEvent(this, num_ticks);
+ new CPUProgressEvent(this, params()->progress_interval);
}
}
diff --git a/src/cpu/inorder/cpu.cc b/src/cpu/inorder/cpu.cc
index 4aff0c579..ec06f19f0 100644
--- a/src/cpu/inorder/cpu.cc
+++ b/src/cpu/inorder/cpu.cc
@@ -212,7 +212,7 @@ void
InOrderCPU::CPUEvent::scheduleEvent(int delay)
{
assert(!scheduled() || squashed());
- cpu->reschedule(this, cpu->nextCycle(curTick() + cpu->ticks(delay)), true);
+ cpu->reschedule(this, cpu->clockEdge(delay), true);
}
void
@@ -401,7 +401,7 @@ InOrderCPU::InOrderCPU(Params *params)
frontEndSked = createFrontEndSked();
faultSked = createFaultSked();
- lastRunningCycle = curTick();
+ lastRunningCycle = curCycle();
lockAddr = 0;
lockFlag = false;
@@ -761,17 +761,17 @@ InOrderCPU::tick()
if (!tickEvent.scheduled()) {
if (_status == SwitchedOut) {
// increment stat
- lastRunningCycle = curTick();
+ lastRunningCycle = curCycle();
} else if (!activityRec.active()) {
DPRINTF(InOrderCPU, "sleeping CPU.\n");
- lastRunningCycle = curTick();
+ lastRunningCycle = curCycle();
timesIdled++;
} else {
//Tick next_tick = curTick() + cycles(1);
//tickEvent.schedule(next_tick);
- schedule(&tickEvent, nextCycle(curTick() + 1));
+ schedule(&tickEvent, clockEdge(1));
DPRINTF(InOrderCPU, "Scheduled CPU for next tick @ %i.\n",
- nextCycle(curTick() + 1));
+ clockEdge(1));
}
}
@@ -959,15 +959,10 @@ InOrderCPU::scheduleCpuEvent(CPUEventType c_event, Fault fault,
CPUEvent *cpu_event = new CPUEvent(this, c_event, fault, tid, inst,
event_pri);
- Tick sked_tick = nextCycle(curTick() + ticks(delay));
- if (delay >= 0) {
- DPRINTF(InOrderCPU, "Scheduling CPU Event (%s) for cycle %i, [tid:%i].\n",
- eventNames[c_event], curTick() + delay, tid);
- schedule(cpu_event, sked_tick);
- } else {
- cpu_event->process();
- cpuEventRemoveList.push(cpu_event);
- }
+ Tick sked_tick = clockEdge(delay);
+ DPRINTF(InOrderCPU, "Scheduling CPU Event (%s) for cycle %i, [tid:%i].\n",
+ eventNames[c_event], curTick() + delay, tid);
+ schedule(cpu_event, sked_tick);
// Broadcast event to the Resource Pool
// Need to reset tid just in case this is a dummy instruction
@@ -1696,7 +1691,9 @@ InOrderCPU::wakeCPU()
DPRINTF(Activity, "Waking up CPU\n");
- Tick extra_cycles = tickToCycles((curTick() - 1) - lastRunningCycle);
+ Tick extra_cycles = curCycle() - lastRunningCycle;
+ if (extra_cycles != 0)
+ --extra_cycles;
idleCycles += extra_cycles;
for (int stage_num = 0; stage_num < NumStages; stage_num++) {
@@ -1705,7 +1702,7 @@ InOrderCPU::wakeCPU()
numCycles += extra_cycles;
- schedule(&tickEvent, nextCycle(curTick()));
+ schedule(&tickEvent, nextCycle());
}
// Lots of copied full system code...place into BaseCPU class?
diff --git a/src/cpu/inorder/cpu.hh b/src/cpu/inorder/cpu.hh
index 3103201dd..9a0a62c87 100644
--- a/src/cpu/inorder/cpu.hh
+++ b/src/cpu/inorder/cpu.hh
@@ -204,7 +204,7 @@ class InOrderCPU : public BaseCPU
void scheduleTickEvent(int delay)
{
assert(!tickEvent.scheduled() || tickEvent.squashed());
- reschedule(&tickEvent, nextCycle(curTick() + ticks(delay)), true);
+ reschedule(&tickEvent, clockEdge(delay), true);
}
/** Unschedule tick event, regardless of its current state. */
diff --git a/src/cpu/inorder/resource.cc b/src/cpu/inorder/resource.cc
index 2ddce13c3..098a4e1b4 100644
--- a/src/cpu/inorder/resource.cc
+++ b/src/cpu/inorder/resource.cc
@@ -372,7 +372,7 @@ Resource::ticks(int num_cycles)
void
Resource::scheduleExecution(int slot_num)
{
- if (latency >= 1) {
+ if (latency > 0) {
scheduleEvent(slot_num, latency);
} else {
execute(slot_num);
diff --git a/src/cpu/inorder/resource_pool.cc b/src/cpu/inorder/resource_pool.cc
index 4c01165b8..31511314e 100644
--- a/src/cpu/inorder/resource_pool.cc
+++ b/src/cpu/inorder/resource_pool.cc
@@ -238,7 +238,7 @@ ResourcePool::scheduleEvent(InOrderCPU::CPUEventType e_type, DynInstPtr inst,
{
assert(delay >= 0);
- Tick when = cpu->nextCycle(curTick() + cpu->ticks(delay));
+ Tick when = cpu->clockEdge(delay);
switch ((int)e_type)
{
@@ -460,7 +460,7 @@ ResourcePool::ResPoolEvent::scheduleEvent(int delay)
{
InOrderCPU *cpu = resPool->cpu;
assert(!scheduled() || squashed());
- cpu->reschedule(this, cpu->nextCycle(curTick() + cpu->ticks(delay)), true);
+ cpu->reschedule(this, cpu->clockEdge(delay), true);
}
/** Unschedule resource event, regardless of its current state. */
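For reference, the rewritten scheduling calls are intended as drop-in equivalents of the old idiom: for a fixed clock period and a non-negative delay, nextCycle(curTick() + ticks(delay)) and clockEdge(delay) both round the current tick up to a clock edge and then add delay whole periods. A minimal standalone sketch (not gem5 code; Tick, divCeil and the 500-tick period are stand-ins) checking that claim:

    #include <cassert>
    #include <cstdint>

    typedef uint64_t Tick;

    // integer ceiling division, as used by ClockedObject
    static Tick divCeil(Tick a, Tick b) { return (a + b - 1) / b; }

    // old idiom: round an absolute tick up to the next clock edge
    static Tick nextCycleAt(Tick t, Tick clock)
    { return divCeil(t, clock) * clock; }

    // new idiom: the clock edge at or after 'now', plus whole cycles
    static Tick clockEdgeAt(Tick now, Tick clock, unsigned cycles)
    { return divCeil(now, clock) * clock + cycles * clock; }

    int main()
    {
        const Tick clock = 500; // illustrative clock period in ticks
        for (Tick now = 0; now <= 10 * clock; ++now)
            for (unsigned delay = 0; delay < 4; ++delay)
                assert(clockEdgeAt(now, clock, delay) ==
                       nextCycleAt(now + delay * clock, clock));
        return 0;
    }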
diff --git a/src/cpu/o3/O3CPU.py b/src/cpu/o3/O3CPU.py
index 3138aebbf..9306cb44e 100644
--- a/src/cpu/o3/O3CPU.py
+++ b/src/cpu/o3/O3CPU.py
@@ -76,8 +76,8 @@ class DerivO3CPU(BaseCPU):
renameToROBDelay = Param.Unsigned(1, "Rename to reorder buffer delay")
commitWidth = Param.Unsigned(8, "Commit width")
squashWidth = Param.Unsigned(8, "Squash width")
- trapLatency = Param.Tick(13, "Trap latency")
- fetchTrapLatency = Param.Tick(1, "Fetch trap latency")
+ trapLatency = Param.Unsigned(13, "Trap latency")
+ fetchTrapLatency = Param.Unsigned(1, "Fetch trap latency")
backComSize = Param.Unsigned(5, "Time buffer size for backwards communication")
forwardComSize = Param.Unsigned(5, "Time buffer size for forward communication")
diff --git a/src/cpu/o3/commit.hh b/src/cpu/o3/commit.hh
index b5539c702..1119e8b50 100644
--- a/src/cpu/o3/commit.hh
+++ b/src/cpu/o3/commit.hh
@@ -409,7 +409,7 @@ class DefaultCommit
/** The latency to handle a trap. Used when scheduling trap
* squash event.
*/
- Tick trapLatency;
+ uint trapLatency;
/** The interrupt fault. */
Fault interrupt;
diff --git a/src/cpu/o3/commit_impl.hh b/src/cpu/o3/commit_impl.hh
index 31398c3d9..66474c05f 100644
--- a/src/cpu/o3/commit_impl.hh
+++ b/src/cpu/o3/commit_impl.hh
@@ -374,7 +374,6 @@ DefaultCommit<Impl>::initStage()
cpu->activateStage(O3CPU::CommitIdx);
cpu->activityThisCycle();
- trapLatency = cpu->ticks(trapLatency);
}
template <class Impl>
@@ -509,7 +508,7 @@ DefaultCommit<Impl>::generateTrapEvent(ThreadID tid)
TrapEvent *trap = new TrapEvent(this, tid);
- cpu->schedule(trap, curTick() + trapLatency);
+ cpu->schedule(trap, cpu->clockEdge(trapLatency));
trapInFlight[tid] = true;
thread[tid]->trapPending = true;
}
diff --git a/src/cpu/o3/cpu.cc b/src/cpu/o3/cpu.cc
index 71d04740c..95683a77a 100644
--- a/src/cpu/o3/cpu.cc
+++ b/src/cpu/o3/cpu.cc
@@ -386,7 +386,7 @@ FullO3CPU<Impl>::FullO3CPU(DerivO3CPUParams *params)
// Setup the ROB for whichever stages need it.
commit.setROB(&rob);
- lastRunningCycle = curTick();
+ lastRunningCycle = curCycle();
lastActivatedCycle = 0;
#if 0
@@ -623,13 +623,13 @@ FullO3CPU<Impl>::tick()
getState() == SimObject::Drained) {
DPRINTF(O3CPU, "Switched out!\n");
// increment stat
- lastRunningCycle = curTick();
+ lastRunningCycle = curCycle();
} else if (!activityRec.active() || _status == Idle) {
DPRINTF(O3CPU, "Idle!\n");
- lastRunningCycle = curTick();
+ lastRunningCycle = curCycle();
timesIdled++;
} else {
- schedule(tickEvent, nextCycle(curTick() + ticks(1)));
+ schedule(tickEvent, clockEdge(1));
DPRINTF(O3CPU, "Scheduling next tick!\n");
}
}
@@ -762,7 +762,10 @@ FullO3CPU<Impl>::activateContext(ThreadID tid, int delay)
activityRec.activity();
fetch.wakeFromQuiesce();
- quiesceCycles += tickToCycles((curTick() - 1) - lastRunningCycle);
+ Tick cycles = curCycle() - lastRunningCycle;
+ if (cycles != 0)
+ --cycles;
+ quiesceCycles += cycles;
lastActivatedCycle = curTick();
@@ -801,7 +804,7 @@ FullO3CPU<Impl>::suspendContext(ThreadID tid)
unscheduleTickEvent();
DPRINTF(Quiesce, "Suspending Context\n");
- lastRunningCycle = curTick();
+ lastRunningCycle = curCycle();
_status = Idle;
}
@@ -1275,7 +1278,7 @@ FullO3CPU<Impl>::takeOverFrom(BaseCPU *oldCPU)
if (!tickEvent.scheduled())
schedule(tickEvent, nextCycle());
- lastRunningCycle = curTick();
+ lastRunningCycle = curCycle();
}
template <class Impl>
@@ -1669,8 +1672,11 @@ FullO3CPU<Impl>::wakeCPU()
DPRINTF(Activity, "Waking up CPU\n");
- idleCycles += tickToCycles((curTick() - 1) - lastRunningCycle);
- numCycles += tickToCycles((curTick() - 1) - lastRunningCycle);
+ Tick cycles = curCycle() - lastRunningCycle;
+ if (cycles != 0)
+ --cycles;
+ idleCycles += cycles;
+ numCycles += cycles;
schedule(tickEvent, nextCycle());
}
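The idle/quiesce accounting above mirrors the old arithmetic in cycle terms. With an illustrative 500-tick clock: if the CPU last ran at cycle 100 (tick 50000) and wakes at tick 51500 (cycle 103), the old code credited tickToCycles((51500 - 1) - 50000) = 2 cycles, and the new code computes 103 - 100 = 3 and then subtracts one, also crediting 2. The subtraction of one plays the role of the old (curTick() - 1) adjustment, which avoided counting the cycle in which the CPU wakes, and the if (cycles != 0) guard keeps the unsigned subtraction from wrapping when the CPU wakes in the same cycle it was marked idle.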
diff --git a/src/cpu/o3/cpu.hh b/src/cpu/o3/cpu.hh
index b1fd12a2e..5910f314d 100644
--- a/src/cpu/o3/cpu.hh
+++ b/src/cpu/o3/cpu.hh
@@ -214,9 +214,9 @@ class FullO3CPU : public BaseO3CPU
void scheduleTickEvent(int delay)
{
if (tickEvent.squashed())
- reschedule(tickEvent, nextCycle(curTick() + ticks(delay)));
+ reschedule(tickEvent, clockEdge(delay));
else if (!tickEvent.scheduled())
- schedule(tickEvent, nextCycle(curTick() + ticks(delay)));
+ schedule(tickEvent, clockEdge(delay));
}
/** Unschedule tick event, regardless of its current state. */
@@ -256,9 +256,9 @@ class FullO3CPU : public BaseO3CPU
// Schedule thread to activate, regardless of its current state.
if (activateThreadEvent[tid].squashed())
reschedule(activateThreadEvent[tid],
- nextCycle(curTick() + ticks(delay)));
+ clockEdge(delay));
else if (!activateThreadEvent[tid].scheduled()) {
- Tick when = nextCycle(curTick() + ticks(delay));
+ Tick when = clockEdge(delay);
// Check if the deallocateEvent is also scheduled, and make
// sure they do not happen at same time causing a sleep that
@@ -319,10 +319,10 @@ class FullO3CPU : public BaseO3CPU
// Schedule thread to activate, regardless of its current state.
if (deallocateContextEvent[tid].squashed())
reschedule(deallocateContextEvent[tid],
- nextCycle(curTick() + ticks(delay)));
+ clockEdge(delay));
else if (!deallocateContextEvent[tid].scheduled())
schedule(deallocateContextEvent[tid],
- nextCycle(curTick() + ticks(delay)));
+ clockEdge(delay));
}
/** Unschedule thread deallocation in CPU */
diff --git a/src/cpu/o3/fetch_impl.hh b/src/cpu/o3/fetch_impl.hh
index caafa3fe3..9caf0c79b 100644
--- a/src/cpu/o3/fetch_impl.hh
+++ b/src/cpu/o3/fetch_impl.hh
@@ -646,7 +646,7 @@ DefaultFetch<Impl>::finishTranslation(Fault fault, RequestPtr mem_req)
assert(!finishTranslationEvent.scheduled());
finishTranslationEvent.setFault(fault);
finishTranslationEvent.setReq(mem_req);
- cpu->schedule(finishTranslationEvent, cpu->nextCycle(curTick() + cpu->ticks(1)));
+ cpu->schedule(finishTranslationEvent, cpu->clockEdge(1));
return;
}
DPRINTF(Fetch, "[tid:%i] Got back req with addr %#x but expected %#x\n",
diff --git a/src/cpu/o3/inst_queue_impl.hh b/src/cpu/o3/inst_queue_impl.hh
index ae5f93c38..b6c3bd239 100644
--- a/src/cpu/o3/inst_queue_impl.hh
+++ b/src/cpu/o3/inst_queue_impl.hh
@@ -828,7 +828,7 @@ InstructionQueue<Impl>::scheduleReadyInsts()
FUCompletion *execution = new FUCompletion(issuing_inst,
idx, this);
- cpu->schedule(execution, curTick() + cpu->ticks(op_latency - 1));
+ cpu->schedule(execution, cpu->clockEdge(op_latency - 1));
// @todo: Enforce that issue_latency == 1 or op_latency
if (issue_latency > 1) {
diff --git a/src/cpu/o3/lsq_unit.hh b/src/cpu/o3/lsq_unit.hh
index b886a2259..c567341c7 100644
--- a/src/cpu/o3/lsq_unit.hh
+++ b/src/cpu/o3/lsq_unit.hh
@@ -632,7 +632,7 @@ LSQUnit<Impl>::read(Request *req, Request *sreqLow, Request *sreqHigh,
delete snd_data_pkt;
}
WritebackEvent *wb = new WritebackEvent(load_inst, data_pkt, this);
- cpu->schedule(wb, curTick() + delay);
+ cpu->schedule(wb, cpu->clockEdge(delay));
return NoFault;
}
diff --git a/src/cpu/simple/atomic.cc b/src/cpu/simple/atomic.cc
index fc6724939..d1b0391fc 100644
--- a/src/cpu/simple/atomic.cc
+++ b/src/cpu/simple/atomic.cc
@@ -208,10 +208,10 @@ AtomicSimpleCPU::activateContext(ThreadID thread_num, int delay)
assert(!tickEvent.scheduled());
notIdleFraction++;
- numCycles += tickToCycles(thread->lastActivate - thread->lastSuspend);
+ numCycles += tickToCycle(thread->lastActivate - thread->lastSuspend);
//Make sure ticks are still on multiples of cycles
- schedule(tickEvent, nextCycle(curTick() + ticks(delay)));
+ schedule(tickEvent, clockEdge(delay));
_status = Running;
}
@@ -518,7 +518,7 @@ AtomicSimpleCPU::tick()
stall_ticks += dcache_latency;
if (stall_ticks) {
- Tick stall_cycles = stall_ticks / ticks(1);
+ Tick stall_cycles = stall_ticks / clockPeriod();
Tick aligned_stall_ticks = ticks(stall_cycles);
if (aligned_stall_ticks < stall_ticks)
@@ -533,8 +533,8 @@ AtomicSimpleCPU::tick()
}
// instruction takes at least one cycle
- if (latency < ticks(1))
- latency = ticks(1);
+ if (latency < clockPeriod())
+ latency = clockPeriod();
if (_status != Idle)
schedule(tickEvent, curTick() + latency);
diff --git a/src/cpu/simple/timing.cc b/src/cpu/simple/timing.cc
index 9022845ce..5437e77aa 100644
--- a/src/cpu/simple/timing.cc
+++ b/src/cpu/simple/timing.cc
@@ -87,13 +87,11 @@ TimingSimpleCPU::TimingCPUPort::TickEvent::schedule(PacketPtr _pkt, Tick t)
TimingSimpleCPU::TimingSimpleCPU(TimingSimpleCPUParams *p)
: BaseSimpleCPU(p), fetchTranslation(this), icachePort(this),
- dcachePort(this), fetchEvent(this)
+ dcachePort(this), ifetch_pkt(NULL), dcache_pkt(NULL), previousCycle(0),
+ fetchEvent(this)
{
_status = Idle;
- ifetch_pkt = dcache_pkt = NULL;
- drainEvent = NULL;
- previousTick = 0;
changeState(SimObject::Running);
system->totalNumInsts = 0;
}
@@ -156,7 +154,7 @@ TimingSimpleCPU::switchOut()
{
assert(_status == Running || _status == Idle);
_status = SwitchedOut;
- numCycles += tickToCycles(curTick() - previousTick);
+ numCycles += curCycle() - previousCycle;
// If we've been scheduled to resume but are then told to switch out,
// we'll need to cancel it.
@@ -184,7 +182,7 @@ TimingSimpleCPU::takeOverFrom(BaseCPU *oldCPU)
_status = Idle;
}
assert(threadContexts.size() == 1);
- previousTick = curTick();
+ previousCycle = curCycle();
}
@@ -202,7 +200,7 @@ TimingSimpleCPU::activateContext(ThreadID thread_num, int delay)
_status = Running;
// kick things off by initiating the fetch of the next instruction
- schedule(fetchEvent, nextCycle(curTick() + ticks(delay)));
+ schedule(fetchEvent, clockEdge(delay));
}
@@ -231,9 +229,8 @@ TimingSimpleCPU::handleReadPacket(PacketPtr pkt)
{
RequestPtr req = pkt->req;
if (req->isMmappedIpr()) {
- Tick delay;
- delay = TheISA::handleIprRead(thread->getTC(), pkt);
- new IprEvent(pkt, this, nextCycle(curTick() + delay));
+ Tick delay = TheISA::handleIprRead(thread->getTC(), pkt);
+ new IprEvent(pkt, this, clockEdge(delay));
_status = DcacheWaitResponse;
dcache_pkt = NULL;
} else if (!dcachePort.sendTimingReq(pkt)) {
@@ -322,8 +319,8 @@ TimingSimpleCPU::translationFault(Fault fault)
{
// fault may be NoFault in cases where a fault is suppressed,
// for instance prefetches.
- numCycles += tickToCycles(curTick() - previousTick);
- previousTick = curTick();
+ numCycles += curCycle() - previousCycle;
+ previousCycle = curCycle();
if (traceData) {
// Since there was a fault, we shouldn't trace this instruction.
@@ -446,9 +443,8 @@ TimingSimpleCPU::handleWritePacket()
{
RequestPtr req = dcache_pkt->req;
if (req->isMmappedIpr()) {
- Tick delay;
- delay = TheISA::handleIprWrite(thread->getTC(), dcache_pkt);
- new IprEvent(dcache_pkt, this, nextCycle(curTick() + delay));
+ Tick delay = TheISA::handleIprWrite(thread->getTC(), dcache_pkt);
+ new IprEvent(dcache_pkt, this, clockEdge(delay));
_status = DcacheWaitResponse;
dcache_pkt = NULL;
} else if (!dcachePort.sendTimingReq(dcache_pkt)) {
@@ -567,8 +563,8 @@ TimingSimpleCPU::fetch()
_status = IcacheWaitResponse;
completeIfetch(NULL);
- numCycles += tickToCycles(curTick() - previousTick);
- previousTick = curTick();
+ numCycles += curCycle() - previousCycle;
+ previousCycle = curCycle();
}
}
@@ -600,8 +596,8 @@ TimingSimpleCPU::sendFetch(Fault fault, RequestPtr req, ThreadContext *tc)
advanceInst(fault);
}
- numCycles += tickToCycles(curTick() - previousTick);
- previousTick = curTick();
+ numCycles += curCycle() - previousCycle;
+ previousCycle = curCycle();
}
@@ -647,8 +643,8 @@ TimingSimpleCPU::completeIfetch(PacketPtr pkt)
_status = Running;
- numCycles += tickToCycles(curTick() - previousTick);
- previousTick = curTick();
+ numCycles += curCycle() - previousCycle;
+ previousCycle = curCycle();
if (getState() == SimObject::Draining) {
if (pkt) {
@@ -754,8 +750,8 @@ TimingSimpleCPU::completeDataAccess(PacketPtr pkt)
assert(_status == DcacheWaitResponse || _status == DTBWaitResponse ||
pkt->req->getFlags().isSet(Request::NO_ACCESS));
- numCycles += tickToCycles(curTick() - previousTick);
- previousTick = curTick();
+ numCycles += curCycle() - previousCycle;
+ previousCycle = curCycle();
if (pkt->senderState) {
SplitFragmentSenderState * send_state =
diff --git a/src/cpu/simple/timing.hh b/src/cpu/simple/timing.hh
index b6b78c5db..c4d1573af 100644
--- a/src/cpu/simple/timing.hh
+++ b/src/cpu/simple/timing.hh
@@ -234,7 +234,7 @@ class TimingSimpleCPU : public BaseSimpleCPU
PacketPtr ifetch_pkt;
PacketPtr dcache_pkt;
- Tick previousTick;
+ Tick previousCycle;
protected:
diff --git a/src/cpu/testers/memtest/memtest.cc b/src/cpu/testers/memtest/memtest.cc
index 642af4677..f8a8b66bd 100644
--- a/src/cpu/testers/memtest/memtest.cc
+++ b/src/cpu/testers/memtest/memtest.cc
@@ -246,7 +246,7 @@ void
MemTest::tick()
{
if (!tickEvent.scheduled())
- schedule(tickEvent, curTick() + ticks(1));
+ schedule(tickEvent, clockEdge(1));
if (++noResponseCycles >= 500000) {
if (issueDmas) {
diff --git a/src/cpu/testers/networktest/networktest.cc b/src/cpu/testers/networktest/networktest.cc
index 5d0e8e0c9..05568b3c0 100644
--- a/src/cpu/testers/networktest/networktest.cc
+++ b/src/cpu/testers/networktest/networktest.cc
@@ -165,7 +165,7 @@ NetworkTest::tick()
exitSimLoop("Network Tester completed simCycles");
else {
if (!tickEvent.scheduled())
- schedule(tickEvent, curTick() + ticks(1));
+ schedule(tickEvent, clockEdge(1));
}
}
diff --git a/src/dev/arm/pl111.cc b/src/dev/arm/pl111.cc
index d79a1cf39..7990e02ed 100644
--- a/src/dev/arm/pl111.cc
+++ b/src/dev/arm/pl111.cc
@@ -440,7 +440,7 @@ Pl111::readFramebuffer()
schedule(intEvent, nextCycle());
curAddr = 0;
- startTime = curTick();
+ startTime = curCycle();
maxAddr = static_cast<Addr>(length * bytesPerPixel);
@@ -475,12 +475,12 @@ Pl111::fillFifo()
void
Pl111::dmaDone()
{
- Tick maxFrameTime = lcdTiming2.cpl * height * clock;
+ Tick maxFrameTime = lcdTiming2.cpl * height;
--dmaPendingNum;
if (maxAddr == curAddr && !dmaPendingNum) {
- if ((curTick() - startTime) > maxFrameTime) {
+ if ((curCycle() - startTime) > maxFrameTime) {
warn("CLCD controller buffer underrun, took %d cycles when should"
" have taken %d\n", curTick() - startTime, maxFrameTime);
lcdRis.underflow = 1;
@@ -498,11 +498,13 @@ Pl111::dmaDone()
pic->seekp(0);
bmp->write(pic);
- DPRINTF(PL111, "-- schedule next dma read event at %d tick \n",
- maxFrameTime + curTick());
-
+ // schedule the next read based on when the last frame started
+ // and the desired fps (i.e. maxFrameTime), we turn the
+ // argument into a relative number of cycles in the future by
+ // subtracting curCycle()
if (lcdControl.lcden)
- schedule(readEvent, nextCycle(startTime + maxFrameTime));
+ schedule(readEvent, clockEdge(startTime + maxFrameTime -
+ curCycle()));
}
if (dmaPendingNum > (maxOutstandingDma - waterMark))
diff --git a/src/dev/i8254xGBe.cc b/src/dev/i8254xGBe.cc
index f7f6a1178..3ba140bce 100644
--- a/src/dev/i8254xGBe.cc
+++ b/src/dev/i8254xGBe.cc
@@ -2052,7 +2052,7 @@ IGbE::restartClock()
{
if (!tickEvent.scheduled() && (rxTick || txTick || txFifoTick) &&
getState() == SimObject::Running)
- schedule(tickEvent, (curTick() / ticks(1)) * ticks(1) + ticks(1));
+ schedule(tickEvent, clockEdge(1));
}
unsigned int
@@ -2434,7 +2434,7 @@ IGbE::tick()
if (rxTick || txTick || txFifoTick)
- schedule(tickEvent, curTick() + ticks(1));
+ schedule(tickEvent, curTick() + clockPeriod());
}
void
diff --git a/src/dev/ns_gige.cc b/src/dev/ns_gige.cc
index 6a55516c5..583cdb140 100644
--- a/src/dev/ns_gige.cc
+++ b/src/dev/ns_gige.cc
@@ -1147,7 +1147,7 @@ NSGigE::rxKick()
}
// Go to the next state machine clock tick.
- rxKickTick = curTick() + ticks(1);
+ rxKickTick = curTick() + clockPeriod();
}
switch(rxDmaState) {
@@ -1594,7 +1594,7 @@ NSGigE::txKick()
}
// Go to the next state machine clock tick.
- txKickTick = curTick() + ticks(1);
+ txKickTick = curTick() + clockPeriod();
}
switch(txDmaState) {
@@ -2015,7 +2015,7 @@ NSGigE::transferDone()
DPRINTF(Ethernet, "transfer complete: data in txFifo...schedule xmit\n");
- reschedule(txEvent, curTick() + ticks(1), true);
+ reschedule(txEvent, curTick() + clockPeriod(), true);
}
bool
diff --git a/src/sim/clocked_object.hh b/src/sim/clocked_object.hh
index 8ab637772..050a15a74 100644
--- a/src/sim/clocked_object.hh
+++ b/src/sim/clocked_object.hh
@@ -58,6 +58,14 @@ class ClockedObject : public SimObject
private:
+ // the tick value of the next clock edge (>= curTick()) at the
+ // time of the last call to update()
+ mutable Tick tick;
+
+ // The cycle counter value corresponding to the current value of
+ // 'tick'
+ mutable Tick cycle;
+
/**
* Prevent inadvertent use of the copy constructor and assignment
* operator by making them private.
@@ -65,6 +73,34 @@ class ClockedObject : public SimObject
ClockedObject(ClockedObject&);
ClockedObject& operator=(ClockedObject&);
+ /**
+ * Align cycle and tick to the next clock edge if not already done.
+ */
+ void update() const
+ {
+ // both tick and cycle are up-to-date and we are done, note
+ // that the >= is important as it captures cases where tick
+ // has already passed curTick()
+ if (tick >= curTick())
+ return;
+
+ // optimise for the common case and see if the tick should be
+ // advanced by a single clock period
+ tick += clock;
+ ++cycle;
+
+ // see if we are done at this point
+ if (tick >= curTick())
+ return;
+
+ // if not, we have to recalculate the cycle and tick, we
+ // perform the calculations in terms of relative cycles to
+ // allow changes to the clock period in the future
+ Tick elapsedCycles = divCeil(curTick() - tick, clock);
+ cycle += elapsedCycles;
+ tick += elapsedCycles * clock;
+ }
+
protected:
// Clock period in ticks
@@ -74,7 +110,8 @@ class ClockedObject : public SimObject
* Create a clocked object and set the clock based on the
* parameters.
*/
- ClockedObject(const ClockedObjectParams* p) : SimObject(p), clock(p->clock)
+ ClockedObject(const ClockedObjectParams* p) :
+ SimObject(p), tick(0), cycle(0), clock(p->clock)
{ }
/**
@@ -85,33 +122,53 @@ class ClockedObject : public SimObject
public:
/**
- * Based on the clock of the object, determine the tick when the
- * next cycle begins, in other words, round the curTick() to the
- * next tick that is a multiple of the clock.
+ * Determine the tick when a cycle begins, by default the current
+ * one, but the argument also enables the caller to determine a
+ * future cycle.
*
- * @return The tick when the next cycle starts
+ * @param cycles The number of cycles into the future
+ *
+ * @return The tick when the clock edge occurs
*/
- Tick nextCycle() const
- { return divCeil(curTick(), clock) * clock; }
+ inline Tick clockEdge(int cycles = 0) const
+ {
+ // align tick to the next clock edge
+ update();
+
+ // figure out when this future cycle is
+ return tick + ticks(cycles);
+ }
/**
- * Determine the next cycle starting from a given tick instead of
- * curTick().
+ * Determine the current cycle, corresponding to a tick aligned to
+ * a clock edge.
*
- * @param begin_tick The tick to round to a clock edge
+ * @return The current cycle
+ */
+ inline Tick curCycle() const
+ {
+ // align cycle to the next clock edge.
+ update();
+
+ return cycle;
+ }
+
+ /**
+ * Based on the clock of the object, determine the tick when the
+ * next cycle begins, in other words, return the next clock edge.
*
- * @return The tick when the cycle after or on begin_tick starts
+ * @return The tick when the next cycle starts
*/
- Tick nextCycle(Tick begin_tick) const
- { return divCeil(begin_tick, clock) * clock; }
+ Tick nextCycle() const
+ { return clockEdge(); }
inline Tick frequency() const { return SimClock::Frequency / clock; }
- inline Tick ticks(int numCycles) const { return clock * numCycles; }
+ inline Tick ticks(int cycles) const { return clock * cycles; }
- inline Tick curCycle() const { return curTick() / clock; }
+ inline Tick clockPeriod() const { return clock; }
- inline Tick tickToCycles(Tick val) const { return val / clock; }
+ inline Tick tickToCycle(Tick tick) const { return tick / clock; }
};
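To make the cached-edge logic above easier to follow in isolation, here is a standalone mini model (not the gem5 class; the global 'now' variable stands in for curTick() and the 500-tick period is illustrative) that mirrors update(), clockEdge(), curCycle() and nextCycle():

    #include <cassert>
    #include <cstdint>

    typedef uint64_t Tick;

    static Tick now = 0;                    // stand-in for curTick()
    static Tick divCeil(Tick a, Tick b) { return (a + b - 1) / b; }

    class MiniClocked
    {
        mutable Tick tick;                  // next edge >= now at last update()
        mutable Tick cycle;                 // cycle count corresponding to 'tick'
        const Tick clock;                   // clock period in ticks

        void update() const
        {
            if (tick >= now)                // cached edge still valid
                return;
            tick += clock;                  // common case: advance one period
            ++cycle;
            if (tick >= now)
                return;
            Tick elapsed = divCeil(now - tick, clock); // catch up in one step
            cycle += elapsed;
            tick += elapsed * clock;
        }

      public:
        MiniClocked(Tick period) : tick(0), cycle(0), clock(period) { }

        Tick clockEdge(int cycles = 0) const
        { update(); return tick + cycles * clock; }
        Tick curCycle() const { update(); return cycle; }
        Tick nextCycle() const { return clockEdge(); }
        Tick clockPeriod() const { return clock; }
    };

    int main()
    {
        MiniClocked obj(500);               // illustrative 500-tick clock

        now = 0;
        assert(obj.curCycle() == 0 && obj.clockEdge() == 0);

        now = 1;                            // just past the edge at tick 0
        assert(obj.clockEdge() == 500 && obj.curCycle() == 1);
        assert(obj.clockEdge(2) == 1500);   // two cycles into the future

        now = 2600;                         // several cycles later, mid-cycle
        assert(obj.curCycle() == 6 && obj.nextCycle() == 3000);
        return 0;
    }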