commit     6f1187943cf78c2fd0334bd7e4372ae79a587fa4
author     Steve Reinhardt <steve.reinhardt@amd.com>  2011-01-07 21:50:29 -0800
committer  Steve Reinhardt <steve.reinhardt@amd.com>  2011-01-07 21:50:29 -0800
tree       8d0eac2e2f4d55d48245266d3930ad4e7f92030f /src/cpu
parent     c22be9f2f016872b05d65c82055ddc936b4aa075
download   gem5-6f1187943cf78c2fd0334bd7e4372ae79a587fa4.tar.xz
Replace curTick global variable with accessor functions.
This step makes it easy to replace the accessor functions
(which still access a global variable) with ones that access
per-thread curTick values.
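To make the pattern concrete, here is a minimal sketch (illustrative only: the real declarations live outside this src/cpu-only diff, and the backing variable's name below is an assumption):

    typedef uint64_t Tick;

    Tick _globalCurTick = 0;   // assumed name; still a single global for now

    // After this change, call sites write curTick() instead of naming the
    // global directly:
    inline Tick curTick() { return _globalCurTick; }

    // The follow-on step this enables: swap the accessor's body for a
    // per-thread lookup (hypothetical sketch) without touching call sites:
    //
    //     inline Tick curTick() { return perThreadCurTick[currentThreadId()]; }

A typical call site in the diff below therefore changes from
cpu->schedule(this, curTick + _interval) to cpu->schedule(this, curTick() + _interval).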
Diffstat (limited to 'src/cpu')
49 files changed, 191 insertions, 191 deletions
diff --git a/src/cpu/base.cc b/src/cpu/base.cc
index e0d29577d..1816568ce 100644
--- a/src/cpu/base.cc
+++ b/src/cpu/base.cc
@@ -66,7 +66,7 @@ CPUProgressEvent::CPUProgressEvent(BaseCPU *_cpu, Tick ival)
     cpu(_cpu), _repeatEvent(true)
 {
     if (_interval)
-        cpu->schedule(this, curTick + _interval);
+        cpu->schedule(this, curTick() + _interval);
 }
 
 void
@@ -82,13 +82,13 @@ CPUProgressEvent::process()
         ipc = 0.0;
 #else
     cprintf("%lli: %s progress event, total committed:%i, progress insts "
-            "committed: %lli\n", curTick, cpu->name(), temp,
+            "committed: %lli\n", curTick(), cpu->name(), temp,
             temp - lastNumInst);
 #endif
     lastNumInst = temp;
 
     if (_repeatEvent)
-        cpu->schedule(this, curTick + _interval);
+        cpu->schedule(this, curTick() + _interval);
 }
 
 const char *
@@ -110,7 +110,7 @@ BaseCPU::BaseCPU(Params *p)
       phase(p->phase)
 #endif
 {
-//    currentTick = curTick;
+//    currentTick = curTick();
 
     // if Python did not provide a valid ID, do it here
     if (_cpuId == -1 ) {
@@ -231,7 +231,7 @@ BaseCPU::startup()
 {
 #if FULL_SYSTEM
     if (!params()->defer_registration && profileEvent)
-        schedule(profileEvent, curTick);
+        schedule(profileEvent, curTick());
 #endif
 
     if (params()->progress_interval) {
@@ -270,7 +270,7 @@ BaseCPU::regStats()
 Tick
 BaseCPU::nextCycle()
 {
-    Tick next_tick = curTick - phase + clock - 1;
+    Tick next_tick = curTick() - phase + clock - 1;
     next_tick -= (next_tick % clock);
     next_tick += phase;
     return next_tick;
@@ -284,7 +284,7 @@ BaseCPU::nextCycle(Tick begin_tick)
         next_tick = next_tick - (next_tick % clock) + clock;
     next_tick += phase;
 
-    assert(next_tick >= curTick);
+    assert(next_tick >= curTick());
     return next_tick;
 }
 
@@ -390,7 +390,7 @@ BaseCPU::takeOverFrom(BaseCPU *oldCPU, Port *ic, Port *dc)
             threadContexts[i]->profileClear();
 
         if (profileEvent)
-            schedule(profileEvent, curTick);
+            schedule(profileEvent, curTick());
 #endif
 
     // Connect new CPU to old CPU's memory only if new CPU isn't
@@ -424,7 +424,7 @@ BaseCPU::ProfileEvent::process()
         tc->profileSample();
     }
 
-    cpu->schedule(this, curTick + interval);
+    cpu->schedule(this, curTick() + interval);
 }
 
 void
@@ -465,7 +465,7 @@ BaseCPU::traceFunctionsInternal(Addr pc)
         }
 
         ccprintf(*functionTraceStream, " (%d)\n%d: %s",
-                 curTick - functionEntryTick, curTick, sym_str);
-        functionEntryTick = curTick;
+                 curTick() - functionEntryTick, curTick(), sym_str);
+        functionEntryTick = curTick();
     }
 }
diff --git a/src/cpu/base.hh b/src/cpu/base.hh
index 5b03d904f..e0491a84a 100644
--- a/src/cpu/base.hh
+++ b/src/cpu/base.hh
@@ -100,20 +100,20 @@ class BaseCPU : public MemObject
 //    Tick currentTick;
     inline Tick frequency() const { return SimClock::Frequency / clock; }
     inline Tick ticks(int numCycles) const { return clock * numCycles; }
-    inline Tick curCycle() const { return curTick / clock; }
+    inline Tick curCycle() const { return curTick() / clock; }
     inline Tick tickToCycles(Tick val) const { return val / clock; }
     // @todo remove me after debugging with legion done
     Tick instCount() { return instCnt; }
 
     /** The next cycle the CPU should be scheduled, given a cache
      * access or quiesce event returning on this cycle.  This function
-     * may return curTick if the CPU should run on the current cycle.
+     * may return curTick() if the CPU should run on the current cycle.
      */
     Tick nextCycle();
 
     /** The next cycle the CPU should be scheduled, given a cache
      * access or quiesce event returning on the given Tick.  This
-     * function may return curTick if the CPU should run on the
+     * function may return curTick() if the CPU should run on the
      * current cycle.
      * @param begin_tick The tick that the event is completing on.
      */
diff --git a/src/cpu/checker/cpu.cc b/src/cpu/checker/cpu.cc
index 10dd77899..079057765 100644
--- a/src/cpu/checker/cpu.cc
+++ b/src/cpu/checker/cpu.cc
@@ -245,7 +245,7 @@ CheckerCPU::write(T data, Addr addr, unsigned flags, uint64_t *res)
         if (data != inst_data) {
             warn("%lli: Store value does not match value in memory! "
                  "Instruction: %#x, memory: %#x",
-                 curTick, inst_data, data);
+                 curTick(), inst_data, data);
             handleError();
         }
     }
@@ -327,6 +327,6 @@ void
 CheckerCPU::dumpAndExit()
 {
     warn("%lli: Checker PC:%#x, next PC:%#x",
-         curTick, thread->readPC(), thread->readNextPC());
+         curTick(), thread->readPC(), thread->readNextPC());
     panic("Checker found an error!");
 }
diff --git a/src/cpu/checker/cpu_impl.hh b/src/cpu/checker/cpu_impl.hh
index 10a9d1177..8197d560d 100644
--- a/src/cpu/checker/cpu_impl.hh
+++ b/src/cpu/checker/cpu_impl.hh
@@ -126,7 +126,7 @@ Checker<DynInstPtr>::verify(DynInstPtr &completed_inst)
         } else {
             warn("%lli: Changed PC does not match expected PC, "
                  "changed: %#x, expected: %#x",
-                 curTick, thread->readPC(), newPC);
+                 curTick(), thread->readPC(), newPC);
             CheckerCPU::handleError();
         }
         willChangePC = false;
@@ -166,7 +166,7 @@ Checker<DynInstPtr>::verify(DynInstPtr &completed_inst)
             // translate this instruction; in the SMT case it's
             // possible that its ITB entry was kicked out.
             warn("%lli: Instruction PC %#x was not found in the ITB!",
-                 curTick, thread->readPC());
+                 curTick(), thread->readPC());
             handleError(inst);
 
             // go to the next instruction
@@ -315,10 +315,10 @@ Checker<DynInstPtr>::validateInst(DynInstPtr &inst)
 {
     if (inst->readPC() != thread->readPC()) {
         warn("%lli: PCs do not match! Inst: %#x, checker: %#x",
-             curTick, inst->readPC(), thread->readPC());
+             curTick(), inst->readPC(), thread->readPC());
         if (changedPC) {
             warn("%lli: Changed PCs recently, may not be an error",
-                 curTick);
+                 curTick());
         } else {
             handleError(inst);
         }
@@ -329,7 +329,7 @@ Checker<DynInstPtr>::validateInst(DynInstPtr &inst)
     if (mi != machInst) {
         warn("%lli: Binary instructions do not match! Inst: %#x, "
              "checker: %#x",
-             curTick, mi, machInst);
+             curTick(), mi, machInst);
         handleError(inst);
     }
 }
@@ -354,7 +354,7 @@ Checker<DynInstPtr>::validateExecution(DynInstPtr &inst)
     if (result_mismatch) {
         warn("%lli: Instruction results do not match! (Values may not "
              "actually be integers) Inst: %#x, checker: %#x",
-             curTick, inst->readIntResult(), result.integer);
+             curTick(), inst->readIntResult(), result.integer);
 
         // It's useful to verify load values from memory, but in MP
         // systems the value obtained at execute may be different than
@@ -371,7 +371,7 @@ Checker<DynInstPtr>::validateExecution(DynInstPtr &inst)
     if (inst->readNextPC() != thread->readNextPC()) {
         warn("%lli: Instruction next PCs do not match! Inst: %#x, "
              "checker: %#x",
-             curTick, inst->readNextPC(), thread->readNextPC());
+             curTick(), inst->readNextPC(), thread->readNextPC());
         handleError(inst);
     }
 
@@ -388,7 +388,7 @@ Checker<DynInstPtr>::validateExecution(DynInstPtr &inst)
             thread->readMiscRegNoEffect(misc_reg_idx)) {
             warn("%lli: Misc reg idx %i (side effect) does not match! "
                  "Inst: %#x, checker: %#x",
-                 curTick, misc_reg_idx,
+                 curTick(), misc_reg_idx,
                  inst->tcBase()->readMiscRegNoEffect(misc_reg_idx),
                  thread->readMiscRegNoEffect(misc_reg_idx));
             handleError(inst);
@@ -402,7 +402,7 @@ Checker<DynInstPtr>::validateState()
 {
     if (updateThisCycle) {
         warn("%lli: Instruction PC %#x results didn't match up, copying all "
-             "registers from main CPU", curTick, unverifiedInst->readPC());
+             "registers from main CPU", curTick(), unverifiedInst->readPC());
         // Heavy-weight copying of all registers
         thread->copyArchRegs(unverifiedInst->tcBase());
         // Also advance the PC.  Hopefully no PC-based events happened.
diff --git a/src/cpu/inorder/cpu.cc b/src/cpu/inorder/cpu.cc
index 5b5f524f2..fe2c8e708 100644
--- a/src/cpu/inorder/cpu.cc
+++ b/src/cpu/inorder/cpu.cc
@@ -158,7 +158,7 @@ void
 InOrderCPU::CPUEvent::scheduleEvent(int delay)
 {
     assert(!scheduled() || squashed());
-    cpu->reschedule(this, cpu->nextCycle(curTick + cpu->ticks(delay)), true);
+    cpu->reschedule(this, cpu->nextCycle(curTick() + cpu->ticks(delay)), true);
 }
 
 void
@@ -337,7 +337,7 @@ InOrderCPU::InOrderCPU(Params *params)
     dummyBufferInst = new InOrderDynInst(this, NULL, 0, 0, 0);
     dummyBufferInst->setSquashed();
 
-    lastRunningCycle = curTick;
+    lastRunningCycle = curTick();
 
     // Reset CPU to reset state.
 #if FULL_SYSTEM
@@ -528,17 +528,17 @@ InOrderCPU::tick()
     if (!tickEvent.scheduled()) {
         if (_status == SwitchedOut) {
             // increment stat
-            lastRunningCycle = curTick;
+            lastRunningCycle = curTick();
         } else if (!activityRec.active()) {
             DPRINTF(InOrderCPU, "sleeping CPU.\n");
-            lastRunningCycle = curTick;
+            lastRunningCycle = curTick();
             timesIdled++;
         } else {
-            //Tick next_tick = curTick + cycles(1);
+            //Tick next_tick = curTick() + cycles(1);
             //tickEvent.schedule(next_tick);
-            schedule(&tickEvent, nextCycle(curTick + 1));
+            schedule(&tickEvent, nextCycle(curTick() + 1));
             DPRINTF(InOrderCPU, "Scheduled CPU for next tick @ %i.\n",
-                    nextCycle(curTick + 1));
+                    nextCycle(curTick() + 1));
         }
     }
 
@@ -693,10 +693,10 @@ InOrderCPU::scheduleCpuEvent(CPUEventType c_event, Fault fault,
     CPUEvent *cpu_event = new CPUEvent(this, c_event, fault, tid, inst,
                                        event_pri_offset);
 
-    Tick sked_tick = nextCycle(curTick + ticks(delay));
+    Tick sked_tick = nextCycle(curTick() + ticks(delay));
     if (delay >= 0) {
         DPRINTF(InOrderCPU, "Scheduling CPU Event (%s) for cycle %i, [tid:%i].\n",
-                eventNames[c_event], curTick + delay, tid);
+                eventNames[c_event], curTick() + delay, tid);
         schedule(cpu_event, sked_tick);
     } else {
         cpu_event->process();
@@ -791,7 +791,7 @@ InOrderCPU::activateThread(ThreadID tid)
 
     activateThreadInPipeline(tid);
 
-    thread[tid]->lastActivate = curTick;
+    thread[tid]->lastActivate = curTick();
 
     tcBase(tid)->setStatus(ThreadContext::Active);
 
@@ -963,7 +963,7 @@ InOrderCPU::suspendThread(ThreadID tid)
             tid);
     deactivateThread(tid);
     suspendedThreads.push_back(tid);
-    thread[tid]->lastSuspend = curTick;
+    thread[tid]->lastSuspend = curTick();
 
     tcBase(tid)->setStatus(ThreadContext::Suspended);
 }
@@ -1124,7 +1124,7 @@ InOrderCPU::instDone(DynInstPtr inst, ThreadID tid)
 
     // Finalize Trace Data For Instruction
     if (inst->traceData) {
-        //inst->traceData->setCycle(curTick);
+        //inst->traceData->setCycle(curTick());
         inst->traceData->setFetchSeq(inst->seqNum);
         //inst->traceData->setCPSeq(cpu->tcBase(tid)->numInst);
         inst->traceData->dump();
@@ -1390,7 +1390,7 @@ InOrderCPU::wakeCPU()
 
     DPRINTF(Activity, "Waking up CPU\n");
 
-    Tick extra_cycles = tickToCycles((curTick - 1) - lastRunningCycle);
+    Tick extra_cycles = tickToCycles((curTick() - 1) - lastRunningCycle);
 
     idleCycles += extra_cycles;
 
     for (int stage_num = 0; stage_num < NumStages; stage_num++) {
@@ -1399,7 +1399,7 @@ InOrderCPU::wakeCPU()
 
     numCycles += extra_cycles;
 
-    schedule(&tickEvent, nextCycle(curTick));
+    schedule(&tickEvent, nextCycle(curTick()));
 }
 
 #if FULL_SYSTEM
diff --git a/src/cpu/inorder/cpu.hh b/src/cpu/inorder/cpu.hh
index 38978dbd7..c3658373a 100644
--- a/src/cpu/inorder/cpu.hh
+++ b/src/cpu/inorder/cpu.hh
@@ -157,7 +157,7 @@ class InOrderCPU : public BaseCPU
     void scheduleTickEvent(int delay)
     {
         assert(!tickEvent.scheduled() || tickEvent.squashed());
-        reschedule(&tickEvent, nextCycle(curTick + ticks(delay)), true);
+        reschedule(&tickEvent, nextCycle(curTick() + ticks(delay)), true);
     }
 
     /** Unschedule tick event, regardless of its current state. */
diff --git a/src/cpu/inorder/inorder_dyn_inst.cc b/src/cpu/inorder/inorder_dyn_inst.cc
index 70fd59418..6afe35862 100644
--- a/src/cpu/inorder/inorder_dyn_inst.cc
+++ b/src/cpu/inorder/inorder_dyn_inst.cc
@@ -442,7 +442,7 @@ InOrderDynInst::setMiscRegOperand(const StaticInst *si, int idx,
 {
     instResult[idx].type = Integer;
     instResult[idx].val.integer = val;
-    instResult[idx].tick = curTick;
+    instResult[idx].tick = curTick();
 
     DPRINTF(InOrderDynInst, "[tid:%i]: [sn:%i] Setting Misc Reg. Operand %i "
             "being set to %#x.\n", threadNumber, seqNum, idx, val);
@@ -472,7 +472,7 @@ InOrderDynInst::setIntRegOperand(const StaticInst *si, int idx, IntReg val)
 {
     instResult[idx].type = Integer;
     instResult[idx].val.integer = val;
-    instResult[idx].tick = curTick;
+    instResult[idx].tick = curTick();
 
     DPRINTF(InOrderDynInst, "[tid:%i]: [sn:%i] Setting Result Int Reg. %i "
             "being set to %#x (result-tick:%i).\n",
@@ -485,7 +485,7 @@ InOrderDynInst::setFloatRegOperand(const StaticInst *si, int idx, FloatReg val)
 {
     instResult[idx].val.dbl = val;
     instResult[idx].type = Float;
-    instResult[idx].tick = curTick;
+    instResult[idx].tick = curTick();
 
     DPRINTF(InOrderDynInst, "[tid:%i]: [sn:%i] Setting Result Float Reg. %i "
             "being set to %#x (result-tick:%i).\n",
@@ -499,7 +499,7 @@ InOrderDynInst::setFloatRegOperandBits(const StaticInst *si, int idx,
 {
     instResult[idx].type = Integer;
     instResult[idx].val.integer = val;
-    instResult[idx].tick = curTick;
+    instResult[idx].tick = curTick();
 
     DPRINTF(InOrderDynInst, "[tid:%i]: [sn:%i] Setting Result Float Reg. %i "
             "being set to %#x (result-tick:%i).\n",
diff --git a/src/cpu/inorder/pipeline_stage.cc b/src/cpu/inorder/pipeline_stage.cc
index dc36965b0..2ac402fae 100644
--- a/src/cpu/inorder/pipeline_stage.cc
+++ b/src/cpu/inorder/pipeline_stage.cc
@@ -338,7 +338,7 @@ void
 PipelineStage::squashDueToBranch(DynInstPtr &inst, ThreadID tid)
 {
     if (cpu->squashSeqNum[tid] < inst->seqNum &&
-        cpu->lastSquashCycle[tid] == curTick){
+        cpu->lastSquashCycle[tid] == curTick()){
         DPRINTF(Resource, "Ignoring [sn:%i] branch squash signal due to "
                 "another stage's squash signal for after [sn:%i].\n",
                 inst->seqNum, cpu->squashSeqNum[tid]);
@@ -371,7 +371,7 @@ PipelineStage::squashDueToBranch(DynInstPtr &inst, ThreadID tid)
 
         // Save squash num for later stage use
         cpu->squashSeqNum[tid] = squash_seq_num;
-        cpu->lastSquashCycle[tid] = curTick;
+        cpu->lastSquashCycle[tid] = curTick();
     }
 }
 
@@ -969,7 +969,7 @@ PipelineStage::processInstSchedule(DynInstPtr inst,int &reqs_processed)
                 inst->popSchedEntry();
             } else {
                 panic("%i: encountered %s fault!\n",
-                      curTick, req->fault->name());
+                      curTick(), req->fault->name());
             }
 
             reqs_processed++;
@@ -1075,7 +1075,7 @@ PipelineStage::sendInstToNextStage(DynInstPtr inst)
 
     if (nextStageQueueValid(inst->nextStage - 1)) {
         if (inst->seqNum > cpu->squashSeqNum[tid] &&
-            curTick == cpu->lastSquashCycle[tid]) {
+            curTick() == cpu->lastSquashCycle[tid]) {
             DPRINTF(InOrderStage, "[tid:%u]: [sn:%i]: squashed, skipping "
                     "insertion into stage %i queue.\n", tid, inst->seqNum,
                     inst->nextStage);
@@ -1107,7 +1107,7 @@ PipelineStage::sendInstToNextStage(DynInstPtr inst)
 
         // Take note of trace data for this inst & stage
         if (inst->traceData) {
-            inst->traceData->setStageCycle(stageNum, curTick);
+            inst->traceData->setStageCycle(stageNum, curTick());
         }
     }
diff --git a/src/cpu/inorder/reg_dep_map.cc b/src/cpu/inorder/reg_dep_map.cc
index 50636cb81..98a0727a9 100644
--- a/src/cpu/inorder/reg_dep_map.cc
+++ b/src/cpu/inorder/reg_dep_map.cc
@@ -181,14 +181,14 @@ RegDepMap::canForward(unsigned reg_idx, DynInstPtr inst)
     assert(dest_reg_idx != -1);
 
     if (forward_inst->isExecuted() &&
-        forward_inst->readResultTime(dest_reg_idx) < curTick) {
+        forward_inst->readResultTime(dest_reg_idx) < curTick()) {
         return forward_inst;
     } else {
         if (!forward_inst->isExecuted()) {
             DPRINTF(RegDepMap, "[sn:%i] Can't get value through "
                     "forwarding, [sn:%i] has not been executed yet.\n",
                     inst->seqNum, forward_inst->seqNum);
-        } else if (forward_inst->readResultTime(dest_reg_idx) >= curTick) {
+        } else if (forward_inst->readResultTime(dest_reg_idx) >= curTick()) {
             DPRINTF(RegDepMap, "[sn:%i] Can't get value through "
                     "forwarding, [sn:%i] executed on tick:%i.\n",
                     inst->seqNum, forward_inst->seqNum,
diff --git a/src/cpu/inorder/resource.cc b/src/cpu/inorder/resource.cc
index 0d8dbb3e4..8c5f86c73 100644
--- a/src/cpu/inorder/resource.cc
+++ b/src/cpu/inorder/resource.cc
@@ -366,7 +366,7 @@ Resource::scheduleEvent(int slot_idx, int delay)
     DPRINTF(Resource, "[tid:%i]: Scheduling event for [sn:%i] on tick %i.\n",
             reqMap[slot_idx]->inst->readTid(),
             reqMap[slot_idx]->inst->seqNum,
-            cpu->ticks(delay) + curTick);
+            cpu->ticks(delay) + curTick());
 
     resourceEvent[slot_idx].scheduleEvent(delay);
 }
@@ -504,5 +504,5 @@ ResourceEvent::scheduleEvent(int delay)
 {
     assert(!scheduled() || squashed());
     resource->cpu->reschedule(this,
-                              curTick + resource->ticks(delay), true);
+                              curTick() + resource->ticks(delay), true);
 }
diff --git a/src/cpu/inorder/resource_pool.9stage.cc b/src/cpu/inorder/resource_pool.9stage.cc
index 05ce91faa..746d3f33b 100644
--- a/src/cpu/inorder/resource_pool.9stage.cc
+++ b/src/cpu/inorder/resource_pool.9stage.cc
@@ -177,13 +177,13 @@ ResourcePool::scheduleEvent(InOrderCPU::CPUEventType e_type, DynInstPtr inst,
       case InOrderCPU::ActivateThread:
         {
             DPRINTF(Resource, "Scheduling Activate Thread Resource Pool Event for tick %i.\n",
-                    curTick + delay);
+                    curTick() + delay);
             res_pool_event->setEvent(e_type,
                                      inst,
                                      inst->squashingStage,
                                      inst->bdelaySeqNum,
                                      inst->readTid());
-            res_pool_event->schedule(curTick + cpu->cycles(delay));
+            res_pool_event->schedule(curTick() + cpu->cycles(delay));
         }
         break;
 
@@ -192,7 +192,7 @@ ResourcePool::scheduleEvent(InOrderCPU::CPUEventType e_type, DynInstPtr inst,
       case InOrderCPU::DeallocateThread:
         {
             DPRINTF(Resource, "Scheduling Deactivate Thread Resource Pool Event for tick %i.\n",
-                    curTick + delay);
+                    curTick() + delay);
 
             res_pool_event->setEvent(e_type,
                                      inst,
@@ -200,7 +200,7 @@ ResourcePool::scheduleEvent(InOrderCPU::CPUEventType e_type, DynInstPtr inst,
                                      inst->bdelaySeqNum,
                                      tid);
 
-            res_pool_event->schedule(curTick + cpu->cycles(delay));
+            res_pool_event->schedule(curTick() + cpu->cycles(delay));
         }
         break;
 
@@ -208,14 +208,14 @@ ResourcePool::scheduleEvent(InOrderCPU::CPUEventType e_type, DynInstPtr inst,
       case ResourcePool::InstGraduated:
         {
             DPRINTF(Resource, "Scheduling Inst-Graduated Resource Pool Event for tick %i.\n",
-                    curTick + delay);
+                    curTick() + delay);
 
             res_pool_event->setEvent(e_type,
                                      inst,
                                      inst->squashingStage,
                                      inst->seqNum,
                                      inst->readTid());
-            res_pool_event->schedule(curTick + cpu->cycles(delay));
+            res_pool_event->schedule(curTick() + cpu->cycles(delay));
         }
         break;
 
@@ -223,13 +223,13 @@ ResourcePool::scheduleEvent(InOrderCPU::CPUEventType e_type, DynInstPtr inst,
      case ResourcePool::SquashAll:
         {
             DPRINTF(Resource, "Scheduling Squash Resource Pool Event for tick %i.\n",
-                    curTick + delay);
+                    curTick() + delay);
 
             res_pool_event->setEvent(e_type,
                                      inst,
                                      inst->squashingStage,
                                      inst->bdelaySeqNum,
                                      inst->readTid());
-            res_pool_event->schedule(curTick + cpu->cycles(delay));
+            res_pool_event->schedule(curTick() + cpu->cycles(delay));
         }
         break;
 
@@ -345,9 +345,9 @@ void
 ResourcePool::ResPoolEvent::scheduleEvent(int delay)
 {
     if (squashed())
-        reschedule(curTick + resPool->cpu->cycles(delay));
+        reschedule(curTick() + resPool->cpu->cycles(delay));
     else if (!scheduled())
-        schedule(curTick + resPool->cpu->cycles(delay));
+        schedule(curTick() + resPool->cpu->cycles(delay));
 }
 
 /** Unschedule resource event, regardless of its current state. */
diff --git a/src/cpu/inorder/resource_pool.cc b/src/cpu/inorder/resource_pool.cc
index e199d2bc2..e8400405a 100644
--- a/src/cpu/inorder/resource_pool.cc
+++ b/src/cpu/inorder/resource_pool.cc
@@ -244,14 +244,14 @@ ResourcePool::scheduleEvent(InOrderCPU::CPUEventType e_type, DynInstPtr inst,
 {
     assert(delay >= 0);
 
-    Tick when = cpu->nextCycle(curTick + cpu->ticks(delay));
+    Tick when = cpu->nextCycle(curTick() + cpu->ticks(delay));
 
     switch (e_type)
     {
       case InOrderCPU::ActivateThread:
         {
             DPRINTF(Resource, "Scheduling Activate Thread Resource Pool Event "
-                    "for tick %i, [tid:%i].\n", curTick + delay,
+                    "for tick %i, [tid:%i].\n", curTick() + delay,
                     inst->readTid());
             ResPoolEvent *res_pool_event =
                 new ResPoolEvent(this,
@@ -269,7 +269,7 @@ ResourcePool::scheduleEvent(InOrderCPU::CPUEventType e_type, DynInstPtr inst,
         {
             DPRINTF(Resource, "Scheduling Deactivate Thread Resource Pool "
-                    "Event for tick %i.\n", curTick + delay);
+                    "Event for tick %i.\n", curTick() + delay);
             ResPoolEvent *res_pool_event =
                 new ResPoolEvent(this,
                                  e_type,
@@ -304,7 +304,7 @@ ResourcePool::scheduleEvent(InOrderCPU::CPUEventType e_type, DynInstPtr inst,
       case ResourcePool::InstGraduated:
         {
             DPRINTF(Resource, "Scheduling Inst-Graduated Resource Pool "
-                    "Event for tick %i.\n", curTick + delay);
+                    "Event for tick %i.\n", curTick() + delay);
             ResPoolEvent *res_pool_event =
                 new ResPoolEvent(this,e_type,
                                  inst,
@@ -318,7 +318,7 @@ ResourcePool::scheduleEvent(InOrderCPU::CPUEventType e_type, DynInstPtr inst,
       case ResourcePool::SquashAll:
         {
             DPRINTF(Resource, "Scheduling Squash Resource Pool Event for "
-                    "tick %i.\n", curTick + delay);
+                    "tick %i.\n", curTick() + delay);
             ResPoolEvent *res_pool_event =
                 new ResPoolEvent(this,e_type,
                                  inst,
@@ -333,7 +333,7 @@ ResourcePool::scheduleEvent(InOrderCPU::CPUEventType e_type, DynInstPtr inst,
         {
             DPRINTF(Resource, "Scheduling Squash Due to Memory Stall Resource "
                     "Pool Event for tick %i.\n",
-                    curTick + delay);
+                    curTick() + delay);
             ResPoolEvent *res_pool_event =
                 new ResPoolEvent(this,e_type,
                                  inst,
@@ -348,7 +348,7 @@ ResourcePool::scheduleEvent(InOrderCPU::CPUEventType e_type, DynInstPtr inst,
         {
             DPRINTF(Resource, "Scheduling UpdatePC Resource Pool Event "
                     "for tick %i.\n",
-                    curTick + delay);
+                    curTick() + delay);
             ResPoolEvent *res_pool_event = new ResPoolEvent(this,e_type,
                                                             inst,
                                                             inst->squashingStage,
@@ -542,7 +542,7 @@ ResourcePool::ResPoolEvent::scheduleEvent(int delay)
 {
     InOrderCPU *cpu = resPool->cpu;
     assert(!scheduled() || squashed());
-    cpu->reschedule(this, cpu->nextCycle(curTick + cpu->ticks(delay)), true);
+    cpu->reschedule(this, cpu->nextCycle(curTick() + cpu->ticks(delay)), true);
 }
 
 /** Unschedule resource event, regardless of its current state. */
diff --git a/src/cpu/inorder/resources/branch_predictor.cc b/src/cpu/inorder/resources/branch_predictor.cc
index 33b67ce4a..dc036df64 100644
--- a/src/cpu/inorder/resources/branch_predictor.cc
+++ b/src/cpu/inorder/resources/branch_predictor.cc
@@ -80,7 +80,7 @@ BranchPredictor::execute(int slot_num)
       case PredictBranch:
         {
             if (inst->seqNum > cpu->squashSeqNum[tid] &&
-                curTick == cpu->lastSquashCycle[tid]) {
+                curTick() == cpu->lastSquashCycle[tid]) {
                 DPRINTF(InOrderStage, "[tid:%u]: [sn:%i]: squashed, "
                         "skipping prediction \n", tid, inst->seqNum);
             } else {
@@ -125,7 +125,7 @@ BranchPredictor::execute(int slot_num)
       case UpdatePredictor:
         {
             if (inst->seqNum > cpu->squashSeqNum[tid] &&
-                curTick == cpu->lastSquashCycle[tid]) {
+                curTick() == cpu->lastSquashCycle[tid]) {
                 DPRINTF(InOrderStage, "[tid:%u]: [sn:%i]: squashed, "
                         "skipping branch predictor update \n", tid, inst->seqNum);
diff --git a/src/cpu/inorder/resources/cache_unit.cc b/src/cpu/inorder/resources/cache_unit.cc
index 5f9ddd372..bb4caf48a 100644
--- a/src/cpu/inorder/resources/cache_unit.cc
+++ b/src/cpu/inorder/resources/cache_unit.cc
@@ -63,7 +63,7 @@ Tick
 CacheUnit::CachePort::recvAtomic(PacketPtr pkt)
 {
     panic("CacheUnit::CachePort doesn't expect recvAtomic callback!");
-    return curTick;
+    return curTick();
 }
 
 void
@@ -167,7 +167,7 @@ CacheUnit::getSlot(DynInstPtr inst)
         if (new_slot == -1)
             return -1;
 
-        inst->memTime = curTick;
+        inst->memTime = curTick();
         setAddrDependency(inst);
         return new_slot;
     } else {
@@ -343,7 +343,7 @@ CacheUnit::getRequest(DynInstPtr inst, int stage_num, int res_idx,
         break;
 
      default:
-        panic("%i: Unexpected request type (%i) to %s", curTick,
+        panic("%i: Unexpected request type (%i) to %s", curTick(),
              sched_entry->cmd, name());
     }
 
@@ -482,7 +482,7 @@ CacheUnit::read(DynInstPtr inst, Addr addr,
     if (secondAddr > addr && !inst->split2ndAccess) {
         DPRINTF(InOrderCachePort, "%i: sn[%i] Split Read Access (1 of 2) for "
-                "(%#x, %#x).\n", curTick, inst->seqNum, addr, secondAddr);
+                "(%#x, %#x).\n", curTick(), inst->seqNum, addr, secondAddr);
 
         // Save All "Total" Split Information
         // ==============================
diff --git a/src/cpu/inorder/resources/execution_unit.cc b/src/cpu/inorder/resources/execution_unit.cc
index 4342042e9..9ba7a64c7 100644
--- a/src/cpu/inorder/resources/execution_unit.cc
+++ b/src/cpu/inorder/resources/execution_unit.cc
@@ -55,7 +55,7 @@ ExecutionUnit::regStats()
         .name(name() + ".predictedNotTakenIncorrect")
         .desc("Number of Branches Incorrectly Predicted As Not Taken).");
 
-    lastExecuteCycle = curTick;
+    lastExecuteCycle = curTick();
 
     executions
         .name(name() + ".executions")
@@ -98,8 +98,8 @@ ExecutionUnit::execute(int slot_num)
     {
       case ExecuteInst:
         {
-            if (curTick != lastExecuteCycle) {
-                lastExecuteCycle = curTick;
+            if (curTick() != lastExecuteCycle) {
+                lastExecuteCycle = curTick();
             }
 
diff --git a/src/cpu/inorder/resources/fetch_seq_unit.cc b/src/cpu/inorder/resources/fetch_seq_unit.cc
index 3bfe912e7..7fd57cc75 100644
--- a/src/cpu/inorder/resources/fetch_seq_unit.cc
+++ b/src/cpu/inorder/resources/fetch_seq_unit.cc
@@ -210,13 +210,13 @@ FetchSeqUnit::squash(DynInstPtr inst, int squash_stage,
         }
 
         if (squashSeqNum[tid] <= done_seq_num &&
-            lastSquashCycle[tid] == curTick) {
+            lastSquashCycle[tid] == curTick()) {
             DPRINTF(InOrderFetchSeq, "[tid:%i]: Ignoring squash from stage %i, "
                     "since there is an outstanding squash that is older.\n",
                     tid, squash_stage);
         } else {
             squashSeqNum[tid] = done_seq_num;
-            lastSquashCycle[tid] = curTick;
+            lastSquashCycle[tid] = curTick();
 
             // If The very next instruction number is the done seq. num,
             // then we haven't seen the delay slot yet ... if it isn't
diff --git a/src/cpu/inorder/resources/graduation_unit.cc b/src/cpu/inorder/resources/graduation_unit.cc
index a9b96a49f..9d19c2eef 100644
--- a/src/cpu/inorder/resources/graduation_unit.cc
+++ b/src/cpu/inorder/resources/graduation_unit.cc
@@ -64,8 +64,8 @@ GraduationUnit::execute(int slot_num)
     // @TODO: Instructions should never really get to this point since
     // this should be handled through the request interface. Check to
     // make sure this happens and delete this code.
-    if (lastCycleGrad != curTick) {
-        lastCycleGrad = curTick;
+    if (lastCycleGrad != curTick()) {
+        lastCycleGrad = curTick();
         numCycleGrad = 0;
     } else if (numCycleGrad > width) {
         DPRINTF(InOrderGraduation,
@@ -91,7 +91,7 @@ GraduationUnit::execute(int slot_num)
     }
 
     if (inst->traceData) {
-        inst->traceData->setStageCycle(stage_num, curTick);
+        inst->traceData->setStageCycle(stage_num, curTick());
     }
 
     // Tell CPU that instruction is finished processing
diff --git a/src/cpu/inorder/resources/mult_div_unit.cc b/src/cpu/inorder/resources/mult_div_unit.cc
index d9a887571..55df1cc43 100644
--- a/src/cpu/inorder/resources/mult_div_unit.cc
+++ b/src/cpu/inorder/resources/mult_div_unit.cc
@@ -163,7 +163,7 @@ MultDivUnit::getSlot(DynInstPtr inst)
         }
     }
 
-    if (lastMDUCycle + repeat_rate > curTick) {
+    if (lastMDUCycle + repeat_rate > curTick()) {
         DPRINTF(InOrderMDU, "MDU not ready to process another inst. until %i, "
                 "denying request.\n", lastMDUCycle + repeat_rate);
         return -1;
@@ -173,7 +173,7 @@ MultDivUnit::getSlot(DynInstPtr inst)
             rval);
 
     if (rval != -1) {
-        lastMDUCycle = curTick;
+        lastMDUCycle = curTick();
         lastOpType = inst->opClass();
         lastInstName = inst->staticInst->getName();
     }
diff --git a/src/cpu/o3/commit_impl.hh b/src/cpu/o3/commit_impl.hh
index e8681f6e3..2912cbb03 100644
--- a/src/cpu/o3/commit_impl.hh
+++ b/src/cpu/o3/commit_impl.hh
@@ -475,7 +475,7 @@ DefaultCommit<Impl>::generateTrapEvent(ThreadID tid)
 
     TrapEvent *trap = new TrapEvent(this, tid);
 
-    cpu->schedule(trap, curTick + trapLatency);
+    cpu->schedule(trap, curTick() + trapLatency);
 
     trapInFlight[tid] = true;
 }
diff --git a/src/cpu/o3/cpu.cc b/src/cpu/o3/cpu.cc
index 21c5cc706..9becc6601 100644
--- a/src/cpu/o3/cpu.cc
+++ b/src/cpu/o3/cpu.cc
@@ -334,7 +334,7 @@ FullO3CPU<Impl>::FullO3CPU(DerivO3CPUParams *params)
     // Setup the ROB for whichever stages need it.
     commit.setROB(&rob);
 
-    lastRunningCycle = curTick;
+    lastRunningCycle = curTick();
 
     lastActivatedCycle = -1;
 #if 0
@@ -538,13 +538,13 @@ FullO3CPU<Impl>::tick()
                getState() == SimObject::Drained) {
         DPRINTF(O3CPU, "Switched out!\n");
         // increment stat
-        lastRunningCycle = curTick;
+        lastRunningCycle = curTick();
     } else if (!activityRec.active() || _status == Idle) {
         DPRINTF(O3CPU, "Idle!\n");
-        lastRunningCycle = curTick;
+        lastRunningCycle = curTick();
         timesIdled++;
     } else {
-        schedule(tickEvent, nextCycle(curTick + ticks(1)));
+        schedule(tickEvent, nextCycle(curTick() + ticks(1)));
         DPRINTF(O3CPU, "Scheduling next tick!\n");
     }
 }
@@ -639,13 +639,13 @@ FullO3CPU<Impl>::activateContext(ThreadID tid, int delay)
     // Needs to set each stage to running as well.
     if (delay){
         DPRINTF(O3CPU, "[tid:%i]: Scheduling thread context to activate "
-                "on cycle %d\n", tid, curTick + ticks(delay));
+                "on cycle %d\n", tid, curTick() + ticks(delay));
         scheduleActivateThreadEvent(tid, delay);
     } else {
         activateThread(tid);
     }
 
-    if (lastActivatedCycle < curTick) {
+    if (lastActivatedCycle < curTick()) {
         scheduleTickEvent(delay);
 
         // Be sure to signal that there's some activity so the CPU doesn't
@@ -653,7 +653,7 @@ FullO3CPU<Impl>::activateContext(ThreadID tid, int delay)
         activityRec.activity();
         fetch.wakeFromQuiesce();
 
-        lastActivatedCycle = curTick;
+        lastActivatedCycle = curTick();
 
         _status = Running;
     }
@@ -666,7 +666,7 @@ FullO3CPU<Impl>::deallocateContext(ThreadID tid, bool remove, int delay)
     // Schedule removal of thread data from CPU
     if (delay){
         DPRINTF(O3CPU, "[tid:%i]: Scheduling thread context to deallocate "
-                "on cycle %d\n", tid, curTick + ticks(delay));
+                "on cycle %d\n", tid, curTick() + ticks(delay));
         scheduleDeallocateContextEvent(tid, remove, delay);
         return false;
     } else {
@@ -1552,8 +1552,8 @@ FullO3CPU<Impl>::wakeCPU()
 
     DPRINTF(Activity, "Waking up CPU\n");
 
-    idleCycles += tickToCycles((curTick - 1) - lastRunningCycle);
-    numCycles += tickToCycles((curTick - 1) - lastRunningCycle);
+    idleCycles += tickToCycles((curTick() - 1) - lastRunningCycle);
+    numCycles += tickToCycles((curTick() - 1) - lastRunningCycle);
 
     schedule(tickEvent, nextCycle());
 }
diff --git a/src/cpu/o3/cpu.hh b/src/cpu/o3/cpu.hh
index 832d98f55..e3d13c840 100644
--- a/src/cpu/o3/cpu.hh
+++ b/src/cpu/o3/cpu.hh
@@ -140,9 +140,9 @@ class FullO3CPU : public BaseO3CPU
     void scheduleTickEvent(int delay)
     {
         if (tickEvent.squashed())
-            reschedule(tickEvent, nextCycle(curTick + ticks(delay)));
+            reschedule(tickEvent, nextCycle(curTick() + ticks(delay)));
         else if (!tickEvent.scheduled())
-            schedule(tickEvent, nextCycle(curTick + ticks(delay)));
+            schedule(tickEvent, nextCycle(curTick() + ticks(delay)));
     }
 
     /** Unschedule tick event, regardless of its current state. */
@@ -182,10 +182,10 @@ class FullO3CPU : public BaseO3CPU
         // Schedule thread to activate, regardless of its current state.
         if (activateThreadEvent[tid].squashed())
             reschedule(activateThreadEvent[tid],
-                       nextCycle(curTick + ticks(delay)));
+                       nextCycle(curTick() + ticks(delay)));
         else if (!activateThreadEvent[tid].scheduled())
             schedule(activateThreadEvent[tid],
-                     nextCycle(curTick + ticks(delay)));
+                     nextCycle(curTick() + ticks(delay)));
     }
 
     /** Unschedule actiavte thread event, regardless of its current state. */
@@ -235,10 +235,10 @@ class FullO3CPU : public BaseO3CPU
         // Schedule thread to activate, regardless of its current state.
         if (deallocateContextEvent[tid].squashed())
             reschedule(deallocateContextEvent[tid],
-                       nextCycle(curTick + ticks(delay)));
+                       nextCycle(curTick() + ticks(delay)));
         else if (!deallocateContextEvent[tid].scheduled())
             schedule(deallocateContextEvent[tid],
-                     nextCycle(curTick + ticks(delay)));
+                     nextCycle(curTick() + ticks(delay)));
     }
 
     /** Unschedule thread deallocation in CPU */
diff --git a/src/cpu/o3/fetch_impl.hh b/src/cpu/o3/fetch_impl.hh
index cca6b7a57..28ef423c4 100644
--- a/src/cpu/o3/fetch_impl.hh
+++ b/src/cpu/o3/fetch_impl.hh
@@ -68,7 +68,7 @@ Tick
 DefaultFetch<Impl>::IcachePort::recvAtomic(PacketPtr pkt)
 {
     panic("DefaultFetch doesn't expect recvAtomic callback!");
-    return curTick;
+    return curTick();
 }
 
 template<class Impl>
@@ -625,7 +625,7 @@ DefaultFetch<Impl>::fetchCacheLine(Addr vaddr, Fault &ret_fault, ThreadID tid,
 
     DPRINTF(Fetch, "[tid:%i]: Doing cache access.\n", tid);
 
-    lastIcacheStall[tid] = curTick;
+    lastIcacheStall[tid] = curTick();
 
     DPRINTF(Activity, "[tid:%i]: Activity: Waiting on I-cache "
             "response.\n", tid);
@@ -992,7 +992,7 @@ DefaultFetch<Impl>::buildInst(ThreadID tid, StaticInstPtr staticInst,
 #if TRACING_ON
     if (trace) {
         instruction->traceData =
-            cpu->getTracer()->getInstRecord(curTick, cpu->tcBase(tid),
+            cpu->getTracer()->getInstRecord(curTick(), cpu->tcBase(tid),
                     instruction->staticInst, thisPC, curMacroop);
     }
 #else
diff --git a/src/cpu/o3/inst_queue_impl.hh b/src/cpu/o3/inst_queue_impl.hh
index b944979f2..ce408dfd0 100644
--- a/src/cpu/o3/inst_queue_impl.hh
+++ b/src/cpu/o3/inst_queue_impl.hh
@@ -754,7 +754,7 @@ InstructionQueue<Impl>::scheduleReadyInsts()
                 FUCompletion *execution = new FUCompletion(issuing_inst,
                                                            idx, this);
 
-                cpu->schedule(execution, curTick + cpu->ticks(op_latency - 1));
+                cpu->schedule(execution, curTick() + cpu->ticks(op_latency - 1));
 
                 // @todo: Enforce that issue_latency == 1 or op_latency
                 if (issue_latency > 1) {
diff --git a/src/cpu/o3/lsq_impl.hh b/src/cpu/o3/lsq_impl.hh
index e780b14e4..ddfc63754 100644
--- a/src/cpu/o3/lsq_impl.hh
+++ b/src/cpu/o3/lsq_impl.hh
@@ -55,7 +55,7 @@ Tick
 LSQ<Impl>::DcachePort::recvAtomic(PacketPtr pkt)
 {
     panic("O3CPU model does not work with atomic mode!");
-    return curTick;
+    return curTick();
 }
 
 template <class Impl>
diff --git a/src/cpu/o3/lsq_unit.hh b/src/cpu/o3/lsq_unit.hh
index e9e3fea96..2bb42cadc 100644
--- a/src/cpu/o3/lsq_unit.hh
+++ b/src/cpu/o3/lsq_unit.hh
@@ -624,7 +624,7 @@ LSQUnit<Impl>::read(Request *req, Request *sreqLow, Request *sreqHigh,
         // We'll say this has a 1 cycle load-store forwarding latency
         // for now.
         // @todo: Need to make this a parameter.
-        cpu->schedule(wb, curTick);
+        cpu->schedule(wb, curTick());
 
         // Don't need to do anything special for split loads.
         if (TheISA::HasUnalignedMemAcc && sreqLow) {
diff --git a/src/cpu/o3/lsq_unit_impl.hh b/src/cpu/o3/lsq_unit_impl.hh
index 807c0b527..64d674666 100644
--- a/src/cpu/o3/lsq_unit_impl.hh
+++ b/src/cpu/o3/lsq_unit_impl.hh
@@ -783,7 +783,7 @@ LSQUnit<Impl>::writebackStores()
                         "Instantly completing it.\n",
                         inst->seqNum);
                 WritebackEvent *wb = new WritebackEvent(inst, data_pkt, this);
-                cpu->schedule(wb, curTick + 1);
+                cpu->schedule(wb, curTick() + 1);
                 completeStore(storeWBIdx);
                 incrStIdx(storeWBIdx);
                 continue;
diff --git a/src/cpu/o3/thread_context_impl.hh b/src/cpu/o3/thread_context_impl.hh
index 060baed32..e7b0540d1 100755
--- a/src/cpu/o3/thread_context_impl.hh
+++ b/src/cpu/o3/thread_context_impl.hh
@@ -115,7 +115,7 @@ O3ThreadContext<Impl>::activate(int delay)
         return;
 
 #if FULL_SYSTEM
-    thread->lastActivate = curTick;
+    thread->lastActivate = curTick();
 #endif
 
     thread->setStatus(ThreadContext::Active);
@@ -135,8 +135,8 @@ O3ThreadContext<Impl>::suspend(int delay)
         return;
 
 #if FULL_SYSTEM
-    thread->lastActivate = curTick;
-    thread->lastSuspend = curTick;
+    thread->lastActivate = curTick();
+    thread->lastSuspend = curTick();
 #endif
 /*
 #if FULL_SYSTEM
diff --git a/src/cpu/ozone/back_end.hh b/src/cpu/ozone/back_end.hh
index 7a2da3239..95e079d0d 100644
--- a/src/cpu/ozone/back_end.hh
+++ b/src/cpu/ozone/back_end.hh
@@ -468,7 +468,7 @@ BackEnd<Impl>::read(RequestPtr req, T &data, int load_idx)
     if (fault == NoFault && dcacheInterface) {
         memReq->cmd = Read;
         memReq->completionEvent = NULL;
-        memReq->time = curTick;
+        memReq->time = curTick();
         memReq->flags &= ~INST_READ;
         MemAccessResult result = dcacheInterface->access(memReq);
 
@@ -481,7 +481,7 @@ BackEnd<Impl>::read(RequestPtr req, T &data, int load_idx)
             --funcExeInst;
 
             memReq->completionEvent = &cacheCompletionEvent;
-            lastDcacheStall = curTick;
+            lastDcacheStall = curTick();
 //            unscheduleTickEvent();
 //            status = DcacheMissStall;
             DPRINTF(OzoneCPU, "Dcache miss stall!\n");
@@ -510,7 +510,7 @@ BackEnd<Impl>::write(RequestPtr req, T &data, int store_idx)
         memReq->cmd = Write;
         memcpy(memReq->data,(uint8_t *)&data,memReq->size);
         memReq->completionEvent = NULL;
-        memReq->time = curTick;
+        memReq->time = curTick();
         memReq->flags &= ~INST_READ;
         MemAccessResult result = dcacheInterface->access(memReq);
 
@@ -519,7 +519,7 @@ BackEnd<Impl>::write(RequestPtr req, T &data, int store_idx)
         // at some point.
         if (result != MA_HIT && dcacheInterface->doEvents()) {
             memReq->completionEvent = &cacheCompletionEvent;
-            lastDcacheStall = curTick;
+            lastDcacheStall = curTick();
 //            unscheduleTickEvent();
 //            status = DcacheMissStall;
             DPRINTF(OzoneCPU, "Dcache miss stall!\n");
diff --git a/src/cpu/ozone/cpu.hh b/src/cpu/ozone/cpu.hh
index fcc5602eb..1b196feb4 100644
--- a/src/cpu/ozone/cpu.hh
+++ b/src/cpu/ozone/cpu.hh
@@ -277,9 +277,9 @@ class OzoneCPU : public BaseCPU
     void scheduleTickEvent(int delay)
     {
         if (tickEvent.squashed())
-            tickEvent.reschedule(curTick + ticks(delay));
+            tickEvent.reschedule(curTick() + ticks(delay));
         else if (!tickEvent.scheduled())
-            tickEvent.schedule(curTick + ticks(delay));
+            tickEvent.schedule(curTick() + ticks(delay));
     }
 
     /// Unschedule tick event, regardless of its current state.
diff --git a/src/cpu/ozone/cpu_impl.hh b/src/cpu/ozone/cpu_impl.hh
index a22ada5d0..dd6c3dcf1 100644
--- a/src/cpu/ozone/cpu_impl.hh
+++ b/src/cpu/ozone/cpu_impl.hh
@@ -301,7 +301,7 @@ OzoneCPU<Impl>::takeOverFrom(BaseCPU *oldCPU)
         if (tc->status() == ThreadContext::Active &&
             _status != Running) {
             _status = Running;
-            tickEvent.schedule(curTick);
+            tickEvent.schedule(curTick());
         }
     }
     // Nothing running, change status to reflect that we're no longer
@@ -525,7 +525,7 @@ OzoneCPU<Impl>::tick()
     comInstEventQueue[0]->serviceEvents(numInst);
 
     if (!tickEvent.scheduled() && _status == Running)
-        tickEvent.schedule(curTick + ticks(1));
+        tickEvent.schedule(curTick() + ticks(1));
 }
 
 template <class Impl>
diff --git a/src/cpu/ozone/front_end_impl.hh b/src/cpu/ozone/front_end_impl.hh
index 884136927..d7ed0b77a 100644
--- a/src/cpu/ozone/front_end_impl.hh
+++ b/src/cpu/ozone/front_end_impl.hh
@@ -52,7 +52,7 @@ Tick
 FrontEnd<Impl>::IcachePort::recvAtomic(PacketPtr pkt)
 {
     panic("FrontEnd doesn't expect recvAtomic callback!");
-    return curTick;
+    return curTick();
 }
 
 template<class Impl>
@@ -432,7 +432,7 @@ FrontEnd<Impl>::tick()
 #if FULL_SYSTEM
         if (inst->isQuiesce()) {
-//            warn("%lli: Quiesce instruction encountered, halting fetch!", curTick);
+//            warn("%lli: Quiesce instruction encountered, halting fetch!", curTick());
             status = QuiescePending;
             break;
         }
@@ -894,7 +894,7 @@ FrontEnd<Impl>::getInstFromCacheline()
                 instruction->staticInst->disassemble(PC));
 
         instruction->traceData =
-            Trace::getInstRecord(curTick, tc,
+            Trace::getInstRecord(curTick(), tc,
                                  instruction->staticInst,
                                  instruction->readPC());
diff --git a/src/cpu/ozone/inorder_back_end.hh b/src/cpu/ozone/inorder_back_end.hh
index 9c2699610..fcdc2a38a 100644
--- a/src/cpu/ozone/inorder_back_end.hh
+++ b/src/cpu/ozone/inorder_back_end.hh
@@ -210,7 +210,7 @@ InorderBackEnd<Impl>::read(Addr addr, T &data, unsigned flags)
     if (fault == NoFault && dcacheInterface) {
         memReq->cmd = Read;
         memReq->completionEvent = NULL;
-        memReq->time = curTick;
+        memReq->time = curTick();
         MemAccessResult result = dcacheInterface->access(memReq);
 
         // Ugly hack to get an event scheduled *only* if the access is
@@ -220,7 +220,7 @@ InorderBackEnd<Impl>::read(Addr addr, T &data, unsigned flags)
         // Fix this hack for keeping funcExeInst correct with loads that
         // are executed twice.
             memReq->completionEvent = &cacheCompletionEvent;
-            lastDcacheStall = curTick;
+            lastDcacheStall = curTick();
 //            unscheduleTickEvent();
             status = DcacheMissLoadStall;
             DPRINTF(IBE, "Dcache miss stall!\n");
@@ -246,7 +246,7 @@ InorderBackEnd<Impl>::write(T data, Addr addr, unsigned flags, uint64_t *res)
         memReq->cmd = Write;
//        memcpy(memReq->data,(uint8_t *)&data,memReq->size);
         memReq->completionEvent = NULL;
-        memReq->time = curTick;
+        memReq->time = curTick();
         MemAccessResult result = dcacheInterface->access(memReq);
 
         // Ugly hack to get an event scheduled *only* if the access is
@@ -254,7 +254,7 @@ InorderBackEnd<Impl>::write(T data, Addr addr, unsigned flags, uint64_t *res)
         // at some point.
         if (result != MA_HIT) {
             memReq->completionEvent = &cacheCompletionEvent;
-            lastDcacheStall = curTick;
+            lastDcacheStall = curTick();
 //            unscheduleTickEvent();
             status = DcacheMissStoreStall;
             DPRINTF(IBE, "Dcache miss stall!\n");
@@ -280,7 +280,7 @@ InorderBackEnd<Impl>::read(MemReqPtr &req, T &data, int load_idx)
 //    Fault fault = cpu->translateDataReadReq(req);
     req->cmd = Read;
     req->completionEvent = NULL;
-    req->time = curTick;
+    req->time = curTick();
     assert(!req->data);
     req->data = new uint8_t[64];
     Fault fault = cpu->read(req, data);
@@ -295,7 +295,7 @@ InorderBackEnd<Impl>::read(MemReqPtr &req, T &data, int load_idx)
         // at some point.
         if (result != MA_HIT) {
             req->completionEvent = &cacheCompletionEvent;
-            lastDcacheStall = curTick;
+            lastDcacheStall = curTick();
//            unscheduleTickEvent();
             status = DcacheMissLoadStall;
             DPRINTF(IBE, "Dcache miss load stall!\n");
@@ -320,7 +320,7 @@ InorderBackEnd<Impl>::write(MemReqPtr &req, T &data, int store_idx)
 
     req->cmd = Write;
     req->completionEvent = NULL;
-    req->time = curTick;
+    req->time = curTick();
     assert(!req->data);
     req->data = new uint8_t[64];
     memcpy(req->data, (uint8_t *)&data, req->size);
@@ -347,7 +347,7 @@ InorderBackEnd<Impl>::write(MemReqPtr &req, T &data, int store_idx)
         req->data = new uint8_t[64];
         memcpy(req->data,(uint8_t *)&data,req->size);
         req->completionEvent = NULL;
-        req->time = curTick;
+        req->time = curTick();
         MemAccessResult result = dcacheInterface->access(req);
 
         // Ugly hack to get an event scheduled *only* if the access is
@@ -355,7 +355,7 @@ InorderBackEnd<Impl>::write(MemReqPtr &req, T &data, int store_idx)
         // at some point.
         if (result != MA_HIT) {
             req->completionEvent = &cacheCompletionEvent;
-            lastDcacheStall = curTick;
+            lastDcacheStall = curTick();
//            unscheduleTickEvent();
             status = DcacheMissStoreStall;
             DPRINTF(IBE, "Dcache miss store stall!\n");
diff --git a/src/cpu/ozone/inst_queue_impl.hh b/src/cpu/ozone/inst_queue_impl.hh
index ae2e3b09b..0068f2977 100644
--- a/src/cpu/ozone/inst_queue_impl.hh
+++ b/src/cpu/ozone/inst_queue_impl.hh
@@ -673,7 +673,7 @@ InstQueue<Impl>::scheduleReadyInsts()
                 FUCompletion *execution = new FUCompletion(issuing_inst,
                                                            idx, this);
 
-                execution->schedule(curTick + issue_latency - 1);
+                execution->schedule(curTick() + issue_latency - 1);
             } else {
                 i2e_info->insts[exec_queue_slot++] = issuing_inst;
                 i2e_info->size++;
diff --git a/src/cpu/ozone/lsq_unit.hh b/src/cpu/ozone/lsq_unit.hh
index d8e402b65..0216c5013 100644
--- a/src/cpu/ozone/lsq_unit.hh
+++ b/src/cpu/ozone/lsq_unit.hh
@@ -485,7 +485,7 @@ OzoneLSQ<Impl>::read(MemReqPtr &req, T &data, int load_idx)
     req->cmd = Read;
     assert(!req->completionEvent);
     req->completionEvent = NULL;
-    req->time = curTick;
+    req->time = curTick();
     assert(!req->data);
     req->data = new uint8_t[64];
 
@@ -502,7 +502,7 @@ OzoneLSQ<Impl>::read(MemReqPtr &req, T &data, int load_idx)
         // We'll say this has a 1 cycle load-store forwarding latency
         // for now.
         // FIXME - Need to make this a parameter.
-        wb->schedule(curTick);
+        wb->schedule(curTick());
 
         // Should keep track of stat for forwarded data
         return NoFault;
@@ -562,7 +562,7 @@ OzoneLSQ<Impl>::read(MemReqPtr &req, T &data, int load_idx)
     // Setup MemReq pointer
     req->cmd = Read;
     req->completionEvent = NULL;
-    req->time = curTick;
+    req->time = curTick();
     assert(!req->data);
     req->data = new uint8_t[64];
 
@@ -585,7 +585,7 @@ OzoneLSQ<Impl>::read(MemReqPtr &req, T &data, int load_idx)
         DPRINTF(Activity, "Activity: ld accessing mem miss [sn:%lli]\n",
                 inst->seqNum);
 
-        lastDcacheStall = curTick;
+        lastDcacheStall = curTick();
 
         _status = DcacheMissStall;
 
diff --git a/src/cpu/ozone/lsq_unit_impl.hh b/src/cpu/ozone/lsq_unit_impl.hh
index dd44adf6e..f36b870d8 100644
--- a/src/cpu/ozone/lsq_unit_impl.hh
+++ b/src/cpu/ozone/lsq_unit_impl.hh
@@ -557,7 +557,7 @@ OzoneLSQ<Impl>::writebackStores()
 //        Fault fault = cpu->translateDataReadReq(req);
         req->cmd = Write;
         req->completionEvent = NULL;
-        req->time = curTick;
+        req->time = curTick();
         assert(!req->data);
         req->data = new uint8_t[64];
         memcpy(req->data, (uint8_t *)&storeQueue[storeWBIdx].data, req->size);
@@ -615,7 +615,7 @@ OzoneLSQ<Impl>::writebackStores()
                 req->completionEvent = new
                     StoreCompletionEvent(storeWBIdx, wb, this);
 
-                lastDcacheStall = curTick;
+                lastDcacheStall = curTick();
 
                 _status = DcacheMissStall;
 
@@ -637,7 +637,7 @@ OzoneLSQ<Impl>::writebackStores()
             typename BackEnd::LdWritebackEvent *wb =
                 new typename BackEnd::LdWritebackEvent(storeQueue[storeWBIdx].inst,
                                                        be);
-            wb->schedule(curTick);
+            wb->schedule(curTick());
         }
 
         completeStore(storeWBIdx);
diff --git a/src/cpu/ozone/lw_back_end_impl.hh b/src/cpu/ozone/lw_back_end_impl.hh
index 465fccbdb..8000c142e 100644
--- a/src/cpu/ozone/lw_back_end_impl.hh
+++ b/src/cpu/ozone/lw_back_end_impl.hh
@@ -45,7 +45,7 @@ LWBackEnd<Impl>::generateTrapEvent(Tick latency)
 
     TrapEvent *trap = new TrapEvent(this);
 
-    trap->schedule(curTick + cpu->ticks(latency));
+    trap->schedule(curTick() + cpu->ticks(latency));
     thread->trapPending = true;
 }
 
@@ -1226,7 +1226,7 @@ LWBackEnd<Impl>::commitInst(int inst_num)
 
     // Write the done sequence number here.
     toIEW->doneSeqNum = inst->seqNum;
-    lastCommitCycle = curTick;
+    lastCommitCycle = curTick();
 
 #if FULL_SYSTEM
     int count = 0;
diff --git a/src/cpu/ozone/lw_lsq.hh b/src/cpu/ozone/lw_lsq.hh
index ee0312969..9605f175e 100644
--- a/src/cpu/ozone/lw_lsq.hh
+++ b/src/cpu/ozone/lw_lsq.hh
@@ -581,7 +581,7 @@ OzoneLWLSQ<Impl>::read(RequestPtr req, T &data, int load_idx)
         // We'll say this has a 1 cycle load-store forwarding latency
         // for now.
         // @todo: Need to make this a parameter.
-        wb->schedule(curTick);
+        wb->schedule(curTick());
 
         // Should keep track of stat for forwarded data
         return NoFault;
diff --git a/src/cpu/ozone/lw_lsq_impl.hh b/src/cpu/ozone/lw_lsq_impl.hh
index c714c5d38..0c4e4b9c7 100644
--- a/src/cpu/ozone/lw_lsq_impl.hh
+++ b/src/cpu/ozone/lw_lsq_impl.hh
@@ -65,7 +65,7 @@ Tick
 OzoneLWLSQ<Impl>::DcachePort::recvAtomic(PacketPtr pkt)
 {
     panic("O3CPU model does not work with atomic mode!");
-    return curTick;
+    return curTick();
 }
 
 template <class Impl>
@@ -677,7 +677,7 @@ OzoneLWLSQ<Impl>::writebackStores()
 
                 be->addDcacheMiss(inst);
 
-                lastDcacheStall = curTick;
+                lastDcacheStall = curTick();
 
                 _status = DcacheMissStall;
 
diff --git a/src/cpu/pc_event.cc b/src/cpu/pc_event.cc
index 533d61498..09bd66819 100644
--- a/src/cpu/pc_event.cc
+++ b/src/cpu/pc_event.cc
@@ -111,7 +111,7 @@ PCEventQueue::dump() const
     const_iterator e = pc_map.end();
 
     for (; i != e; ++i)
-        cprintf("%d: event at %#x: %s\n", curTick, (*i)->pc(),
+        cprintf("%d: event at %#x: %s\n", curTick(), (*i)->pc(),
                 (*i)->descr());
 }
 
diff --git a/src/cpu/simple/atomic.cc b/src/cpu/simple/atomic.cc
index de26ca2f8..35ad46158 100644
--- a/src/cpu/simple/atomic.cc
+++ b/src/cpu/simple/atomic.cc
@@ -267,7 +267,7 @@ AtomicSimpleCPU::activateContext(int thread_num, int delay)
     numCycles += tickToCycles(thread->lastActivate - thread->lastSuspend);
 
     //Make sure ticks are still on multiples of cycles
-    schedule(tickEvent, nextCycle(curTick + ticks(delay)));
+    schedule(tickEvent, nextCycle(curTick() + ticks(delay)));
     _status = Running;
 }
 
@@ -731,7 +731,7 @@ AtomicSimpleCPU::tick()
         latency = ticks(1);
 
     if (_status != Idle)
-        schedule(tickEvent, curTick + latency);
+        schedule(tickEvent, curTick() + latency);
 }
 
diff --git a/src/cpu/simple/base.cc b/src/cpu/simple/base.cc
index c993110e1..13ef0648c 100644
--- a/src/cpu/simple/base.cc
+++ b/src/cpu/simple/base.cc
@@ -330,7 +330,7 @@ BaseSimpleCPU::preExecute()
     if(curStaticInst)
     {
 #if TRACING_ON
-        traceData = tracer->getInstRecord(curTick, tc,
+        traceData = tracer->getInstRecord(curTick(), tc,
                                           curStaticInst, thread->pcState(),
                                           curMacroStaticInst);
 
        DPRINTF(Decode,"Decode: Decoded %s instruction: 0x%x\n",
diff --git a/src/cpu/simple/timing.cc b/src/cpu/simple/timing.cc
index 7307f2fc9..9192c0808 100644
--- a/src/cpu/simple/timing.cc
+++ b/src/cpu/simple/timing.cc
@@ -85,7 +85,7 @@ Tick
 TimingSimpleCPU::CpuPort::recvAtomic(PacketPtr pkt)
 {
     panic("TimingSimpleCPU doesn't expect recvAtomic callback!");
-    return curTick;
+    return curTick();
 }
 
 void
@@ -189,7 +189,7 @@ TimingSimpleCPU::switchOut()
 {
     assert(_status == Running || _status == Idle);
     _status = SwitchedOut;
-    numCycles += tickToCycles(curTick - previousTick);
+    numCycles += tickToCycles(curTick() - previousTick);
 
     // If we've been scheduled to resume but are then told to switch out,
     // we'll need to cancel it.
@@ -217,7 +217,7 @@ TimingSimpleCPU::takeOverFrom(BaseCPU *oldCPU)
         _status = Idle;
     }
     assert(threadContexts.size() == 1);
-    previousTick = curTick;
+    previousTick = curTick();
 }
 
 
@@ -235,7 +235,7 @@ TimingSimpleCPU::activateContext(int thread_num, int delay)
     _status = Running;
 
     // kick things off by initiating the fetch of the next instruction
-    schedule(fetchEvent, nextCycle(curTick + ticks(delay)));
+    schedule(fetchEvent, nextCycle(curTick() + ticks(delay)));
 }
 
 
@@ -266,7 +266,7 @@ TimingSimpleCPU::handleReadPacket(PacketPtr pkt)
     if (req->isMmapedIpr()) {
         Tick delay;
         delay = TheISA::handleIprRead(thread->getTC(), pkt);
-        new IprEvent(pkt, this, nextCycle(curTick + delay));
+        new IprEvent(pkt, this, nextCycle(curTick() + delay));
         _status = DcacheWaitResponse;
         dcache_pkt = NULL;
     } else if (!dcachePort.sendTiming(pkt)) {
@@ -355,8 +355,8 @@ TimingSimpleCPU::translationFault(Fault fault)
 {
     // fault may be NoFault in cases where a fault is suppressed,
     // for instance prefetches.
-    numCycles += tickToCycles(curTick - previousTick);
-    previousTick = curTick;
+    numCycles += tickToCycles(curTick() - previousTick);
+    previousTick = curTick();
 
     if (traceData) {
         // Since there was a fault, we shouldn't trace this instruction.
@@ -538,7 +538,7 @@ TimingSimpleCPU::handleWritePacket()
     if (req->isMmapedIpr()) {
         Tick delay;
         delay = TheISA::handleIprWrite(thread->getTC(), dcache_pkt);
-        new IprEvent(dcache_pkt, this, nextCycle(curTick + delay));
+        new IprEvent(dcache_pkt, this, nextCycle(curTick() + delay));
         _status = DcacheWaitResponse;
         dcache_pkt = NULL;
     } else if (!dcachePort.sendTiming(dcache_pkt)) {
@@ -726,8 +726,8 @@ TimingSimpleCPU::fetch()
         _status = IcacheWaitResponse;
         completeIfetch(NULL);
 
-        numCycles += tickToCycles(curTick - previousTick);
-        previousTick = curTick;
+        numCycles += tickToCycles(curTick() - previousTick);
+        previousTick = curTick();
     }
 }
 
@@ -754,8 +754,8 @@ TimingSimpleCPU::sendFetch(Fault fault, RequestPtr req, ThreadContext *tc)
         advanceInst(fault);
     }
 
-    numCycles += tickToCycles(curTick - previousTick);
-    previousTick = curTick;
+    numCycles += tickToCycles(curTick() - previousTick);
+    previousTick = curTick();
 }
 
 
@@ -787,8 +787,8 @@ TimingSimpleCPU::completeIfetch(PacketPtr pkt)
 
     _status = Running;
 
-    numCycles += tickToCycles(curTick - previousTick);
-    previousTick = curTick;
+    numCycles += tickToCycles(curTick() - previousTick);
+    previousTick = curTick();
 
     if (getState() == SimObject::Draining) {
         if (pkt) {
@@ -862,9 +862,9 @@ TimingSimpleCPU::IcachePort::recvTiming(PacketPtr pkt)
 {
     if (pkt->isResponse() && !pkt->wasNacked()) {
         // delay processing of returned data until next CPU clock edge
-        Tick next_tick = cpu->nextCycle(curTick);
+        Tick next_tick = cpu->nextCycle(curTick());
 
-        if (next_tick == curTick)
+        if (next_tick == curTick())
             cpu->completeIfetch(pkt);
         else
             tickEvent.schedule(pkt, next_tick);
@@ -906,8 +906,8 @@ TimingSimpleCPU::completeDataAccess(PacketPtr pkt)
     assert(_status == DcacheWaitResponse || _status == DTBWaitResponse ||
            pkt->req->getFlags().isSet(Request::NO_ACCESS));
 
-    numCycles += tickToCycles(curTick - previousTick);
-    previousTick = curTick;
+    numCycles += tickToCycles(curTick() - previousTick);
+    previousTick = curTick();
 
     if (pkt->senderState) {
         SplitFragmentSenderState * send_state =
@@ -994,9 +994,9 @@ TimingSimpleCPU::DcachePort::recvTiming(PacketPtr pkt)
 {
     if (pkt->isResponse() && !pkt->wasNacked()) {
         // delay processing of returned data until next CPU clock edge
-        Tick next_tick = cpu->nextCycle(curTick);
+        Tick next_tick = cpu->nextCycle(curTick());
 
-        if (next_tick == curTick) {
+        if (next_tick == curTick()) {
             cpu->completeDataAccess(pkt);
         } else {
             if (!tickEvent.scheduled()) {
diff --git a/src/cpu/simple_thread.cc b/src/cpu/simple_thread.cc
index 51e27188c..61174dd4e 100644
--- a/src/cpu/simple_thread.cc
+++ b/src/cpu/simple_thread.cc
@@ -234,7 +234,7 @@ SimpleThread::activate(int delay)
     if (status() == ThreadContext::Active)
         return;
 
-    lastActivate = curTick;
+    lastActivate = curTick();
 
//    if (status() == ThreadContext::Unallocated) {
//        cpu->activateWhenReady(_threadId);
@@ -253,8 +253,8 @@ SimpleThread::suspend()
     if (status() == ThreadContext::Suspended)
         return;
 
-    lastActivate = curTick;
-    lastSuspend = curTick;
+    lastActivate = curTick();
+    lastSuspend = curTick();
 /*
 #if FULL_SYSTEM
     // Don't change the status from active if there are pending interrupts
diff --git a/src/cpu/static_inst.cc b/src/cpu/static_inst.cc
index f2a72c96a..f1ec05802 100644
--- a/src/cpu/static_inst.cc
+++ b/src/cpu/static_inst.cc
@@ -51,7 +51,7 @@ StaticInst::~StaticInst()
 void
 StaticInst::dumpDecodeCacheStats()
 {
-    cerr << "Decode hash table stats @ " << curTick << ":" << endl;
+    cerr << "Decode hash table stats @ " << curTick() << ":" << endl;
     cerr << "\tnum entries = " << decodeCache.size() << endl;
     cerr << "\tnum buckets = " << decodeCache.bucket_count() << endl;
     vector<int> hist(100, 0);
diff --git a/src/cpu/testers/directedtest/RubyDirectedTester.cc b/src/cpu/testers/directedtest/RubyDirectedTester.cc
index 56352d14a..cc7c84dd3 100644
--- a/src/cpu/testers/directedtest/RubyDirectedTester.cc
+++ b/src/cpu/testers/directedtest/RubyDirectedTester.cc
@@ -114,7 +114,7 @@ RubyDirectedTester::hitCallback(NodeID proc, Addr addr)
             addr);
 
     generator->performCallback(proc, addr);
-    schedule(directedStartEvent, curTick);
+    schedule(directedStartEvent, curTick());
 }
 
 void
@@ -122,7 +122,7 @@ RubyDirectedTester::wakeup()
 {
     if (m_requests_completed < m_requests_to_complete) {
         if (!generator->initiate()) {
-            schedule(directedStartEvent, curTick + 1);
+            schedule(directedStartEvent, curTick() + 1);
         }
     } else {
         exitSimLoop("Ruby DirectedTester completed");
diff --git a/src/cpu/testers/memtest/memtest.cc b/src/cpu/testers/memtest/memtest.cc
index 6f3bbd77e..9440bfec2 100644
--- a/src/cpu/testers/memtest/memtest.cc
+++ b/src/cpu/testers/memtest/memtest.cc
@@ -69,14 +69,14 @@ MemTest::CpuPort::recvAtomic(PacketPtr pkt)
     // must be snoop upcall
     assert(pkt->isRequest());
     assert(pkt->getDest() == Packet::Broadcast);
-    return curTick;
+    return curTick();
 }
 
 void
 MemTest::CpuPort::recvFunctional(PacketPtr pkt)
 {
     //Do nothing if we see one come through
-//    if (curTick != 0)//Supress warning durring initialization
+//    if (curTick() != 0)//Supress warning durring initialization
//        warn("Functional Writes not implemented in MemTester\n");
     //Need to find any response values that intersect and update
     return;
@@ -220,7 +220,7 @@ MemTest::completeRequest(PacketPtr pkt)
             if (memcmp(pkt_data, data, pkt->getSize()) != 0) {
                 panic("%s: read of %x (blk %x) @ cycle %d "
                       "returns %x, expected %x\n", name(),
-                      req->getPaddr(), blockAddr(req->getPaddr()), curTick,
+                      req->getPaddr(), blockAddr(req->getPaddr()), curTick(),
                       *pkt_data, *data);
             }
 
@@ -229,7 +229,7 @@ MemTest::completeRequest(PacketPtr pkt)
 
             if (numReads == (uint64_t)nextProgressMessage) {
                 ccprintf(cerr, "%s: completed %d read accesses @%d\n",
-                         name(), numReads, curTick);
+                         name(), numReads, curTick());
                 nextProgressMessage += progressInterval;
             }
 
@@ -272,13 +272,13 @@ void
 MemTest::tick()
 {
     if (!tickEvent.scheduled())
-        schedule(tickEvent, curTick + ticks(1));
+        schedule(tickEvent, curTick() + ticks(1));
 
     if (++noResponseCycles >= 500000) {
         if (issueDmas) {
             cerr << "DMA tester ";
         }
-        cerr << name() << ": deadlocked at cycle " << curTick << endl;
+        cerr << name() << ": deadlocked at cycle " << curTick() << endl;
         fatal("");
     }
diff --git a/src/cpu/testers/rubytest/Check.cc b/src/cpu/testers/rubytest/Check.cc
index 0d384b08a..a33351312 100644
--- a/src/cpu/testers/rubytest/Check.cc
+++ b/src/cpu/testers/rubytest/Check.cc
@@ -98,7 +98,7 @@ Check::initiatePrefetch()
     }
 
     // Prefetches are assumed to be 0 sized
-    Request *req = new Request(m_address.getAddress(), 0, flags, curTick,
+    Request *req = new Request(m_address.getAddress(), 0, flags, curTick(),
                                m_pc.getAddress());
 
     PacketPtr pkt = new Packet(req, cmd, port->idx);
@@ -139,7 +139,7 @@ Check::initiateAction()
     Address writeAddr(m_address.getAddress() + m_store_count);
 
     // Stores are assumed to be 1 byte-sized
-    Request *req = new Request(writeAddr.getAddress(), 1, flags, curTick,
+    Request *req = new Request(writeAddr.getAddress(), 1, flags, curTick(),
                                m_pc.getAddress());
 
     Packet::Command cmd;
@@ -205,7 +205,7 @@ Check::initiateCheck()
 
     // Checks are sized depending on the number of bytes written
     Request *req = new Request(m_address.getAddress(), CHECK_SIZE, flags,
-                               curTick, m_pc.getAddress());
+                               curTick(), m_pc.getAddress());
 
     PacketPtr pkt = new Packet(req, MemCmd::ReadReq, port->idx);
     uint8_t* dataArray = new uint8_t[CHECK_SIZE];
diff --git a/src/cpu/testers/rubytest/RubyTester.cc b/src/cpu/testers/rubytest/RubyTester.cc
index 8c5aafd89..1d477dad2 100644
--- a/src/cpu/testers/rubytest/RubyTester.cc
+++ b/src/cpu/testers/rubytest/RubyTester.cc
@@ -160,7 +160,7 @@ RubyTester::wakeup()
         checkForDeadlock();
 
-        schedule(checkStartEvent, curTick + m_wakeup_frequency);
+        schedule(checkStartEvent, curTick() + m_wakeup_frequency);
     } else {
         exitSimLoop("Ruby Tester completed");
     }
diff --git a/src/cpu/trace/trace_cpu.cc b/src/cpu/trace/trace_cpu.cc
index b286f1e40..70aa1f042 100644
--- a/src/cpu/trace/trace_cpu.cc
+++ b/src/cpu/trace/trace_cpu.cc
@@ -66,13 +66,13 @@ TraceCPU::tick()
     int instReqs = 0;
     int dataReqs = 0;
 
-    while (nextReq && curTick >= nextCycle) {
+    while (nextReq && curTick() >= nextCycle) {
         assert(nextReq->thread_num < 4 && "Not enough threads");
         if (nextReq->isInstFetch() && icacheInterface) {
             if (icacheInterface->isBlocked())
                 break;
 
-            nextReq->time = curTick;
+            nextReq->time = curTick();
             if (nextReq->cmd == Squash) {
                 icacheInterface->squash(nextReq->asid);
             } else {
@@ -91,7 +91,7 @@ TraceCPU::tick()
                 break;
 
             ++dataReqs;
-            nextReq->time = curTick;
+            nextReq->time = curTick();
             if (dcacheInterface->doEvents()) {
                 nextReq->completionEvent =
                     new TraceCompleteEvent(nextReq, this);
@@ -113,7 +113,7 @@ TraceCPU::tick()
             tickEvent.schedule(mainEventQueue.nextEventTime() + ticks(1));
         }
     } else {
-        tickEvent.schedule(max(curTick + ticks(1), nextCycle));
+        tickEvent.schedule(max(curTick() + ticks(1), nextCycle));
     }
 }