summaryrefslogtreecommitdiff
path: root/src
diff options
context:
space:
mode:
Diffstat (limited to 'src')
-rw-r--r--src/arch/alpha/isa/decoder.isa2
-rw-r--r--src/arch/alpha/kernel_stats.cc4
-rw-r--r--src/arch/alpha/tru64/process.cc2
-rw-r--r--src/arch/arm/table_walker.cc2
-rw-r--r--src/arch/mips/isa.cc4
-rw-r--r--src/arch/mips/isa/formats/mt.isa4
-rw-r--r--src/arch/mips/locked_mem.hh2
-rwxr-xr-xsrc/arch/mips/mt.hh6
-rw-r--r--src/arch/sparc/ua2005.cc10
-rw-r--r--src/arch/x86/interrupts.cc8
-rw-r--r--src/base/cp_annotate.cc12
-rw-r--r--src/base/cp_annotate.hh2
-rw-r--r--src/base/fast_alloc.cc4
-rw-r--r--src/base/misc.cc4
-rw-r--r--src/base/remote_gdb.cc4
-rw-r--r--src/base/statistics.hh20
-rw-r--r--src/base/stats/mysql.cc2
-rw-r--r--src/base/stats/output.cc6
-rw-r--r--src/base/trace.hh10
-rw-r--r--src/cpu/base.cc22
-rw-r--r--src/cpu/base.hh6
-rw-r--r--src/cpu/checker/cpu.cc4
-rw-r--r--src/cpu/checker/cpu_impl.hh18
-rw-r--r--src/cpu/inorder/cpu.cc28
-rw-r--r--src/cpu/inorder/cpu.hh2
-rw-r--r--src/cpu/inorder/inorder_dyn_inst.cc8
-rw-r--r--src/cpu/inorder/pipeline_stage.cc10
-rw-r--r--src/cpu/inorder/reg_dep_map.cc4
-rw-r--r--src/cpu/inorder/resource.cc4
-rw-r--r--src/cpu/inorder/resource_pool.9stage.cc20
-rw-r--r--src/cpu/inorder/resource_pool.cc16
-rw-r--r--src/cpu/inorder/resources/branch_predictor.cc4
-rw-r--r--src/cpu/inorder/resources/cache_unit.cc8
-rw-r--r--src/cpu/inorder/resources/execution_unit.cc6
-rw-r--r--src/cpu/inorder/resources/fetch_seq_unit.cc4
-rw-r--r--src/cpu/inorder/resources/graduation_unit.cc6
-rw-r--r--src/cpu/inorder/resources/mult_div_unit.cc4
-rw-r--r--src/cpu/o3/commit_impl.hh2
-rw-r--r--src/cpu/o3/cpu.cc20
-rw-r--r--src/cpu/o3/cpu.hh12
-rw-r--r--src/cpu/o3/fetch_impl.hh6
-rw-r--r--src/cpu/o3/inst_queue_impl.hh2
-rw-r--r--src/cpu/o3/lsq_impl.hh2
-rw-r--r--src/cpu/o3/lsq_unit.hh2
-rw-r--r--src/cpu/o3/lsq_unit_impl.hh2
-rwxr-xr-xsrc/cpu/o3/thread_context_impl.hh6
-rw-r--r--src/cpu/ozone/back_end.hh8
-rw-r--r--src/cpu/ozone/cpu.hh4
-rw-r--r--src/cpu/ozone/cpu_impl.hh4
-rw-r--r--src/cpu/ozone/front_end_impl.hh6
-rw-r--r--src/cpu/ozone/inorder_back_end.hh18
-rw-r--r--src/cpu/ozone/inst_queue_impl.hh2
-rw-r--r--src/cpu/ozone/lsq_unit.hh8
-rw-r--r--src/cpu/ozone/lsq_unit_impl.hh6
-rw-r--r--src/cpu/ozone/lw_back_end_impl.hh4
-rw-r--r--src/cpu/ozone/lw_lsq.hh2
-rw-r--r--src/cpu/ozone/lw_lsq_impl.hh4
-rw-r--r--src/cpu/pc_event.cc2
-rw-r--r--src/cpu/simple/atomic.cc4
-rw-r--r--src/cpu/simple/base.cc2
-rw-r--r--src/cpu/simple/timing.cc40
-rw-r--r--src/cpu/simple_thread.cc6
-rw-r--r--src/cpu/static_inst.cc2
-rw-r--r--src/cpu/testers/directedtest/RubyDirectedTester.cc4
-rw-r--r--src/cpu/testers/memtest/memtest.cc12
-rw-r--r--src/cpu/testers/rubytest/Check.cc6
-rw-r--r--src/cpu/testers/rubytest/RubyTester.cc2
-rw-r--r--src/cpu/trace/trace_cpu.cc8
-rw-r--r--src/dev/alpha/backdoor.cc2
-rw-r--r--src/dev/arm/pl011.cc6
-rw-r--r--src/dev/arm/pl111.cc12
-rw-r--r--src/dev/arm/rv_ctrl.cc2
-rw-r--r--src/dev/arm/timer_sp804.cc6
-rw-r--r--src/dev/etherbus.cc4
-rw-r--r--src/dev/etherdump.cc4
-rw-r--r--src/dev/etherlink.cc4
-rw-r--r--src/dev/ethertap.cc4
-rw-r--r--src/dev/i8254xGBe.cc30
-rw-r--r--src/dev/ide_disk.cc12
-rw-r--r--src/dev/intel_8254_timer.cc6
-rw-r--r--src/dev/io_device.cc14
-rw-r--r--src/dev/mc146818.cc16
-rw-r--r--src/dev/mc146818.hh2
-rw-r--r--src/dev/ns_gige.cc30
-rw-r--r--src/dev/sinic.cc26
-rw-r--r--src/dev/uart8250.cc14
-rw-r--r--src/kern/kernel_stats.cc4
-rw-r--r--src/mem/bridge.cc10
-rw-r--r--src/mem/bus.cc24
-rw-r--r--src/mem/cache/base.cc2
-rw-r--r--src/mem/cache/base.hh4
-rw-r--r--src/mem/cache/blk.hh2
-rw-r--r--src/mem/cache/cache_impl.hh18
-rw-r--r--src/mem/cache/mshr.cc4
-rw-r--r--src/mem/cache/mshr.hh2
-rw-r--r--src/mem/cache/mshr_queue.hh2
-rw-r--r--src/mem/cache/tags/fa_lru.cc2
-rw-r--r--src/mem/cache/tags/iic.cc6
-rw-r--r--src/mem/cache/tags/lru.cc8
-rw-r--r--src/mem/dram.cc72
-rw-r--r--src/mem/mport.cc2
-rw-r--r--src/mem/packet.hh6
-rw-r--r--src/mem/request.hh8
-rw-r--r--src/mem/ruby/eventqueue/RubyEventQueue.hh2
-rw-r--r--src/mem/ruby/system/RubyPort.cc4
-rw-r--r--src/mem/ruby/system/Sequencer.cc4
-rw-r--r--src/mem/ruby/system/System.cc2
-rw-r--r--src/mem/tport.cc8
-rw-r--r--src/mem/tport.hh4
-rw-r--r--src/python/m5/simulate.py2
-rw-r--r--src/python/swig/core.i3
-rw-r--r--src/python/swig/stats.i3
-rw-r--r--src/sim/core.cc4
-rw-r--r--src/sim/core.hh8
-rw-r--r--src/sim/eventq.cc2
-rw-r--r--src/sim/eventq.hh14
-rw-r--r--src/sim/init.cc2
-rw-r--r--src/sim/pseudo_inst.cc16
-rw-r--r--src/sim/serialize.cc10
-rw-r--r--src/sim/serialize.hh2
-rw-r--r--src/sim/sim_events.cc2
-rw-r--r--src/sim/sim_exit.hh4
-rw-r--r--src/sim/sim_object.hh2
-rw-r--r--src/sim/simulate.cc10
-rw-r--r--src/sim/stat_control.cc6
-rw-r--r--src/sim/stat_control.hh2
-rw-r--r--src/sim/syscall_emul.cc4
-rw-r--r--src/sim/syscall_emul.hh4
-rw-r--r--src/unittest/stattest.cc40
129 files changed, 515 insertions, 509 deletions
diff --git a/src/arch/alpha/isa/decoder.isa b/src/arch/alpha/isa/decoder.isa
index d829ad744..f0aa5a3fd 100644
--- a/src/arch/alpha/isa/decoder.isa
+++ b/src/arch/alpha/isa/decoder.isa
@@ -791,7 +791,7 @@ decode OPCODE default Unknown::unknown() {
Ra = xc->readMiscReg(IPR_CC) + (Rb & 0);
#else
- Ra = curTick;
+ Ra = curTick();
#endif
}}, IsUnverifiable);
diff --git a/src/arch/alpha/kernel_stats.cc b/src/arch/alpha/kernel_stats.cc
index 6e9dc1611..70eeadd8e 100644
--- a/src/arch/alpha/kernel_stats.cc
+++ b/src/arch/alpha/kernel_stats.cc
@@ -143,9 +143,9 @@ Statistics::changeMode(cpu_mode newmode, ThreadContext *tc)
Linux::ThreadInfo(tc).curTaskPID());
_modeGood[newmode]++;
- _modeTicks[themode] += curTick - lastModeTick;
+ _modeTicks[themode] += curTick() - lastModeTick;
- lastModeTick = curTick;
+ lastModeTick = curTick();
themode = newmode;
}
diff --git a/src/arch/alpha/tru64/process.cc b/src/arch/alpha/tru64/process.cc
index 9aae7e155..b10fea02b 100644
--- a/src/arch/alpha/tru64/process.cc
+++ b/src/arch/alpha/tru64/process.cc
@@ -184,7 +184,7 @@ tableFunc(SyscallDesc *desc, int callnum, LiveProcess *process,
TypedBufferArg<Tru64::tbl_sysinfo> elp(bufPtr);
const int clk_hz = one_million;
- elp->si_user = htog(curTick / (SimClock::Frequency / clk_hz));
+ elp->si_user = htog(curTick() / (SimClock::Frequency / clk_hz));
elp->si_nice = htog(0);
elp->si_sys = htog(0);
elp->si_idle = htog(0);
diff --git a/src/arch/arm/table_walker.cc b/src/arch/arm/table_walker.cc
index 88f2a455f..6b2113639 100644
--- a/src/arch/arm/table_walker.cc
+++ b/src/arch/arm/table_walker.cc
@@ -713,7 +713,7 @@ void
TableWalker::nextWalk(ThreadContext *tc)
{
if (pendingQueue.size())
- schedule(doProcessEvent, tc->getCpuPtr()->nextCycle(curTick+1));
+ schedule(doProcessEvent, tc->getCpuPtr()->nextCycle(curTick()+1));
}
diff --git a/src/arch/mips/isa.cc b/src/arch/mips/isa.cc
index 1cad7e4be..5cd65cfac 100644
--- a/src/arch/mips/isa.cc
+++ b/src/arch/mips/isa.cc
@@ -528,7 +528,7 @@ ISA::scheduleCP0Update(BaseCPU *cpu, int delay)
//schedule UPDATE
CP0Event *cp0_event = new CP0Event(this, cpu, UpdateCP0);
- cpu->schedule(cp0_event, curTick + cpu->ticks(delay));
+ cpu->schedule(cp0_event, curTick() + cpu->ticks(delay));
}
}
@@ -585,7 +585,7 @@ ISA::CP0Event::description() const
void
ISA::CP0Event::scheduleEvent(int delay)
{
- cpu->reschedule(this, curTick + cpu->ticks(delay), true);
+ cpu->reschedule(this, curTick() + cpu->ticks(delay), true);
}
void
diff --git a/src/arch/mips/isa/formats/mt.isa b/src/arch/mips/isa/formats/mt.isa
index 9d354c46a..1944d69d3 100644
--- a/src/arch/mips/isa/formats/mt.isa
+++ b/src/arch/mips/isa/formats/mt.isa
@@ -201,7 +201,7 @@ def format MT_Control(code, *opt_flags) {{
def format MT_MFTR(code, *flags) {{
flags += ('IsNonSpeculative', )
-# code = 'std::cerr << curTick << \": T\" << xc->tcBase()->threadId() << \": Executing MT INST: ' + name + '\" << endl;\n' + code
+# code = 'std::cerr << curTick() << \": T\" << xc->tcBase()->threadId() << \": Executing MT INST: ' + name + '\" << endl;\n' + code
code += 'if (MT_H == 1) {\n'
code += 'data = bits(data, top_bit, bottom_bit);\n'
@@ -217,7 +217,7 @@ def format MT_MFTR(code, *flags) {{
def format MT_MTTR(code, *flags) {{
flags += ('IsNonSpeculative', )
-# code = 'std::cerr << curTick << \": T\" << xc->tcBase()->threadId() << \": Executing MT INST: ' + name + '\" << endl;\n' + code
+# code = 'std::cerr << curTick() << \": T\" << xc->tcBase()->threadId() << \": Executing MT INST: ' + name + '\" << endl;\n' + code
iop = InstObjParams(name, Name, 'MTOp', code, flags)
header_output = BasicDeclare.subst(iop)
decoder_output = BasicConstructor.subst(iop)
diff --git a/src/arch/mips/locked_mem.hh b/src/arch/mips/locked_mem.hh
index ddda47a0a..1cc08ee3d 100644
--- a/src/arch/mips/locked_mem.hh
+++ b/src/arch/mips/locked_mem.hh
@@ -85,7 +85,7 @@ handleLockedWrite(XC *xc, Request *req)
if (stCondFailures % 100000 == 0) {
warn("%i: context %d: %d consecutive "
"store conditional failures\n",
- curTick, xc->contextId(), stCondFailures);
+ curTick(), xc->contextId(), stCondFailures);
}
if (!lock_flag){
diff --git a/src/arch/mips/mt.hh b/src/arch/mips/mt.hh
index 3ec6cbe70..c63c65a73 100755
--- a/src/arch/mips/mt.hh
+++ b/src/arch/mips/mt.hh
@@ -81,7 +81,7 @@ haltThread(TC *tc)
tc->setMiscReg(MISCREG_TC_RESTART, pc.npc());
warn("%i: Halting thread %i in %s @ PC %x, setting restart PC to %x",
- curTick, tc->threadId(), tc->getCpuPtr()->name(),
+ curTick(), tc->threadId(), tc->getCpuPtr()->name(),
pc.pc(), pc.npc());
}
}
@@ -99,7 +99,7 @@ restoreThread(TC *tc)
tc->activate(0);
warn("%i: Restoring thread %i in %s @ PC %x",
- curTick, tc->threadId(), tc->getCpuPtr()->name(), restartPC);
+ curTick(), tc->threadId(), tc->getCpuPtr()->name(), restartPC);
}
}
@@ -208,7 +208,7 @@ yieldThread(TC *tc, Fault &fault, int src_reg, uint32_t yield_mask)
tcStatus.a = 0;
tc->setMiscReg(MISCREG_TC_STATUS, tcStatus);
warn("%i: Deactivating Hardware Thread Context #%i",
- curTick, tc->threadId());
+ curTick(), tc->threadId());
}
} else if (src_reg > 0) {
if (src_reg && !yield_mask != 0) {
diff --git a/src/arch/sparc/ua2005.cc b/src/arch/sparc/ua2005.cc
index bd6497b25..efab8b832 100644
--- a/src/arch/sparc/ua2005.cc
+++ b/src/arch/sparc/ua2005.cc
@@ -111,7 +111,7 @@ ISA::setFSReg(int miscReg, const MiscReg &val, ThreadContext *tc)
if (!(tick_cmpr & ~mask(63)) && time > 0) {
if (tickCompare->scheduled())
cpu->deschedule(tickCompare);
- cpu->schedule(tickCompare, curTick + time * cpu->ticks(1));
+ cpu->schedule(tickCompare, curTick() + time * cpu->ticks(1));
}
panic("writing to TICK compare register %#X\n", val);
break;
@@ -127,7 +127,7 @@ ISA::setFSReg(int miscReg, const MiscReg &val, ThreadContext *tc)
if (!(stick_cmpr & ~mask(63)) && time > 0) {
if (sTickCompare->scheduled())
cpu->deschedule(sTickCompare);
- cpu->schedule(sTickCompare, curTick + time * cpu->ticks(1));
+ cpu->schedule(sTickCompare, curTick() + time * cpu->ticks(1));
}
DPRINTF(Timer, "writing to sTICK compare register value %#X\n", val);
break;
@@ -197,7 +197,7 @@ ISA::setFSReg(int miscReg, const MiscReg &val, ThreadContext *tc)
if (!(hstick_cmpr & ~mask(63)) && time > 0) {
if (hSTickCompare->scheduled())
cpu->deschedule(hSTickCompare);
- cpu->schedule(hSTickCompare, curTick + time * cpu->ticks(1));
+ cpu->schedule(hSTickCompare, curTick() + time * cpu->ticks(1));
}
DPRINTF(Timer, "writing to hsTICK compare register value %#X\n", val);
break;
@@ -335,7 +335,7 @@ ISA::processSTickCompare(ThreadContext *tc)
setMiscReg(MISCREG_SOFTINT, softint | (ULL(1) << 16), tc);
}
} else {
- cpu->schedule(sTickCompare, curTick + ticks * cpu->ticks(1));
+ cpu->schedule(sTickCompare, curTick() + ticks * cpu->ticks(1));
}
}
@@ -363,7 +363,7 @@ ISA::processHSTickCompare(ThreadContext *tc)
}
// Need to do something to cause interrupt to happen here !!! @todo
} else {
- cpu->schedule(hSTickCompare, curTick + ticks * cpu->ticks(1));
+ cpu->schedule(hSTickCompare, curTick() + ticks * cpu->ticks(1));
}
}
diff --git a/src/arch/x86/interrupts.cc b/src/arch/x86/interrupts.cc
index cc1d442fe..951392a15 100644
--- a/src/arch/x86/interrupts.cc
+++ b/src/arch/x86/interrupts.cc
@@ -394,7 +394,7 @@ X86ISA::Interrupts::readReg(ApicRegIndex reg)
uint64_t ticksPerCount = clock *
divideFromConf(regs[APIC_DIVIDE_CONFIGURATION]);
// Compute how many m5 ticks are left.
- uint64_t val = apicTimerEvent.when() - curTick;
+ uint64_t val = apicTimerEvent.when() - curTick();
// Turn that into a count.
val = (val + ticksPerCount - 1) / ticksPerCount;
return val;
@@ -572,13 +572,13 @@ X86ISA::Interrupts::setReg(ApicRegIndex reg, uint32_t val)
uint64_t newCount = newVal *
(divideFromConf(regs[APIC_DIVIDE_CONFIGURATION]));
// Schedule on the edge of the next tick plus the new count.
- Tick offset = curTick % clock;
+ Tick offset = curTick() % clock;
if (offset) {
reschedule(apicTimerEvent,
- curTick + (newCount + 1) * clock - offset, true);
+ curTick() + (newCount + 1) * clock - offset, true);
} else {
reschedule(apicTimerEvent,
- curTick + newCount * clock, true);
+ curTick() + newCount * clock, true);
}
}
break;
diff --git a/src/base/cp_annotate.cc b/src/base/cp_annotate.cc
index 4e138a6dd..69b926b29 100644
--- a/src/base/cp_annotate.cc
+++ b/src/base/cp_annotate.cc
@@ -161,7 +161,7 @@ CPA::swSmBegin(ThreadContext *tc)
StringWrap name(sys->name());
if (!sm[0])
- warn("Got null SM at tick %d\n", curTick);
+ warn("Got null SM at tick %d\n", curTick());
int sysi = getSys(sys);
int smi = getSm(sysi, sm, args[1]);
@@ -273,7 +273,7 @@ CPA::doSwSmEnd(System *sys, int cpuid, string sm, uint64_t frame)
DPRINTF(Annotate, "Ending machine: %s; end stack: %s\n", sm,
smMap[smib-1].second.first);
- warn("State machine stack not unwinding correctly at %d\n", curTick);
+ warn("State machine stack not unwinding correctly at %d\n", curTick());
} else {
DPRINTF(Annotate,
"State machine ending:%s sysi:%d id:%#x back:%d getSm:%d\n",
@@ -316,7 +316,7 @@ CPA::swExplictBegin(ThreadContext *tc)
DPRINTF(Annotate, "Explict begin of state %s\n", st);
uint32_t flags = args[0];
if (flags & FL_BAD)
- warn("BAD state encountered: at cycle %d: %s\n", curTick, st);
+ warn("BAD state encountered: at cycle %d: %s\n", curTick(), st);
swBegin(tc->getSystemPtr(), tc->contextId(), st, getFrame(tc), true, args[0]);
}
@@ -688,10 +688,10 @@ CPA::swAq(ThreadContext *tc)
warn("%d: Queue Assert: SW said there should be %d byte(s) in %s,"
"however there are %d byte(s)\n",
- curTick, size, q, qBytes[qi-1]);
+ curTick(), size, q, qBytes[qi-1]);
DPRINTF(AnnotateQ, "%d: Queue Assert: SW said there should be %d"
" byte(s) in %s, however there are %d byte(s)\n",
- curTick, size, q, qBytes[qi-1]);
+ curTick(), size, q, qBytes[qi-1]);
}
}
@@ -813,7 +813,7 @@ CPA::AnnDataPtr
CPA::add(int t, int f, int c, int sm, int stq, int32_t d)
{
AnnDataPtr an = new AnnotateData;
- an->time = curTick;
+ an->time = curTick();
an->data = d;
an->orig_data = d;
an->op = t;
diff --git a/src/base/cp_annotate.hh b/src/base/cp_annotate.hh
index 8ce9995a8..fb955a380 100644
--- a/src/base/cp_annotate.hh
+++ b/src/base/cp_annotate.hh
@@ -408,7 +408,7 @@ class CPA : SimObject
int smi = getSm(sysi, sm, frame);
add(OP_BEGIN, FL_HW | f, 0, smi, getSt(sm, st));
if (f & FL_BAD)
- warn("BAD state encountered: at cycle %d: %s\n", curTick, st);
+ warn("BAD state encountered: at cycle %d: %s\n", curTick(), st);
}
inline void hwQ(flags f, System *sys, uint64_t frame, std::string sm,
diff --git a/src/base/fast_alloc.cc b/src/base/fast_alloc.cc
index 0238f03cb..649f94be3 100644
--- a/src/base/fast_alloc.cc
+++ b/src/base/fast_alloc.cc
@@ -80,7 +80,7 @@ FastAlloc::moreStructs(int bucket)
#include <typeinfo>
#include "base/cprintf.hh"
-#include "sim/core.hh" // for curTick
+#include "sim/core.hh" // for curTick()
using namespace std;
@@ -104,7 +104,7 @@ FastAlloc::FastAlloc()
{
// mark this object in use
inUse = true;
- whenAllocated = curTick;
+ whenAllocated = curTick();
// update count
++numInUse;
diff --git a/src/base/misc.cc b/src/base/misc.cc
index 65cb13356..94336c647 100644
--- a/src/base/misc.cc
+++ b/src/base/misc.cc
@@ -76,7 +76,7 @@ __exit_message(const char *prefix, int code,
format += "Memory Usage: %ld KBytes\n";
format += "For more information see: http://www.m5sim.org/%s/%x\n";
- args.push_back(curTick);
+ args.push_back(curTick());
args.push_back(func);
args.push_back(file);
args.push_back(line);
@@ -114,7 +114,7 @@ __base_message(std::ostream &stream, const char *prefix, bool verbose,
if (verbose) {
format += " @ cycle %d\n[%s:%s, line %d]\n";
- args.push_back(curTick);
+ args.push_back(curTick());
args.push_back(func);
args.push_back(file);
args.push_back(line);
diff --git a/src/base/remote_gdb.cc b/src/base/remote_gdb.cc
index b7c34859b..abc5f4ec0 100644
--- a/src/base/remote_gdb.cc
+++ b/src/base/remote_gdb.cc
@@ -217,10 +217,10 @@ GDBListener::listen()
#ifndef NDEBUG
ccprintf(cerr, "%d: %s: listening for remote gdb #%d on port %d\n",
- curTick, name(), gdb->number, port);
+ curTick(), name(), gdb->number, port);
#else
ccprintf(cerr, "%d: %s: listening for remote gdb on port %d\n",
- curTick, name(), port);
+ curTick(), name(), port);
#endif
}
diff --git a/src/base/statistics.hh b/src/base/statistics.hh
index 3bb74282a..9c10164a9 100644
--- a/src/base/statistics.hh
+++ b/src/base/statistics.hh
@@ -72,7 +72,7 @@
class Callback;
/** The current simulated tick. */
-extern Tick curTick;
+extern Tick curTick();
/* A namespace for all of the Statistics */
namespace Stats {
@@ -530,8 +530,8 @@ class AvgStor
void
set(Counter val)
{
- total += current * (curTick - last);
- last = curTick;
+ total += current * (curTick() - last);
+ last = curTick();
current = val;
}
@@ -560,8 +560,8 @@ class AvgStor
Result
result() const
{
- assert(last == curTick);
- return (Result)(total + current) / (Result)(curTick - lastReset + 1);
+ assert(last == curTick());
+ return (Result)(total + current) / (Result)(curTick() - lastReset + 1);
}
/**
@@ -575,8 +575,8 @@ class AvgStor
void
prepare(Info *info)
{
- total += current * (curTick - last);
- last = curTick;
+ total += current * (curTick() - last);
+ last = curTick();
}
/**
@@ -586,8 +586,8 @@ class AvgStor
reset(Info *info)
{
total = 0.0;
- last = curTick;
- lastReset = curTick;
+ last = curTick();
+ lastReset = curTick();
}
};
@@ -1576,7 +1576,7 @@ class AvgSampleStor
data.type = params->type;
data.sum = sum;
data.squares = squares;
- data.samples = curTick;
+ data.samples = curTick();
}
/**
diff --git a/src/base/stats/mysql.cc b/src/base/stats/mysql.cc
index d947122da..d257de743 100644
--- a/src/base/stats/mysql.cc
+++ b/src/base/stats/mysql.cc
@@ -613,7 +613,7 @@ MySql::output()
configure();
// store sample #
- newdata.tick = curTick;
+ newdata.tick = curTick();
MySQL::Connection &mysql = run->conn();
diff --git a/src/base/stats/output.cc b/src/base/stats/output.cc
index 9d110e4ee..d3cb9da61 100644
--- a/src/base/stats/output.cc
+++ b/src/base/stats/output.cc
@@ -45,10 +45,10 @@ list<Output *> OutputList;
void
dump()
{
- assert(lastDump <= curTick);
- if (lastDump == curTick)
+ assert(lastDump <= curTick());
+ if (lastDump == curTick())
return;
- lastDump = curTick;
+ lastDump = curTick();
prepare();
diff --git a/src/base/trace.hh b/src/base/trace.hh
index f793abff9..a03a34018 100644
--- a/src/base/trace.hh
+++ b/src/base/trace.hh
@@ -89,17 +89,17 @@ inline const std::string &name() { return Trace::DefaultName; }
#define DDUMP(x, data, count) do { \
if (DTRACE(x)) \
- Trace::dump(curTick, name(), data, count); \
+ Trace::dump(curTick(), name(), data, count); \
} while (0)
#define DPRINTF(x, ...) do { \
if (DTRACE(x)) \
- Trace::dprintf(curTick, name(), __VA_ARGS__); \
+ Trace::dprintf(curTick(), name(), __VA_ARGS__); \
} while (0)
#define DPRINTFS(x,s, ...) do { \
if (DTRACE(x)) \
- Trace::dprintf(curTick, s->name(), __VA_ARGS__); \
+ Trace::dprintf(curTick(), s->name(), __VA_ARGS__); \
} while (0)
@@ -109,11 +109,11 @@ inline const std::string &name() { return Trace::DefaultName; }
} while (0)
#define DDUMPN(data, count) do { \
- Trace::dump(curTick, name(), data, count); \
+ Trace::dump(curTick(), name(), data, count); \
} while (0)
#define DPRINTFN(...) do { \
- Trace::dprintf(curTick, name(), __VA_ARGS__); \
+ Trace::dprintf(curTick(), name(), __VA_ARGS__); \
} while (0)
#define DPRINTFNR(...) do { \
diff --git a/src/cpu/base.cc b/src/cpu/base.cc
index e0d29577d..1816568ce 100644
--- a/src/cpu/base.cc
+++ b/src/cpu/base.cc
@@ -66,7 +66,7 @@ CPUProgressEvent::CPUProgressEvent(BaseCPU *_cpu, Tick ival)
cpu(_cpu), _repeatEvent(true)
{
if (_interval)
- cpu->schedule(this, curTick + _interval);
+ cpu->schedule(this, curTick() + _interval);
}
void
@@ -82,13 +82,13 @@ CPUProgressEvent::process()
ipc = 0.0;
#else
cprintf("%lli: %s progress event, total committed:%i, progress insts "
- "committed: %lli\n", curTick, cpu->name(), temp,
+ "committed: %lli\n", curTick(), cpu->name(), temp,
temp - lastNumInst);
#endif
lastNumInst = temp;
if (_repeatEvent)
- cpu->schedule(this, curTick + _interval);
+ cpu->schedule(this, curTick() + _interval);
}
const char *
@@ -110,7 +110,7 @@ BaseCPU::BaseCPU(Params *p)
phase(p->phase)
#endif
{
-// currentTick = curTick;
+// currentTick = curTick();
// if Python did not provide a valid ID, do it here
if (_cpuId == -1 ) {
@@ -231,7 +231,7 @@ BaseCPU::startup()
{
#if FULL_SYSTEM
if (!params()->defer_registration && profileEvent)
- schedule(profileEvent, curTick);
+ schedule(profileEvent, curTick());
#endif
if (params()->progress_interval) {
@@ -270,7 +270,7 @@ BaseCPU::regStats()
Tick
BaseCPU::nextCycle()
{
- Tick next_tick = curTick - phase + clock - 1;
+ Tick next_tick = curTick() - phase + clock - 1;
next_tick -= (next_tick % clock);
next_tick += phase;
return next_tick;
@@ -284,7 +284,7 @@ BaseCPU::nextCycle(Tick begin_tick)
next_tick = next_tick - (next_tick % clock) + clock;
next_tick += phase;
- assert(next_tick >= curTick);
+ assert(next_tick >= curTick());
return next_tick;
}
@@ -390,7 +390,7 @@ BaseCPU::takeOverFrom(BaseCPU *oldCPU, Port *ic, Port *dc)
threadContexts[i]->profileClear();
if (profileEvent)
- schedule(profileEvent, curTick);
+ schedule(profileEvent, curTick());
#endif
// Connect new CPU to old CPU's memory only if new CPU isn't
@@ -424,7 +424,7 @@ BaseCPU::ProfileEvent::process()
tc->profileSample();
}
- cpu->schedule(this, curTick + interval);
+ cpu->schedule(this, curTick() + interval);
}
void
@@ -465,7 +465,7 @@ BaseCPU::traceFunctionsInternal(Addr pc)
}
ccprintf(*functionTraceStream, " (%d)\n%d: %s",
- curTick - functionEntryTick, curTick, sym_str);
- functionEntryTick = curTick;
+ curTick() - functionEntryTick, curTick(), sym_str);
+ functionEntryTick = curTick();
}
}
diff --git a/src/cpu/base.hh b/src/cpu/base.hh
index 5b03d904f..e0491a84a 100644
--- a/src/cpu/base.hh
+++ b/src/cpu/base.hh
@@ -100,20 +100,20 @@ class BaseCPU : public MemObject
// Tick currentTick;
inline Tick frequency() const { return SimClock::Frequency / clock; }
inline Tick ticks(int numCycles) const { return clock * numCycles; }
- inline Tick curCycle() const { return curTick / clock; }
+ inline Tick curCycle() const { return curTick() / clock; }
inline Tick tickToCycles(Tick val) const { return val / clock; }
// @todo remove me after debugging with legion done
Tick instCount() { return instCnt; }
/** The next cycle the CPU should be scheduled, given a cache
* access or quiesce event returning on this cycle. This function
- * may return curTick if the CPU should run on the current cycle.
+ * may return curTick() if the CPU should run on the current cycle.
*/
Tick nextCycle();
/** The next cycle the CPU should be scheduled, given a cache
* access or quiesce event returning on the given Tick. This
- * function may return curTick if the CPU should run on the
+ * function may return curTick() if the CPU should run on the
* current cycle.
* @param begin_tick The tick that the event is completing on.
*/
diff --git a/src/cpu/checker/cpu.cc b/src/cpu/checker/cpu.cc
index 10dd77899..079057765 100644
--- a/src/cpu/checker/cpu.cc
+++ b/src/cpu/checker/cpu.cc
@@ -245,7 +245,7 @@ CheckerCPU::write(T data, Addr addr, unsigned flags, uint64_t *res)
if (data != inst_data) {
warn("%lli: Store value does not match value in memory! "
"Instruction: %#x, memory: %#x",
- curTick, inst_data, data);
+ curTick(), inst_data, data);
handleError();
}
}
@@ -327,6 +327,6 @@ void
CheckerCPU::dumpAndExit()
{
warn("%lli: Checker PC:%#x, next PC:%#x",
- curTick, thread->readPC(), thread->readNextPC());
+ curTick(), thread->readPC(), thread->readNextPC());
panic("Checker found an error!");
}
diff --git a/src/cpu/checker/cpu_impl.hh b/src/cpu/checker/cpu_impl.hh
index 10a9d1177..8197d560d 100644
--- a/src/cpu/checker/cpu_impl.hh
+++ b/src/cpu/checker/cpu_impl.hh
@@ -126,7 +126,7 @@ Checker<DynInstPtr>::verify(DynInstPtr &completed_inst)
} else {
warn("%lli: Changed PC does not match expected PC, "
"changed: %#x, expected: %#x",
- curTick, thread->readPC(), newPC);
+ curTick(), thread->readPC(), newPC);
CheckerCPU::handleError();
}
willChangePC = false;
@@ -166,7 +166,7 @@ Checker<DynInstPtr>::verify(DynInstPtr &completed_inst)
// translate this instruction; in the SMT case it's
// possible that its ITB entry was kicked out.
warn("%lli: Instruction PC %#x was not found in the ITB!",
- curTick, thread->readPC());
+ curTick(), thread->readPC());
handleError(inst);
// go to the next instruction
@@ -315,10 +315,10 @@ Checker<DynInstPtr>::validateInst(DynInstPtr &inst)
{
if (inst->readPC() != thread->readPC()) {
warn("%lli: PCs do not match! Inst: %#x, checker: %#x",
- curTick, inst->readPC(), thread->readPC());
+ curTick(), inst->readPC(), thread->readPC());
if (changedPC) {
warn("%lli: Changed PCs recently, may not be an error",
- curTick);
+ curTick());
} else {
handleError(inst);
}
@@ -329,7 +329,7 @@ Checker<DynInstPtr>::validateInst(DynInstPtr &inst)
if (mi != machInst) {
warn("%lli: Binary instructions do not match! Inst: %#x, "
"checker: %#x",
- curTick, mi, machInst);
+ curTick(), mi, machInst);
handleError(inst);
}
}
@@ -354,7 +354,7 @@ Checker<DynInstPtr>::validateExecution(DynInstPtr &inst)
if (result_mismatch) {
warn("%lli: Instruction results do not match! (Values may not "
"actually be integers) Inst: %#x, checker: %#x",
- curTick, inst->readIntResult(), result.integer);
+ curTick(), inst->readIntResult(), result.integer);
// It's useful to verify load values from memory, but in MP
// systems the value obtained at execute may be different than
@@ -371,7 +371,7 @@ Checker<DynInstPtr>::validateExecution(DynInstPtr &inst)
if (inst->readNextPC() != thread->readNextPC()) {
warn("%lli: Instruction next PCs do not match! Inst: %#x, "
"checker: %#x",
- curTick, inst->readNextPC(), thread->readNextPC());
+ curTick(), inst->readNextPC(), thread->readNextPC());
handleError(inst);
}
@@ -388,7 +388,7 @@ Checker<DynInstPtr>::validateExecution(DynInstPtr &inst)
thread->readMiscRegNoEffect(misc_reg_idx)) {
warn("%lli: Misc reg idx %i (side effect) does not match! "
"Inst: %#x, checker: %#x",
- curTick, misc_reg_idx,
+ curTick(), misc_reg_idx,
inst->tcBase()->readMiscRegNoEffect(misc_reg_idx),
thread->readMiscRegNoEffect(misc_reg_idx));
handleError(inst);
@@ -402,7 +402,7 @@ Checker<DynInstPtr>::validateState()
{
if (updateThisCycle) {
warn("%lli: Instruction PC %#x results didn't match up, copying all "
- "registers from main CPU", curTick, unverifiedInst->readPC());
+ "registers from main CPU", curTick(), unverifiedInst->readPC());
// Heavy-weight copying of all registers
thread->copyArchRegs(unverifiedInst->tcBase());
// Also advance the PC. Hopefully no PC-based events happened.
diff --git a/src/cpu/inorder/cpu.cc b/src/cpu/inorder/cpu.cc
index 5b5f524f2..fe2c8e708 100644
--- a/src/cpu/inorder/cpu.cc
+++ b/src/cpu/inorder/cpu.cc
@@ -158,7 +158,7 @@ void
InOrderCPU::CPUEvent::scheduleEvent(int delay)
{
assert(!scheduled() || squashed());
- cpu->reschedule(this, cpu->nextCycle(curTick + cpu->ticks(delay)), true);
+ cpu->reschedule(this, cpu->nextCycle(curTick() + cpu->ticks(delay)), true);
}
void
@@ -337,7 +337,7 @@ InOrderCPU::InOrderCPU(Params *params)
dummyBufferInst = new InOrderDynInst(this, NULL, 0, 0, 0);
dummyBufferInst->setSquashed();
- lastRunningCycle = curTick;
+ lastRunningCycle = curTick();
// Reset CPU to reset state.
#if FULL_SYSTEM
@@ -528,17 +528,17 @@ InOrderCPU::tick()
if (!tickEvent.scheduled()) {
if (_status == SwitchedOut) {
// increment stat
- lastRunningCycle = curTick;
+ lastRunningCycle = curTick();
} else if (!activityRec.active()) {
DPRINTF(InOrderCPU, "sleeping CPU.\n");
- lastRunningCycle = curTick;
+ lastRunningCycle = curTick();
timesIdled++;
} else {
- //Tick next_tick = curTick + cycles(1);
+ //Tick next_tick = curTick() + cycles(1);
//tickEvent.schedule(next_tick);
- schedule(&tickEvent, nextCycle(curTick + 1));
+ schedule(&tickEvent, nextCycle(curTick() + 1));
DPRINTF(InOrderCPU, "Scheduled CPU for next tick @ %i.\n",
- nextCycle(curTick + 1));
+ nextCycle(curTick() + 1));
}
}
@@ -693,10 +693,10 @@ InOrderCPU::scheduleCpuEvent(CPUEventType c_event, Fault fault,
CPUEvent *cpu_event = new CPUEvent(this, c_event, fault, tid, inst,
event_pri_offset);
- Tick sked_tick = nextCycle(curTick + ticks(delay));
+ Tick sked_tick = nextCycle(curTick() + ticks(delay));
if (delay >= 0) {
DPRINTF(InOrderCPU, "Scheduling CPU Event (%s) for cycle %i, [tid:%i].\n",
- eventNames[c_event], curTick + delay, tid);
+ eventNames[c_event], curTick() + delay, tid);
schedule(cpu_event, sked_tick);
} else {
cpu_event->process();
@@ -791,7 +791,7 @@ InOrderCPU::activateThread(ThreadID tid)
activateThreadInPipeline(tid);
- thread[tid]->lastActivate = curTick;
+ thread[tid]->lastActivate = curTick();
tcBase(tid)->setStatus(ThreadContext::Active);
@@ -963,7 +963,7 @@ InOrderCPU::suspendThread(ThreadID tid)
tid);
deactivateThread(tid);
suspendedThreads.push_back(tid);
- thread[tid]->lastSuspend = curTick;
+ thread[tid]->lastSuspend = curTick();
tcBase(tid)->setStatus(ThreadContext::Suspended);
}
@@ -1124,7 +1124,7 @@ InOrderCPU::instDone(DynInstPtr inst, ThreadID tid)
// Finalize Trace Data For Instruction
if (inst->traceData) {
- //inst->traceData->setCycle(curTick);
+ //inst->traceData->setCycle(curTick());
inst->traceData->setFetchSeq(inst->seqNum);
//inst->traceData->setCPSeq(cpu->tcBase(tid)->numInst);
inst->traceData->dump();
@@ -1390,7 +1390,7 @@ InOrderCPU::wakeCPU()
DPRINTF(Activity, "Waking up CPU\n");
- Tick extra_cycles = tickToCycles((curTick - 1) - lastRunningCycle);
+ Tick extra_cycles = tickToCycles((curTick() - 1) - lastRunningCycle);
idleCycles += extra_cycles;
for (int stage_num = 0; stage_num < NumStages; stage_num++) {
@@ -1399,7 +1399,7 @@ InOrderCPU::wakeCPU()
numCycles += extra_cycles;
- schedule(&tickEvent, nextCycle(curTick));
+ schedule(&tickEvent, nextCycle(curTick()));
}
#if FULL_SYSTEM
diff --git a/src/cpu/inorder/cpu.hh b/src/cpu/inorder/cpu.hh
index 38978dbd7..c3658373a 100644
--- a/src/cpu/inorder/cpu.hh
+++ b/src/cpu/inorder/cpu.hh
@@ -157,7 +157,7 @@ class InOrderCPU : public BaseCPU
void scheduleTickEvent(int delay)
{
assert(!tickEvent.scheduled() || tickEvent.squashed());
- reschedule(&tickEvent, nextCycle(curTick + ticks(delay)), true);
+ reschedule(&tickEvent, nextCycle(curTick() + ticks(delay)), true);
}
/** Unschedule tick event, regardless of its current state. */
diff --git a/src/cpu/inorder/inorder_dyn_inst.cc b/src/cpu/inorder/inorder_dyn_inst.cc
index 70fd59418..6afe35862 100644
--- a/src/cpu/inorder/inorder_dyn_inst.cc
+++ b/src/cpu/inorder/inorder_dyn_inst.cc
@@ -442,7 +442,7 @@ InOrderDynInst::setMiscRegOperand(const StaticInst *si, int idx,
{
instResult[idx].type = Integer;
instResult[idx].val.integer = val;
- instResult[idx].tick = curTick;
+ instResult[idx].tick = curTick();
DPRINTF(InOrderDynInst, "[tid:%i]: [sn:%i] Setting Misc Reg. Operand %i "
"being set to %#x.\n", threadNumber, seqNum, idx, val);
@@ -472,7 +472,7 @@ InOrderDynInst::setIntRegOperand(const StaticInst *si, int idx, IntReg val)
{
instResult[idx].type = Integer;
instResult[idx].val.integer = val;
- instResult[idx].tick = curTick;
+ instResult[idx].tick = curTick();
DPRINTF(InOrderDynInst, "[tid:%i]: [sn:%i] Setting Result Int Reg. %i "
"being set to %#x (result-tick:%i).\n",
@@ -485,7 +485,7 @@ InOrderDynInst::setFloatRegOperand(const StaticInst *si, int idx, FloatReg val)
{
instResult[idx].val.dbl = val;
instResult[idx].type = Float;
- instResult[idx].tick = curTick;
+ instResult[idx].tick = curTick();
DPRINTF(InOrderDynInst, "[tid:%i]: [sn:%i] Setting Result Float Reg. %i "
"being set to %#x (result-tick:%i).\n",
@@ -499,7 +499,7 @@ InOrderDynInst::setFloatRegOperandBits(const StaticInst *si, int idx,
{
instResult[idx].type = Integer;
instResult[idx].val.integer = val;
- instResult[idx].tick = curTick;
+ instResult[idx].tick = curTick();
DPRINTF(InOrderDynInst, "[tid:%i]: [sn:%i] Setting Result Float Reg. %i "
"being set to %#x (result-tick:%i).\n",
diff --git a/src/cpu/inorder/pipeline_stage.cc b/src/cpu/inorder/pipeline_stage.cc
index dc36965b0..2ac402fae 100644
--- a/src/cpu/inorder/pipeline_stage.cc
+++ b/src/cpu/inorder/pipeline_stage.cc
@@ -338,7 +338,7 @@ void
PipelineStage::squashDueToBranch(DynInstPtr &inst, ThreadID tid)
{
if (cpu->squashSeqNum[tid] < inst->seqNum &&
- cpu->lastSquashCycle[tid] == curTick){
+ cpu->lastSquashCycle[tid] == curTick()){
DPRINTF(Resource, "Ignoring [sn:%i] branch squash signal due to "
"another stage's squash signal for after [sn:%i].\n",
inst->seqNum, cpu->squashSeqNum[tid]);
@@ -371,7 +371,7 @@ PipelineStage::squashDueToBranch(DynInstPtr &inst, ThreadID tid)
// Save squash num for later stage use
cpu->squashSeqNum[tid] = squash_seq_num;
- cpu->lastSquashCycle[tid] = curTick;
+ cpu->lastSquashCycle[tid] = curTick();
}
}
@@ -969,7 +969,7 @@ PipelineStage::processInstSchedule(DynInstPtr inst,int &reqs_processed)
inst->popSchedEntry();
} else {
panic("%i: encountered %s fault!\n",
- curTick, req->fault->name());
+ curTick(), req->fault->name());
}
reqs_processed++;
@@ -1075,7 +1075,7 @@ PipelineStage::sendInstToNextStage(DynInstPtr inst)
if (nextStageQueueValid(inst->nextStage - 1)) {
if (inst->seqNum > cpu->squashSeqNum[tid] &&
- curTick == cpu->lastSquashCycle[tid]) {
+ curTick() == cpu->lastSquashCycle[tid]) {
DPRINTF(InOrderStage, "[tid:%u]: [sn:%i]: squashed, skipping "
"insertion into stage %i queue.\n", tid, inst->seqNum,
inst->nextStage);
@@ -1107,7 +1107,7 @@ PipelineStage::sendInstToNextStage(DynInstPtr inst)
// Take note of trace data for this inst & stage
if (inst->traceData) {
- inst->traceData->setStageCycle(stageNum, curTick);
+ inst->traceData->setStageCycle(stageNum, curTick());
}
}
diff --git a/src/cpu/inorder/reg_dep_map.cc b/src/cpu/inorder/reg_dep_map.cc
index 50636cb81..98a0727a9 100644
--- a/src/cpu/inorder/reg_dep_map.cc
+++ b/src/cpu/inorder/reg_dep_map.cc
@@ -181,14 +181,14 @@ RegDepMap::canForward(unsigned reg_idx, DynInstPtr inst)
assert(dest_reg_idx != -1);
if (forward_inst->isExecuted() &&
- forward_inst->readResultTime(dest_reg_idx) < curTick) {
+ forward_inst->readResultTime(dest_reg_idx) < curTick()) {
return forward_inst;
} else {
if (!forward_inst->isExecuted()) {
DPRINTF(RegDepMap, "[sn:%i] Can't get value through "
"forwarding, [sn:%i] has not been executed yet.\n",
inst->seqNum, forward_inst->seqNum);
- } else if (forward_inst->readResultTime(dest_reg_idx) >= curTick) {
+ } else if (forward_inst->readResultTime(dest_reg_idx) >= curTick()) {
DPRINTF(RegDepMap, "[sn:%i] Can't get value through "
"forwarding, [sn:%i] executed on tick:%i.\n",
inst->seqNum, forward_inst->seqNum,
diff --git a/src/cpu/inorder/resource.cc b/src/cpu/inorder/resource.cc
index 0d8dbb3e4..8c5f86c73 100644
--- a/src/cpu/inorder/resource.cc
+++ b/src/cpu/inorder/resource.cc
@@ -366,7 +366,7 @@ Resource::scheduleEvent(int slot_idx, int delay)
DPRINTF(Resource, "[tid:%i]: Scheduling event for [sn:%i] on tick %i.\n",
reqMap[slot_idx]->inst->readTid(),
reqMap[slot_idx]->inst->seqNum,
- cpu->ticks(delay) + curTick);
+ cpu->ticks(delay) + curTick());
resourceEvent[slot_idx].scheduleEvent(delay);
}
@@ -504,5 +504,5 @@ ResourceEvent::scheduleEvent(int delay)
{
assert(!scheduled() || squashed());
resource->cpu->reschedule(this,
- curTick + resource->ticks(delay), true);
+ curTick() + resource->ticks(delay), true);
}
diff --git a/src/cpu/inorder/resource_pool.9stage.cc b/src/cpu/inorder/resource_pool.9stage.cc
index 05ce91faa..746d3f33b 100644
--- a/src/cpu/inorder/resource_pool.9stage.cc
+++ b/src/cpu/inorder/resource_pool.9stage.cc
@@ -177,13 +177,13 @@ ResourcePool::scheduleEvent(InOrderCPU::CPUEventType e_type, DynInstPtr inst,
case InOrderCPU::ActivateThread:
{
DPRINTF(Resource, "Scheduling Activate Thread Resource Pool Event for tick %i.\n",
- curTick + delay);
+ curTick() + delay);
res_pool_event->setEvent(e_type,
inst,
inst->squashingStage,
inst->bdelaySeqNum,
inst->readTid());
- res_pool_event->schedule(curTick + cpu->cycles(delay));
+ res_pool_event->schedule(curTick() + cpu->cycles(delay));
}
break;
@@ -192,7 +192,7 @@ ResourcePool::scheduleEvent(InOrderCPU::CPUEventType e_type, DynInstPtr inst,
case InOrderCPU::DeallocateThread:
{
DPRINTF(Resource, "Scheduling Deactivate Thread Resource Pool Event for tick %i.\n",
- curTick + delay);
+ curTick() + delay);
res_pool_event->setEvent(e_type,
inst,
@@ -200,7 +200,7 @@ ResourcePool::scheduleEvent(InOrderCPU::CPUEventType e_type, DynInstPtr inst,
inst->bdelaySeqNum,
tid);
- res_pool_event->schedule(curTick + cpu->cycles(delay));
+ res_pool_event->schedule(curTick() + cpu->cycles(delay));
}
break;
@@ -208,14 +208,14 @@ ResourcePool::scheduleEvent(InOrderCPU::CPUEventType e_type, DynInstPtr inst,
case ResourcePool::InstGraduated:
{
DPRINTF(Resource, "Scheduling Inst-Graduated Resource Pool Event for tick %i.\n",
- curTick + delay);
+ curTick() + delay);
res_pool_event->setEvent(e_type,
inst,
inst->squashingStage,
inst->seqNum,
inst->readTid());
- res_pool_event->schedule(curTick + cpu->cycles(delay));
+ res_pool_event->schedule(curTick() + cpu->cycles(delay));
}
break;
@@ -223,13 +223,13 @@ ResourcePool::scheduleEvent(InOrderCPU::CPUEventType e_type, DynInstPtr inst,
case ResourcePool::SquashAll:
{
DPRINTF(Resource, "Scheduling Squash Resource Pool Event for tick %i.\n",
- curTick + delay);
+ curTick() + delay);
res_pool_event->setEvent(e_type,
inst,
inst->squashingStage,
inst->bdelaySeqNum,
inst->readTid());
- res_pool_event->schedule(curTick + cpu->cycles(delay));
+ res_pool_event->schedule(curTick() + cpu->cycles(delay));
}
break;
@@ -345,9 +345,9 @@ void
ResourcePool::ResPoolEvent::scheduleEvent(int delay)
{
if (squashed())
- reschedule(curTick + resPool->cpu->cycles(delay));
+ reschedule(curTick() + resPool->cpu->cycles(delay));
else if (!scheduled())
- schedule(curTick + resPool->cpu->cycles(delay));
+ schedule(curTick() + resPool->cpu->cycles(delay));
}
/** Unschedule resource event, regardless of its current state. */
diff --git a/src/cpu/inorder/resource_pool.cc b/src/cpu/inorder/resource_pool.cc
index e199d2bc2..e8400405a 100644
--- a/src/cpu/inorder/resource_pool.cc
+++ b/src/cpu/inorder/resource_pool.cc
@@ -244,14 +244,14 @@ ResourcePool::scheduleEvent(InOrderCPU::CPUEventType e_type, DynInstPtr inst,
{
assert(delay >= 0);
- Tick when = cpu->nextCycle(curTick + cpu->ticks(delay));
+ Tick when = cpu->nextCycle(curTick() + cpu->ticks(delay));
switch (e_type)
{
case InOrderCPU::ActivateThread:
{
DPRINTF(Resource, "Scheduling Activate Thread Resource Pool Event "
- "for tick %i, [tid:%i].\n", curTick + delay,
+ "for tick %i, [tid:%i].\n", curTick() + delay,
inst->readTid());
ResPoolEvent *res_pool_event =
new ResPoolEvent(this,
@@ -269,7 +269,7 @@ ResourcePool::scheduleEvent(InOrderCPU::CPUEventType e_type, DynInstPtr inst,
{
DPRINTF(Resource, "Scheduling Deactivate Thread Resource Pool "
- "Event for tick %i.\n", curTick + delay);
+ "Event for tick %i.\n", curTick() + delay);
ResPoolEvent *res_pool_event =
new ResPoolEvent(this,
e_type,
@@ -304,7 +304,7 @@ ResourcePool::scheduleEvent(InOrderCPU::CPUEventType e_type, DynInstPtr inst,
case ResourcePool::InstGraduated:
{
DPRINTF(Resource, "Scheduling Inst-Graduated Resource Pool "
- "Event for tick %i.\n", curTick + delay);
+ "Event for tick %i.\n", curTick() + delay);
ResPoolEvent *res_pool_event =
new ResPoolEvent(this,e_type,
inst,
@@ -318,7 +318,7 @@ ResourcePool::scheduleEvent(InOrderCPU::CPUEventType e_type, DynInstPtr inst,
case ResourcePool::SquashAll:
{
DPRINTF(Resource, "Scheduling Squash Resource Pool Event for "
- "tick %i.\n", curTick + delay);
+ "tick %i.\n", curTick() + delay);
ResPoolEvent *res_pool_event =
new ResPoolEvent(this,e_type,
inst,
@@ -333,7 +333,7 @@ ResourcePool::scheduleEvent(InOrderCPU::CPUEventType e_type, DynInstPtr inst,
{
DPRINTF(Resource, "Scheduling Squash Due to Memory Stall Resource "
"Pool Event for tick %i.\n",
- curTick + delay);
+ curTick() + delay);
ResPoolEvent *res_pool_event =
new ResPoolEvent(this,e_type,
inst,
@@ -348,7 +348,7 @@ ResourcePool::scheduleEvent(InOrderCPU::CPUEventType e_type, DynInstPtr inst,
{
DPRINTF(Resource, "Scheduling UpdatePC Resource Pool Event "
"for tick %i.\n",
- curTick + delay);
+ curTick() + delay);
ResPoolEvent *res_pool_event = new ResPoolEvent(this,e_type,
inst,
inst->squashingStage,
@@ -542,7 +542,7 @@ ResourcePool::ResPoolEvent::scheduleEvent(int delay)
{
InOrderCPU *cpu = resPool->cpu;
assert(!scheduled() || squashed());
- cpu->reschedule(this, cpu->nextCycle(curTick + cpu->ticks(delay)), true);
+ cpu->reschedule(this, cpu->nextCycle(curTick() + cpu->ticks(delay)), true);
}
/** Unschedule resource event, regardless of its current state. */
diff --git a/src/cpu/inorder/resources/branch_predictor.cc b/src/cpu/inorder/resources/branch_predictor.cc
index 33b67ce4a..dc036df64 100644
--- a/src/cpu/inorder/resources/branch_predictor.cc
+++ b/src/cpu/inorder/resources/branch_predictor.cc
@@ -80,7 +80,7 @@ BranchPredictor::execute(int slot_num)
case PredictBranch:
{
if (inst->seqNum > cpu->squashSeqNum[tid] &&
- curTick == cpu->lastSquashCycle[tid]) {
+ curTick() == cpu->lastSquashCycle[tid]) {
DPRINTF(InOrderStage, "[tid:%u]: [sn:%i]: squashed, "
"skipping prediction \n", tid, inst->seqNum);
} else {
@@ -125,7 +125,7 @@ BranchPredictor::execute(int slot_num)
case UpdatePredictor:
{
if (inst->seqNum > cpu->squashSeqNum[tid] &&
- curTick == cpu->lastSquashCycle[tid]) {
+ curTick() == cpu->lastSquashCycle[tid]) {
DPRINTF(InOrderStage, "[tid:%u]: [sn:%i]: squashed, "
"skipping branch predictor update \n",
tid, inst->seqNum);
diff --git a/src/cpu/inorder/resources/cache_unit.cc b/src/cpu/inorder/resources/cache_unit.cc
index 5f9ddd372..bb4caf48a 100644
--- a/src/cpu/inorder/resources/cache_unit.cc
+++ b/src/cpu/inorder/resources/cache_unit.cc
@@ -63,7 +63,7 @@ Tick
CacheUnit::CachePort::recvAtomic(PacketPtr pkt)
{
panic("CacheUnit::CachePort doesn't expect recvAtomic callback!");
- return curTick;
+ return curTick();
}
void
@@ -167,7 +167,7 @@ CacheUnit::getSlot(DynInstPtr inst)
if (new_slot == -1)
return -1;
- inst->memTime = curTick;
+ inst->memTime = curTick();
setAddrDependency(inst);
return new_slot;
} else {
@@ -343,7 +343,7 @@ CacheUnit::getRequest(DynInstPtr inst, int stage_num, int res_idx,
break;
default:
- panic("%i: Unexpected request type (%i) to %s", curTick,
+ panic("%i: Unexpected request type (%i) to %s", curTick(),
sched_entry->cmd, name());
}
@@ -482,7 +482,7 @@ CacheUnit::read(DynInstPtr inst, Addr addr,
if (secondAddr > addr && !inst->split2ndAccess) {
DPRINTF(InOrderCachePort, "%i: sn[%i] Split Read Access (1 of 2) for "
- "(%#x, %#x).\n", curTick, inst->seqNum, addr, secondAddr);
+ "(%#x, %#x).\n", curTick(), inst->seqNum, addr, secondAddr);
// Save All "Total" Split Information
// ==============================
diff --git a/src/cpu/inorder/resources/execution_unit.cc b/src/cpu/inorder/resources/execution_unit.cc
index 4342042e9..9ba7a64c7 100644
--- a/src/cpu/inorder/resources/execution_unit.cc
+++ b/src/cpu/inorder/resources/execution_unit.cc
@@ -55,7 +55,7 @@ ExecutionUnit::regStats()
.name(name() + ".predictedNotTakenIncorrect")
.desc("Number of Branches Incorrectly Predicted As Not Taken).");
- lastExecuteCycle = curTick;
+ lastExecuteCycle = curTick();
executions
.name(name() + ".executions")
@@ -98,8 +98,8 @@ ExecutionUnit::execute(int slot_num)
{
case ExecuteInst:
{
- if (curTick != lastExecuteCycle) {
- lastExecuteCycle = curTick;
+ if (curTick() != lastExecuteCycle) {
+ lastExecuteCycle = curTick();
}
diff --git a/src/cpu/inorder/resources/fetch_seq_unit.cc b/src/cpu/inorder/resources/fetch_seq_unit.cc
index 3bfe912e7..7fd57cc75 100644
--- a/src/cpu/inorder/resources/fetch_seq_unit.cc
+++ b/src/cpu/inorder/resources/fetch_seq_unit.cc
@@ -210,13 +210,13 @@ FetchSeqUnit::squash(DynInstPtr inst, int squash_stage,
}
if (squashSeqNum[tid] <= done_seq_num &&
- lastSquashCycle[tid] == curTick) {
+ lastSquashCycle[tid] == curTick()) {
DPRINTF(InOrderFetchSeq, "[tid:%i]: Ignoring squash from stage %i, "
"since there is an outstanding squash that is older.\n",
tid, squash_stage);
} else {
squashSeqNum[tid] = done_seq_num;
- lastSquashCycle[tid] = curTick;
+ lastSquashCycle[tid] = curTick();
// If The very next instruction number is the done seq. num,
// then we haven't seen the delay slot yet ... if it isn't
diff --git a/src/cpu/inorder/resources/graduation_unit.cc b/src/cpu/inorder/resources/graduation_unit.cc
index a9b96a49f..9d19c2eef 100644
--- a/src/cpu/inorder/resources/graduation_unit.cc
+++ b/src/cpu/inorder/resources/graduation_unit.cc
@@ -64,8 +64,8 @@ GraduationUnit::execute(int slot_num)
// @TODO: Instructions should never really get to this point since
// this should be handled through the request interface. Check to
// make sure this happens and delete this code.
- if (lastCycleGrad != curTick) {
- lastCycleGrad = curTick;
+ if (lastCycleGrad != curTick()) {
+ lastCycleGrad = curTick();
numCycleGrad = 0;
} else if (numCycleGrad > width) {
DPRINTF(InOrderGraduation,
@@ -91,7 +91,7 @@ GraduationUnit::execute(int slot_num)
}
if (inst->traceData) {
- inst->traceData->setStageCycle(stage_num, curTick);
+ inst->traceData->setStageCycle(stage_num, curTick());
}
// Tell CPU that instruction is finished processing
diff --git a/src/cpu/inorder/resources/mult_div_unit.cc b/src/cpu/inorder/resources/mult_div_unit.cc
index d9a887571..55df1cc43 100644
--- a/src/cpu/inorder/resources/mult_div_unit.cc
+++ b/src/cpu/inorder/resources/mult_div_unit.cc
@@ -163,7 +163,7 @@ MultDivUnit::getSlot(DynInstPtr inst)
}
}
- if (lastMDUCycle + repeat_rate > curTick) {
+ if (lastMDUCycle + repeat_rate > curTick()) {
DPRINTF(InOrderMDU, "MDU not ready to process another inst. until %i, "
"denying request.\n", lastMDUCycle + repeat_rate);
return -1;
@@ -173,7 +173,7 @@ MultDivUnit::getSlot(DynInstPtr inst)
rval);
if (rval != -1) {
- lastMDUCycle = curTick;
+ lastMDUCycle = curTick();
lastOpType = inst->opClass();
lastInstName = inst->staticInst->getName();
}
diff --git a/src/cpu/o3/commit_impl.hh b/src/cpu/o3/commit_impl.hh
index e8681f6e3..2912cbb03 100644
--- a/src/cpu/o3/commit_impl.hh
+++ b/src/cpu/o3/commit_impl.hh
@@ -475,7 +475,7 @@ DefaultCommit<Impl>::generateTrapEvent(ThreadID tid)
TrapEvent *trap = new TrapEvent(this, tid);
- cpu->schedule(trap, curTick + trapLatency);
+ cpu->schedule(trap, curTick() + trapLatency);
trapInFlight[tid] = true;
}
diff --git a/src/cpu/o3/cpu.cc b/src/cpu/o3/cpu.cc
index 21c5cc706..9becc6601 100644
--- a/src/cpu/o3/cpu.cc
+++ b/src/cpu/o3/cpu.cc
@@ -334,7 +334,7 @@ FullO3CPU<Impl>::FullO3CPU(DerivO3CPUParams *params)
// Setup the ROB for whichever stages need it.
commit.setROB(&rob);
- lastRunningCycle = curTick;
+ lastRunningCycle = curTick();
lastActivatedCycle = -1;
#if 0
@@ -538,13 +538,13 @@ FullO3CPU<Impl>::tick()
getState() == SimObject::Drained) {
DPRINTF(O3CPU, "Switched out!\n");
// increment stat
- lastRunningCycle = curTick;
+ lastRunningCycle = curTick();
} else if (!activityRec.active() || _status == Idle) {
DPRINTF(O3CPU, "Idle!\n");
- lastRunningCycle = curTick;
+ lastRunningCycle = curTick();
timesIdled++;
} else {
- schedule(tickEvent, nextCycle(curTick + ticks(1)));
+ schedule(tickEvent, nextCycle(curTick() + ticks(1)));
DPRINTF(O3CPU, "Scheduling next tick!\n");
}
}
@@ -639,13 +639,13 @@ FullO3CPU<Impl>::activateContext(ThreadID tid, int delay)
// Needs to set each stage to running as well.
if (delay){
DPRINTF(O3CPU, "[tid:%i]: Scheduling thread context to activate "
- "on cycle %d\n", tid, curTick + ticks(delay));
+ "on cycle %d\n", tid, curTick() + ticks(delay));
scheduleActivateThreadEvent(tid, delay);
} else {
activateThread(tid);
}
- if (lastActivatedCycle < curTick) {
+ if (lastActivatedCycle < curTick()) {
scheduleTickEvent(delay);
// Be sure to signal that there's some activity so the CPU doesn't
@@ -653,7 +653,7 @@ FullO3CPU<Impl>::activateContext(ThreadID tid, int delay)
activityRec.activity();
fetch.wakeFromQuiesce();
- lastActivatedCycle = curTick;
+ lastActivatedCycle = curTick();
_status = Running;
}
@@ -666,7 +666,7 @@ FullO3CPU<Impl>::deallocateContext(ThreadID tid, bool remove, int delay)
// Schedule removal of thread data from CPU
if (delay){
DPRINTF(O3CPU, "[tid:%i]: Scheduling thread context to deallocate "
- "on cycle %d\n", tid, curTick + ticks(delay));
+ "on cycle %d\n", tid, curTick() + ticks(delay));
scheduleDeallocateContextEvent(tid, remove, delay);
return false;
} else {
@@ -1552,8 +1552,8 @@ FullO3CPU<Impl>::wakeCPU()
DPRINTF(Activity, "Waking up CPU\n");
- idleCycles += tickToCycles((curTick - 1) - lastRunningCycle);
- numCycles += tickToCycles((curTick - 1) - lastRunningCycle);
+ idleCycles += tickToCycles((curTick() - 1) - lastRunningCycle);
+ numCycles += tickToCycles((curTick() - 1) - lastRunningCycle);
schedule(tickEvent, nextCycle());
}
diff --git a/src/cpu/o3/cpu.hh b/src/cpu/o3/cpu.hh
index 832d98f55..e3d13c840 100644
--- a/src/cpu/o3/cpu.hh
+++ b/src/cpu/o3/cpu.hh
@@ -140,9 +140,9 @@ class FullO3CPU : public BaseO3CPU
void scheduleTickEvent(int delay)
{
if (tickEvent.squashed())
- reschedule(tickEvent, nextCycle(curTick + ticks(delay)));
+ reschedule(tickEvent, nextCycle(curTick() + ticks(delay)));
else if (!tickEvent.scheduled())
- schedule(tickEvent, nextCycle(curTick + ticks(delay)));
+ schedule(tickEvent, nextCycle(curTick() + ticks(delay)));
}
/** Unschedule tick event, regardless of its current state. */
@@ -182,10 +182,10 @@ class FullO3CPU : public BaseO3CPU
// Schedule thread to activate, regardless of its current state.
if (activateThreadEvent[tid].squashed())
reschedule(activateThreadEvent[tid],
- nextCycle(curTick + ticks(delay)));
+ nextCycle(curTick() + ticks(delay)));
else if (!activateThreadEvent[tid].scheduled())
schedule(activateThreadEvent[tid],
- nextCycle(curTick + ticks(delay)));
+ nextCycle(curTick() + ticks(delay)));
}
/** Unschedule actiavte thread event, regardless of its current state. */
@@ -235,10 +235,10 @@ class FullO3CPU : public BaseO3CPU
// Schedule thread to activate, regardless of its current state.
if (deallocateContextEvent[tid].squashed())
reschedule(deallocateContextEvent[tid],
- nextCycle(curTick + ticks(delay)));
+ nextCycle(curTick() + ticks(delay)));
else if (!deallocateContextEvent[tid].scheduled())
schedule(deallocateContextEvent[tid],
- nextCycle(curTick + ticks(delay)));
+ nextCycle(curTick() + ticks(delay)));
}
/** Unschedule thread deallocation in CPU */
diff --git a/src/cpu/o3/fetch_impl.hh b/src/cpu/o3/fetch_impl.hh
index cca6b7a57..28ef423c4 100644
--- a/src/cpu/o3/fetch_impl.hh
+++ b/src/cpu/o3/fetch_impl.hh
@@ -68,7 +68,7 @@ Tick
DefaultFetch<Impl>::IcachePort::recvAtomic(PacketPtr pkt)
{
panic("DefaultFetch doesn't expect recvAtomic callback!");
- return curTick;
+ return curTick();
}
template<class Impl>
@@ -625,7 +625,7 @@ DefaultFetch<Impl>::fetchCacheLine(Addr vaddr, Fault &ret_fault, ThreadID tid,
DPRINTF(Fetch, "[tid:%i]: Doing cache access.\n", tid);
- lastIcacheStall[tid] = curTick;
+ lastIcacheStall[tid] = curTick();
DPRINTF(Activity, "[tid:%i]: Activity: Waiting on I-cache "
"response.\n", tid);
@@ -992,7 +992,7 @@ DefaultFetch<Impl>::buildInst(ThreadID tid, StaticInstPtr staticInst,
#if TRACING_ON
if (trace) {
instruction->traceData =
- cpu->getTracer()->getInstRecord(curTick, cpu->tcBase(tid),
+ cpu->getTracer()->getInstRecord(curTick(), cpu->tcBase(tid),
instruction->staticInst, thisPC, curMacroop);
}
#else
diff --git a/src/cpu/o3/inst_queue_impl.hh b/src/cpu/o3/inst_queue_impl.hh
index b944979f2..ce408dfd0 100644
--- a/src/cpu/o3/inst_queue_impl.hh
+++ b/src/cpu/o3/inst_queue_impl.hh
@@ -754,7 +754,7 @@ InstructionQueue<Impl>::scheduleReadyInsts()
FUCompletion *execution = new FUCompletion(issuing_inst,
idx, this);
- cpu->schedule(execution, curTick + cpu->ticks(op_latency - 1));
+ cpu->schedule(execution, curTick() + cpu->ticks(op_latency - 1));
// @todo: Enforce that issue_latency == 1 or op_latency
if (issue_latency > 1) {
diff --git a/src/cpu/o3/lsq_impl.hh b/src/cpu/o3/lsq_impl.hh
index e780b14e4..ddfc63754 100644
--- a/src/cpu/o3/lsq_impl.hh
+++ b/src/cpu/o3/lsq_impl.hh
@@ -55,7 +55,7 @@ Tick
LSQ<Impl>::DcachePort::recvAtomic(PacketPtr pkt)
{
panic("O3CPU model does not work with atomic mode!");
- return curTick;
+ return curTick();
}
template <class Impl>
diff --git a/src/cpu/o3/lsq_unit.hh b/src/cpu/o3/lsq_unit.hh
index e9e3fea96..2bb42cadc 100644
--- a/src/cpu/o3/lsq_unit.hh
+++ b/src/cpu/o3/lsq_unit.hh
@@ -624,7 +624,7 @@ LSQUnit<Impl>::read(Request *req, Request *sreqLow, Request *sreqHigh,
// We'll say this has a 1 cycle load-store forwarding latency
// for now.
// @todo: Need to make this a parameter.
- cpu->schedule(wb, curTick);
+ cpu->schedule(wb, curTick());
// Don't need to do anything special for split loads.
if (TheISA::HasUnalignedMemAcc && sreqLow) {
diff --git a/src/cpu/o3/lsq_unit_impl.hh b/src/cpu/o3/lsq_unit_impl.hh
index 807c0b527..64d674666 100644
--- a/src/cpu/o3/lsq_unit_impl.hh
+++ b/src/cpu/o3/lsq_unit_impl.hh
@@ -783,7 +783,7 @@ LSQUnit<Impl>::writebackStores()
"Instantly completing it.\n",
inst->seqNum);
WritebackEvent *wb = new WritebackEvent(inst, data_pkt, this);
- cpu->schedule(wb, curTick + 1);
+ cpu->schedule(wb, curTick() + 1);
completeStore(storeWBIdx);
incrStIdx(storeWBIdx);
continue;
diff --git a/src/cpu/o3/thread_context_impl.hh b/src/cpu/o3/thread_context_impl.hh
index 060baed32..e7b0540d1 100755
--- a/src/cpu/o3/thread_context_impl.hh
+++ b/src/cpu/o3/thread_context_impl.hh
@@ -115,7 +115,7 @@ O3ThreadContext<Impl>::activate(int delay)
return;
#if FULL_SYSTEM
- thread->lastActivate = curTick;
+ thread->lastActivate = curTick();
#endif
thread->setStatus(ThreadContext::Active);
@@ -135,8 +135,8 @@ O3ThreadContext<Impl>::suspend(int delay)
return;
#if FULL_SYSTEM
- thread->lastActivate = curTick;
- thread->lastSuspend = curTick;
+ thread->lastActivate = curTick();
+ thread->lastSuspend = curTick();
#endif
/*
#if FULL_SYSTEM
diff --git a/src/cpu/ozone/back_end.hh b/src/cpu/ozone/back_end.hh
index 7a2da3239..95e079d0d 100644
--- a/src/cpu/ozone/back_end.hh
+++ b/src/cpu/ozone/back_end.hh
@@ -468,7 +468,7 @@ BackEnd<Impl>::read(RequestPtr req, T &data, int load_idx)
if (fault == NoFault && dcacheInterface) {
memReq->cmd = Read;
memReq->completionEvent = NULL;
- memReq->time = curTick;
+ memReq->time = curTick();
memReq->flags &= ~INST_READ;
MemAccessResult result = dcacheInterface->access(memReq);
@@ -481,7 +481,7 @@ BackEnd<Impl>::read(RequestPtr req, T &data, int load_idx)
--funcExeInst;
memReq->completionEvent = &cacheCompletionEvent;
- lastDcacheStall = curTick;
+ lastDcacheStall = curTick();
// unscheduleTickEvent();
// status = DcacheMissStall;
DPRINTF(OzoneCPU, "Dcache miss stall!\n");
@@ -510,7 +510,7 @@ BackEnd<Impl>::write(RequestPtr req, T &data, int store_idx)
memReq->cmd = Write;
memcpy(memReq->data,(uint8_t *)&data,memReq->size);
memReq->completionEvent = NULL;
- memReq->time = curTick;
+ memReq->time = curTick();
memReq->flags &= ~INST_READ;
MemAccessResult result = dcacheInterface->access(memReq);
@@ -519,7 +519,7 @@ BackEnd<Impl>::write(RequestPtr req, T &data, int store_idx)
// at some point.
if (result != MA_HIT && dcacheInterface->doEvents()) {
memReq->completionEvent = &cacheCompletionEvent;
- lastDcacheStall = curTick;
+ lastDcacheStall = curTick();
// unscheduleTickEvent();
// status = DcacheMissStall;
DPRINTF(OzoneCPU, "Dcache miss stall!\n");
diff --git a/src/cpu/ozone/cpu.hh b/src/cpu/ozone/cpu.hh
index fcc5602eb..1b196feb4 100644
--- a/src/cpu/ozone/cpu.hh
+++ b/src/cpu/ozone/cpu.hh
@@ -277,9 +277,9 @@ class OzoneCPU : public BaseCPU
void scheduleTickEvent(int delay)
{
if (tickEvent.squashed())
- tickEvent.reschedule(curTick + ticks(delay));
+ tickEvent.reschedule(curTick() + ticks(delay));
else if (!tickEvent.scheduled())
- tickEvent.schedule(curTick + ticks(delay));
+ tickEvent.schedule(curTick() + ticks(delay));
}
/// Unschedule tick event, regardless of its current state.
diff --git a/src/cpu/ozone/cpu_impl.hh b/src/cpu/ozone/cpu_impl.hh
index a22ada5d0..dd6c3dcf1 100644
--- a/src/cpu/ozone/cpu_impl.hh
+++ b/src/cpu/ozone/cpu_impl.hh
@@ -301,7 +301,7 @@ OzoneCPU<Impl>::takeOverFrom(BaseCPU *oldCPU)
if (tc->status() == ThreadContext::Active &&
_status != Running) {
_status = Running;
- tickEvent.schedule(curTick);
+ tickEvent.schedule(curTick());
}
}
// Nothing running, change status to reflect that we're no longer
@@ -525,7 +525,7 @@ OzoneCPU<Impl>::tick()
comInstEventQueue[0]->serviceEvents(numInst);
if (!tickEvent.scheduled() && _status == Running)
- tickEvent.schedule(curTick + ticks(1));
+ tickEvent.schedule(curTick() + ticks(1));
}
template <class Impl>
diff --git a/src/cpu/ozone/front_end_impl.hh b/src/cpu/ozone/front_end_impl.hh
index 884136927..d7ed0b77a 100644
--- a/src/cpu/ozone/front_end_impl.hh
+++ b/src/cpu/ozone/front_end_impl.hh
@@ -52,7 +52,7 @@ Tick
FrontEnd<Impl>::IcachePort::recvAtomic(PacketPtr pkt)
{
panic("FrontEnd doesn't expect recvAtomic callback!");
- return curTick;
+ return curTick();
}
template<class Impl>
@@ -432,7 +432,7 @@ FrontEnd<Impl>::tick()
#if FULL_SYSTEM
if (inst->isQuiesce()) {
-// warn("%lli: Quiesce instruction encountered, halting fetch!", curTick);
+// warn("%lli: Quiesce instruction encountered, halting fetch!", curTick());
status = QuiescePending;
break;
}
@@ -894,7 +894,7 @@ FrontEnd<Impl>::getInstFromCacheline()
instruction->staticInst->disassemble(PC));
instruction->traceData =
- Trace::getInstRecord(curTick, tc,
+ Trace::getInstRecord(curTick(), tc,
instruction->staticInst,
instruction->readPC());
diff --git a/src/cpu/ozone/inorder_back_end.hh b/src/cpu/ozone/inorder_back_end.hh
index 9c2699610..fcdc2a38a 100644
--- a/src/cpu/ozone/inorder_back_end.hh
+++ b/src/cpu/ozone/inorder_back_end.hh
@@ -210,7 +210,7 @@ InorderBackEnd<Impl>::read(Addr addr, T &data, unsigned flags)
if (fault == NoFault && dcacheInterface) {
memReq->cmd = Read;
memReq->completionEvent = NULL;
- memReq->time = curTick;
+ memReq->time = curTick();
MemAccessResult result = dcacheInterface->access(memReq);
// Ugly hack to get an event scheduled *only* if the access is
@@ -220,7 +220,7 @@ InorderBackEnd<Impl>::read(Addr addr, T &data, unsigned flags)
// Fix this hack for keeping funcExeInst correct with loads that
// are executed twice.
memReq->completionEvent = &cacheCompletionEvent;
- lastDcacheStall = curTick;
+ lastDcacheStall = curTick();
// unscheduleTickEvent();
status = DcacheMissLoadStall;
DPRINTF(IBE, "Dcache miss stall!\n");
@@ -246,7 +246,7 @@ InorderBackEnd<Impl>::write(T data, Addr addr, unsigned flags, uint64_t *res)
memReq->cmd = Write;
// memcpy(memReq->data,(uint8_t *)&data,memReq->size);
memReq->completionEvent = NULL;
- memReq->time = curTick;
+ memReq->time = curTick();
MemAccessResult result = dcacheInterface->access(memReq);
// Ugly hack to get an event scheduled *only* if the access is
@@ -254,7 +254,7 @@ InorderBackEnd<Impl>::write(T data, Addr addr, unsigned flags, uint64_t *res)
// at some point.
if (result != MA_HIT) {
memReq->completionEvent = &cacheCompletionEvent;
- lastDcacheStall = curTick;
+ lastDcacheStall = curTick();
// unscheduleTickEvent();
status = DcacheMissStoreStall;
DPRINTF(IBE, "Dcache miss stall!\n");
@@ -280,7 +280,7 @@ InorderBackEnd<Impl>::read(MemReqPtr &req, T &data, int load_idx)
// Fault fault = cpu->translateDataReadReq(req);
req->cmd = Read;
req->completionEvent = NULL;
- req->time = curTick;
+ req->time = curTick();
assert(!req->data);
req->data = new uint8_t[64];
Fault fault = cpu->read(req, data);
@@ -295,7 +295,7 @@ InorderBackEnd<Impl>::read(MemReqPtr &req, T &data, int load_idx)
// at some point.
if (result != MA_HIT) {
req->completionEvent = &cacheCompletionEvent;
- lastDcacheStall = curTick;
+ lastDcacheStall = curTick();
// unscheduleTickEvent();
status = DcacheMissLoadStall;
DPRINTF(IBE, "Dcache miss load stall!\n");
@@ -320,7 +320,7 @@ InorderBackEnd<Impl>::write(MemReqPtr &req, T &data, int store_idx)
req->cmd = Write;
req->completionEvent = NULL;
- req->time = curTick;
+ req->time = curTick();
assert(!req->data);
req->data = new uint8_t[64];
memcpy(req->data, (uint8_t *)&data, req->size);
@@ -347,7 +347,7 @@ InorderBackEnd<Impl>::write(MemReqPtr &req, T &data, int store_idx)
req->data = new uint8_t[64];
memcpy(req->data,(uint8_t *)&data,req->size);
req->completionEvent = NULL;
- req->time = curTick;
+ req->time = curTick();
MemAccessResult result = dcacheInterface->access(req);
// Ugly hack to get an event scheduled *only* if the access is
@@ -355,7 +355,7 @@ InorderBackEnd<Impl>::write(MemReqPtr &req, T &data, int store_idx)
// at some point.
if (result != MA_HIT) {
req->completionEvent = &cacheCompletionEvent;
- lastDcacheStall = curTick;
+ lastDcacheStall = curTick();
// unscheduleTickEvent();
status = DcacheMissStoreStall;
DPRINTF(IBE, "Dcache miss store stall!\n");
diff --git a/src/cpu/ozone/inst_queue_impl.hh b/src/cpu/ozone/inst_queue_impl.hh
index ae2e3b09b..0068f2977 100644
--- a/src/cpu/ozone/inst_queue_impl.hh
+++ b/src/cpu/ozone/inst_queue_impl.hh
@@ -673,7 +673,7 @@ InstQueue<Impl>::scheduleReadyInsts()
FUCompletion *execution = new FUCompletion(issuing_inst,
idx, this);
- execution->schedule(curTick + issue_latency - 1);
+ execution->schedule(curTick() + issue_latency - 1);
} else {
i2e_info->insts[exec_queue_slot++] = issuing_inst;
i2e_info->size++;
diff --git a/src/cpu/ozone/lsq_unit.hh b/src/cpu/ozone/lsq_unit.hh
index d8e402b65..0216c5013 100644
--- a/src/cpu/ozone/lsq_unit.hh
+++ b/src/cpu/ozone/lsq_unit.hh
@@ -485,7 +485,7 @@ OzoneLSQ<Impl>::read(MemReqPtr &req, T &data, int load_idx)
req->cmd = Read;
assert(!req->completionEvent);
req->completionEvent = NULL;
- req->time = curTick;
+ req->time = curTick();
assert(!req->data);
req->data = new uint8_t[64];
@@ -502,7 +502,7 @@ OzoneLSQ<Impl>::read(MemReqPtr &req, T &data, int load_idx)
// We'll say this has a 1 cycle load-store forwarding latency
// for now.
// FIXME - Need to make this a parameter.
- wb->schedule(curTick);
+ wb->schedule(curTick());
// Should keep track of stat for forwarded data
return NoFault;
@@ -562,7 +562,7 @@ OzoneLSQ<Impl>::read(MemReqPtr &req, T &data, int load_idx)
// Setup MemReq pointer
req->cmd = Read;
req->completionEvent = NULL;
- req->time = curTick;
+ req->time = curTick();
assert(!req->data);
req->data = new uint8_t[64];
@@ -585,7 +585,7 @@ OzoneLSQ<Impl>::read(MemReqPtr &req, T &data, int load_idx)
DPRINTF(Activity, "Activity: ld accessing mem miss [sn:%lli]\n",
inst->seqNum);
- lastDcacheStall = curTick;
+ lastDcacheStall = curTick();
_status = DcacheMissStall;
diff --git a/src/cpu/ozone/lsq_unit_impl.hh b/src/cpu/ozone/lsq_unit_impl.hh
index dd44adf6e..f36b870d8 100644
--- a/src/cpu/ozone/lsq_unit_impl.hh
+++ b/src/cpu/ozone/lsq_unit_impl.hh
@@ -557,7 +557,7 @@ OzoneLSQ<Impl>::writebackStores()
// Fault fault = cpu->translateDataReadReq(req);
req->cmd = Write;
req->completionEvent = NULL;
- req->time = curTick;
+ req->time = curTick();
assert(!req->data);
req->data = new uint8_t[64];
memcpy(req->data, (uint8_t *)&storeQueue[storeWBIdx].data, req->size);
@@ -615,7 +615,7 @@ OzoneLSQ<Impl>::writebackStores()
req->completionEvent = new
StoreCompletionEvent(storeWBIdx, wb, this);
- lastDcacheStall = curTick;
+ lastDcacheStall = curTick();
_status = DcacheMissStall;
@@ -637,7 +637,7 @@ OzoneLSQ<Impl>::writebackStores()
typename BackEnd::LdWritebackEvent *wb =
new typename BackEnd::LdWritebackEvent(storeQueue[storeWBIdx].inst,
be);
- wb->schedule(curTick);
+ wb->schedule(curTick());
}
completeStore(storeWBIdx);
diff --git a/src/cpu/ozone/lw_back_end_impl.hh b/src/cpu/ozone/lw_back_end_impl.hh
index 465fccbdb..8000c142e 100644
--- a/src/cpu/ozone/lw_back_end_impl.hh
+++ b/src/cpu/ozone/lw_back_end_impl.hh
@@ -45,7 +45,7 @@ LWBackEnd<Impl>::generateTrapEvent(Tick latency)
TrapEvent *trap = new TrapEvent(this);
- trap->schedule(curTick + cpu->ticks(latency));
+ trap->schedule(curTick() + cpu->ticks(latency));
thread->trapPending = true;
}
@@ -1226,7 +1226,7 @@ LWBackEnd<Impl>::commitInst(int inst_num)
// Write the done sequence number here.
toIEW->doneSeqNum = inst->seqNum;
- lastCommitCycle = curTick;
+ lastCommitCycle = curTick();
#if FULL_SYSTEM
int count = 0;
diff --git a/src/cpu/ozone/lw_lsq.hh b/src/cpu/ozone/lw_lsq.hh
index ee0312969..9605f175e 100644
--- a/src/cpu/ozone/lw_lsq.hh
+++ b/src/cpu/ozone/lw_lsq.hh
@@ -581,7 +581,7 @@ OzoneLWLSQ<Impl>::read(RequestPtr req, T &data, int load_idx)
// We'll say this has a 1 cycle load-store forwarding latency
// for now.
// @todo: Need to make this a parameter.
- wb->schedule(curTick);
+ wb->schedule(curTick());
// Should keep track of stat for forwarded data
return NoFault;
diff --git a/src/cpu/ozone/lw_lsq_impl.hh b/src/cpu/ozone/lw_lsq_impl.hh
index c714c5d38..0c4e4b9c7 100644
--- a/src/cpu/ozone/lw_lsq_impl.hh
+++ b/src/cpu/ozone/lw_lsq_impl.hh
@@ -65,7 +65,7 @@ Tick
OzoneLWLSQ<Impl>::DcachePort::recvAtomic(PacketPtr pkt)
{
panic("O3CPU model does not work with atomic mode!");
- return curTick;
+ return curTick();
}
template <class Impl>
@@ -677,7 +677,7 @@ OzoneLWLSQ<Impl>::writebackStores()
be->addDcacheMiss(inst);
- lastDcacheStall = curTick;
+ lastDcacheStall = curTick();
_status = DcacheMissStall;
diff --git a/src/cpu/pc_event.cc b/src/cpu/pc_event.cc
index 533d61498..09bd66819 100644
--- a/src/cpu/pc_event.cc
+++ b/src/cpu/pc_event.cc
@@ -111,7 +111,7 @@ PCEventQueue::dump() const
const_iterator e = pc_map.end();
for (; i != e; ++i)
- cprintf("%d: event at %#x: %s\n", curTick, (*i)->pc(),
+ cprintf("%d: event at %#x: %s\n", curTick(), (*i)->pc(),
(*i)->descr());
}
diff --git a/src/cpu/simple/atomic.cc b/src/cpu/simple/atomic.cc
index de26ca2f8..35ad46158 100644
--- a/src/cpu/simple/atomic.cc
+++ b/src/cpu/simple/atomic.cc
@@ -267,7 +267,7 @@ AtomicSimpleCPU::activateContext(int thread_num, int delay)
numCycles += tickToCycles(thread->lastActivate - thread->lastSuspend);
//Make sure ticks are still on multiples of cycles
- schedule(tickEvent, nextCycle(curTick + ticks(delay)));
+ schedule(tickEvent, nextCycle(curTick() + ticks(delay)));
_status = Running;
}
@@ -731,7 +731,7 @@ AtomicSimpleCPU::tick()
latency = ticks(1);
if (_status != Idle)
- schedule(tickEvent, curTick + latency);
+ schedule(tickEvent, curTick() + latency);
}
diff --git a/src/cpu/simple/base.cc b/src/cpu/simple/base.cc
index c993110e1..13ef0648c 100644
--- a/src/cpu/simple/base.cc
+++ b/src/cpu/simple/base.cc
@@ -330,7 +330,7 @@ BaseSimpleCPU::preExecute()
if(curStaticInst)
{
#if TRACING_ON
- traceData = tracer->getInstRecord(curTick, tc,
+ traceData = tracer->getInstRecord(curTick(), tc,
curStaticInst, thread->pcState(), curMacroStaticInst);
DPRINTF(Decode,"Decode: Decoded %s instruction: 0x%x\n",
diff --git a/src/cpu/simple/timing.cc b/src/cpu/simple/timing.cc
index 7307f2fc9..9192c0808 100644
--- a/src/cpu/simple/timing.cc
+++ b/src/cpu/simple/timing.cc
@@ -85,7 +85,7 @@ Tick
TimingSimpleCPU::CpuPort::recvAtomic(PacketPtr pkt)
{
panic("TimingSimpleCPU doesn't expect recvAtomic callback!");
- return curTick;
+ return curTick();
}
void
@@ -189,7 +189,7 @@ TimingSimpleCPU::switchOut()
{
assert(_status == Running || _status == Idle);
_status = SwitchedOut;
- numCycles += tickToCycles(curTick - previousTick);
+ numCycles += tickToCycles(curTick() - previousTick);
// If we've been scheduled to resume but are then told to switch out,
// we'll need to cancel it.
@@ -217,7 +217,7 @@ TimingSimpleCPU::takeOverFrom(BaseCPU *oldCPU)
_status = Idle;
}
assert(threadContexts.size() == 1);
- previousTick = curTick;
+ previousTick = curTick();
}
@@ -235,7 +235,7 @@ TimingSimpleCPU::activateContext(int thread_num, int delay)
_status = Running;
// kick things off by initiating the fetch of the next instruction
- schedule(fetchEvent, nextCycle(curTick + ticks(delay)));
+ schedule(fetchEvent, nextCycle(curTick() + ticks(delay)));
}
@@ -266,7 +266,7 @@ TimingSimpleCPU::handleReadPacket(PacketPtr pkt)
if (req->isMmapedIpr()) {
Tick delay;
delay = TheISA::handleIprRead(thread->getTC(), pkt);
- new IprEvent(pkt, this, nextCycle(curTick + delay));
+ new IprEvent(pkt, this, nextCycle(curTick() + delay));
_status = DcacheWaitResponse;
dcache_pkt = NULL;
} else if (!dcachePort.sendTiming(pkt)) {
@@ -355,8 +355,8 @@ TimingSimpleCPU::translationFault(Fault fault)
{
// fault may be NoFault in cases where a fault is suppressed,
// for instance prefetches.
- numCycles += tickToCycles(curTick - previousTick);
- previousTick = curTick;
+ numCycles += tickToCycles(curTick() - previousTick);
+ previousTick = curTick();
if (traceData) {
// Since there was a fault, we shouldn't trace this instruction.
@@ -538,7 +538,7 @@ TimingSimpleCPU::handleWritePacket()
if (req->isMmapedIpr()) {
Tick delay;
delay = TheISA::handleIprWrite(thread->getTC(), dcache_pkt);
- new IprEvent(dcache_pkt, this, nextCycle(curTick + delay));
+ new IprEvent(dcache_pkt, this, nextCycle(curTick() + delay));
_status = DcacheWaitResponse;
dcache_pkt = NULL;
} else if (!dcachePort.sendTiming(dcache_pkt)) {
@@ -726,8 +726,8 @@ TimingSimpleCPU::fetch()
_status = IcacheWaitResponse;
completeIfetch(NULL);
- numCycles += tickToCycles(curTick - previousTick);
- previousTick = curTick;
+ numCycles += tickToCycles(curTick() - previousTick);
+ previousTick = curTick();
}
}
@@ -754,8 +754,8 @@ TimingSimpleCPU::sendFetch(Fault fault, RequestPtr req, ThreadContext *tc)
advanceInst(fault);
}
- numCycles += tickToCycles(curTick - previousTick);
- previousTick = curTick;
+ numCycles += tickToCycles(curTick() - previousTick);
+ previousTick = curTick();
}
@@ -787,8 +787,8 @@ TimingSimpleCPU::completeIfetch(PacketPtr pkt)
_status = Running;
- numCycles += tickToCycles(curTick - previousTick);
- previousTick = curTick;
+ numCycles += tickToCycles(curTick() - previousTick);
+ previousTick = curTick();
if (getState() == SimObject::Draining) {
if (pkt) {
@@ -862,9 +862,9 @@ TimingSimpleCPU::IcachePort::recvTiming(PacketPtr pkt)
{
if (pkt->isResponse() && !pkt->wasNacked()) {
// delay processing of returned data until next CPU clock edge
- Tick next_tick = cpu->nextCycle(curTick);
+ Tick next_tick = cpu->nextCycle(curTick());
- if (next_tick == curTick)
+ if (next_tick == curTick())
cpu->completeIfetch(pkt);
else
tickEvent.schedule(pkt, next_tick);
@@ -906,8 +906,8 @@ TimingSimpleCPU::completeDataAccess(PacketPtr pkt)
assert(_status == DcacheWaitResponse || _status == DTBWaitResponse ||
pkt->req->getFlags().isSet(Request::NO_ACCESS));
- numCycles += tickToCycles(curTick - previousTick);
- previousTick = curTick;
+ numCycles += tickToCycles(curTick() - previousTick);
+ previousTick = curTick();
if (pkt->senderState) {
SplitFragmentSenderState * send_state =
@@ -994,9 +994,9 @@ TimingSimpleCPU::DcachePort::recvTiming(PacketPtr pkt)
{
if (pkt->isResponse() && !pkt->wasNacked()) {
// delay processing of returned data until next CPU clock edge
- Tick next_tick = cpu->nextCycle(curTick);
+ Tick next_tick = cpu->nextCycle(curTick());
- if (next_tick == curTick) {
+ if (next_tick == curTick()) {
cpu->completeDataAccess(pkt);
} else {
if (!tickEvent.scheduled()) {
diff --git a/src/cpu/simple_thread.cc b/src/cpu/simple_thread.cc
index 51e27188c..61174dd4e 100644
--- a/src/cpu/simple_thread.cc
+++ b/src/cpu/simple_thread.cc
@@ -234,7 +234,7 @@ SimpleThread::activate(int delay)
if (status() == ThreadContext::Active)
return;
- lastActivate = curTick;
+ lastActivate = curTick();
// if (status() == ThreadContext::Unallocated) {
// cpu->activateWhenReady(_threadId);
@@ -253,8 +253,8 @@ SimpleThread::suspend()
if (status() == ThreadContext::Suspended)
return;
- lastActivate = curTick;
- lastSuspend = curTick;
+ lastActivate = curTick();
+ lastSuspend = curTick();
/*
#if FULL_SYSTEM
// Don't change the status from active if there are pending interrupts
diff --git a/src/cpu/static_inst.cc b/src/cpu/static_inst.cc
index f2a72c96a..f1ec05802 100644
--- a/src/cpu/static_inst.cc
+++ b/src/cpu/static_inst.cc
@@ -51,7 +51,7 @@ StaticInst::~StaticInst()
void
StaticInst::dumpDecodeCacheStats()
{
- cerr << "Decode hash table stats @ " << curTick << ":" << endl;
+ cerr << "Decode hash table stats @ " << curTick() << ":" << endl;
cerr << "\tnum entries = " << decodeCache.size() << endl;
cerr << "\tnum buckets = " << decodeCache.bucket_count() << endl;
vector<int> hist(100, 0);
diff --git a/src/cpu/testers/directedtest/RubyDirectedTester.cc b/src/cpu/testers/directedtest/RubyDirectedTester.cc
index 56352d14a..cc7c84dd3 100644
--- a/src/cpu/testers/directedtest/RubyDirectedTester.cc
+++ b/src/cpu/testers/directedtest/RubyDirectedTester.cc
@@ -114,7 +114,7 @@ RubyDirectedTester::hitCallback(NodeID proc, Addr addr)
addr);
generator->performCallback(proc, addr);
- schedule(directedStartEvent, curTick);
+ schedule(directedStartEvent, curTick());
}
void
@@ -122,7 +122,7 @@ RubyDirectedTester::wakeup()
{
if (m_requests_completed < m_requests_to_complete) {
if (!generator->initiate()) {
- schedule(directedStartEvent, curTick + 1);
+ schedule(directedStartEvent, curTick() + 1);
}
} else {
exitSimLoop("Ruby DirectedTester completed");
diff --git a/src/cpu/testers/memtest/memtest.cc b/src/cpu/testers/memtest/memtest.cc
index 6f3bbd77e..9440bfec2 100644
--- a/src/cpu/testers/memtest/memtest.cc
+++ b/src/cpu/testers/memtest/memtest.cc
@@ -69,14 +69,14 @@ MemTest::CpuPort::recvAtomic(PacketPtr pkt)
// must be snoop upcall
assert(pkt->isRequest());
assert(pkt->getDest() == Packet::Broadcast);
- return curTick;
+ return curTick();
}
void
MemTest::CpuPort::recvFunctional(PacketPtr pkt)
{
//Do nothing if we see one come through
-// if (curTick != 0)//Supress warning durring initialization
+// if (curTick() != 0)//Suppress warning during initialization
// warn("Functional Writes not implemented in MemTester\n");
//Need to find any response values that intersect and update
return;
@@ -220,7 +220,7 @@ MemTest::completeRequest(PacketPtr pkt)
if (memcmp(pkt_data, data, pkt->getSize()) != 0) {
panic("%s: read of %x (blk %x) @ cycle %d "
"returns %x, expected %x\n", name(),
- req->getPaddr(), blockAddr(req->getPaddr()), curTick,
+ req->getPaddr(), blockAddr(req->getPaddr()), curTick(),
*pkt_data, *data);
}
@@ -229,7 +229,7 @@ MemTest::completeRequest(PacketPtr pkt)
if (numReads == (uint64_t)nextProgressMessage) {
ccprintf(cerr, "%s: completed %d read accesses @%d\n",
- name(), numReads, curTick);
+ name(), numReads, curTick());
nextProgressMessage += progressInterval;
}
@@ -272,13 +272,13 @@ void
MemTest::tick()
{
if (!tickEvent.scheduled())
- schedule(tickEvent, curTick + ticks(1));
+ schedule(tickEvent, curTick() + ticks(1));
if (++noResponseCycles >= 500000) {
if (issueDmas) {
cerr << "DMA tester ";
}
- cerr << name() << ": deadlocked at cycle " << curTick << endl;
+ cerr << name() << ": deadlocked at cycle " << curTick() << endl;
fatal("");
}
diff --git a/src/cpu/testers/rubytest/Check.cc b/src/cpu/testers/rubytest/Check.cc
index 0d384b08a..a33351312 100644
--- a/src/cpu/testers/rubytest/Check.cc
+++ b/src/cpu/testers/rubytest/Check.cc
@@ -98,7 +98,7 @@ Check::initiatePrefetch()
}
// Prefetches are assumed to be 0 sized
- Request *req = new Request(m_address.getAddress(), 0, flags, curTick,
+ Request *req = new Request(m_address.getAddress(), 0, flags, curTick(),
m_pc.getAddress());
PacketPtr pkt = new Packet(req, cmd, port->idx);
@@ -139,7 +139,7 @@ Check::initiateAction()
Address writeAddr(m_address.getAddress() + m_store_count);
// Stores are assumed to be 1 byte-sized
- Request *req = new Request(writeAddr.getAddress(), 1, flags, curTick,
+ Request *req = new Request(writeAddr.getAddress(), 1, flags, curTick(),
m_pc.getAddress());
Packet::Command cmd;
@@ -205,7 +205,7 @@ Check::initiateCheck()
// Checks are sized depending on the number of bytes written
Request *req = new Request(m_address.getAddress(), CHECK_SIZE, flags,
- curTick, m_pc.getAddress());
+ curTick(), m_pc.getAddress());
PacketPtr pkt = new Packet(req, MemCmd::ReadReq, port->idx);
uint8_t* dataArray = new uint8_t[CHECK_SIZE];
diff --git a/src/cpu/testers/rubytest/RubyTester.cc b/src/cpu/testers/rubytest/RubyTester.cc
index 8c5aafd89..1d477dad2 100644
--- a/src/cpu/testers/rubytest/RubyTester.cc
+++ b/src/cpu/testers/rubytest/RubyTester.cc
@@ -160,7 +160,7 @@ RubyTester::wakeup()
checkForDeadlock();
- schedule(checkStartEvent, curTick + m_wakeup_frequency);
+ schedule(checkStartEvent, curTick() + m_wakeup_frequency);
} else {
exitSimLoop("Ruby Tester completed");
}
diff --git a/src/cpu/trace/trace_cpu.cc b/src/cpu/trace/trace_cpu.cc
index b286f1e40..70aa1f042 100644
--- a/src/cpu/trace/trace_cpu.cc
+++ b/src/cpu/trace/trace_cpu.cc
@@ -66,13 +66,13 @@ TraceCPU::tick()
int instReqs = 0;
int dataReqs = 0;
- while (nextReq && curTick >= nextCycle) {
+ while (nextReq && curTick() >= nextCycle) {
assert(nextReq->thread_num < 4 && "Not enough threads");
if (nextReq->isInstFetch() && icacheInterface) {
if (icacheInterface->isBlocked())
break;
- nextReq->time = curTick;
+ nextReq->time = curTick();
if (nextReq->cmd == Squash) {
icacheInterface->squash(nextReq->asid);
} else {
@@ -91,7 +91,7 @@ TraceCPU::tick()
break;
++dataReqs;
- nextReq->time = curTick;
+ nextReq->time = curTick();
if (dcacheInterface->doEvents()) {
nextReq->completionEvent =
new TraceCompleteEvent(nextReq, this);
@@ -113,7 +113,7 @@ TraceCPU::tick()
tickEvent.schedule(mainEventQueue.nextEventTime() + ticks(1));
}
} else {
- tickEvent.schedule(max(curTick + ticks(1), nextCycle));
+ tickEvent.schedule(max(curTick() + ticks(1), nextCycle));
}
}
diff --git a/src/dev/alpha/backdoor.cc b/src/dev/alpha/backdoor.cc
index 660ef00c5..960832f8c 100644
--- a/src/dev/alpha/backdoor.cc
+++ b/src/dev/alpha/backdoor.cc
@@ -234,7 +234,7 @@ AlphaBackdoor::write(PacketPtr pkt)
default:
int cpunum = (daddr - offsetof(AlphaAccess, cpuStack)) /
sizeof(alphaAccess->cpuStack[0]);
- inform("Launching CPU %d @ %d", cpunum, curTick);
+ inform("Launching CPU %d @ %d", cpunum, curTick());
assert(val > 0 && "Must not access primary cpu");
if (cpunum >= 0 && cpunum < 64)
alphaAccess->cpuStack[cpunum] = val;
diff --git a/src/dev/arm/pl011.cc b/src/dev/arm/pl011.cc
index d8ea9409b..dd1c8863e 100644
--- a/src/dev/arm/pl011.cc
+++ b/src/dev/arm/pl011.cc
@@ -187,7 +187,7 @@ Pl011::write(PacketPtr pkt)
DPRINTF(Uart, "TX int enabled, scheduling interruptt\n");
rawInt.txim = 1;
if (!intEvent.scheduled())
- schedule(intEvent, curTick + intDelay);
+ schedule(intEvent, curTick() + intDelay);
}
break;
@@ -217,7 +217,7 @@ Pl011::write(PacketPtr pkt)
DPRINTF(Uart, "Writing to IMSC: TX int enabled, scheduling interruptt\n");
rawInt.txim = 1;
if (!intEvent.scheduled())
- schedule(intEvent, curTick + intDelay);
+ schedule(intEvent, curTick() + intDelay);
}
break;
@@ -252,7 +252,7 @@ Pl011::dataAvailable()
DPRINTF(Uart, "Data available, scheduling interrupt\n");
if (!intEvent.scheduled())
- schedule(intEvent, curTick + intDelay);
+ schedule(intEvent, curTick() + intDelay);
}
void
diff --git a/src/dev/arm/pl111.cc b/src/dev/arm/pl111.cc
index e78d28141..e597bf272 100644
--- a/src/dev/arm/pl111.cc
+++ b/src/dev/arm/pl111.cc
@@ -355,7 +355,7 @@ Pl111::readFramebuffer()
startAddr = lcdUpbase;
}
curAddr = 0;
- startTime = curTick;
+ startTime = curTick();
maxAddr = static_cast<Addr>(length*sizeof(uint32_t));
dmaPendingNum =0 ;
@@ -388,9 +388,9 @@ Pl111::dmaDone()
DPRINTF(PL111, " -- DMA pending number %d\n", dmaPendingNum);
if (maxAddr == curAddr && !dmaPendingNum) {
- if ((curTick - startTime) > maxFrameTime)
+ if ((curTick() - startTime) > maxFrameTime)
warn("CLCD controller buffer underrun, took %d cycles when should"
- " have taken %d\n", curTick - startTime, maxFrameTime);
+ " have taken %d\n", curTick() - startTime, maxFrameTime);
// double buffering so the vnc server doesn't see a tear in the screen
memcpy(frameBuffer, dmaBuffer, maxAddr);
@@ -400,7 +400,7 @@ Pl111::dmaDone()
writeBMP(frameBuffer);
DPRINTF(PL111, "-- schedule next dma read event at %d tick \n",
- maxFrameTime + curTick);
+ maxFrameTime + curTick());
schedule(readEvent, nextCycle(startTime + maxFrameTime));
}
@@ -415,7 +415,7 @@ Pl111::dmaDone()
Tick
Pl111::nextCycle()
{
- Tick nextTick = curTick + clock - 1;
+ Tick nextTick = curTick() + clock - 1;
nextTick -= nextTick%clock;
return nextTick;
}
@@ -427,7 +427,7 @@ Pl111::nextCycle(Tick beginTick)
if (nextTick%clock!=0)
nextTick = nextTick - (nextTick%clock) + clock;
- assert(nextTick >= curTick);
+ assert(nextTick >= curTick());
return nextTick;
}
diff --git a/src/dev/arm/rv_ctrl.cc b/src/dev/arm/rv_ctrl.cc
index 08ac07233..c0ba4c7aa 100644
--- a/src/dev/arm/rv_ctrl.cc
+++ b/src/dev/arm/rv_ctrl.cc
@@ -62,7 +62,7 @@ RealViewCtrl::read(PacketPtr pkt)
break;
case Clock24:
Tick clk;
- clk = (Tick)(curTick / (24 * SimClock::Float::MHz));
+ clk = (Tick)(curTick() / (24 * SimClock::Float::MHz));
pkt->set((uint32_t)(clk));
break;
case Flash:
diff --git a/src/dev/arm/timer_sp804.cc b/src/dev/arm/timer_sp804.cc
index f6bbfb515..04668d268 100644
--- a/src/dev/arm/timer_sp804.cc
+++ b/src/dev/arm/timer_sp804.cc
@@ -93,7 +93,7 @@ Sp804::Timer::read(PacketPtr pkt, Addr daddr)
DPRINTF(Timer, "Event schedule for %d, clock=%d, prescale=%d\n",
zeroEvent.when(), clock, control.timerPrescale);
Tick time;
- time = zeroEvent.when() - curTick;
+ time = zeroEvent.when() - curTick();
time = time / clock / power(16, control.timerPrescale);
DPRINTF(Timer, "-- returning counter at %d\n", time);
pkt->set<uint32_t>(time);
@@ -188,8 +188,8 @@ Sp804::Timer::restartCounter(uint32_t val)
DPRINTF(Timer, "-- Event was already schedule, de-scheduling\n");
parent->deschedule(zeroEvent);
}
- parent->schedule(zeroEvent, curTick + time);
- DPRINTF(Timer, "-- Scheduling new event for: %d\n", curTick + time);
+ parent->schedule(zeroEvent, curTick() + time);
+ DPRINTF(Timer, "-- Scheduling new event for: %d\n", curTick() + time);
}
void
diff --git a/src/dev/etherbus.cc b/src/dev/etherbus.cc
index 063a594e7..b072e16f8 100644
--- a/src/dev/etherbus.cc
+++ b/src/dev/etherbus.cc
@@ -87,7 +87,7 @@ bool
EtherBus::send(EtherInt *sndr, EthPacketPtr &pkt)
{
if (busy()) {
- DPRINTF(Ethernet, "ethernet packet not sent, bus busy\n", curTick);
+ DPRINTF(Ethernet, "ethernet packet not sent, bus busy\n", curTick());
return false;
}
@@ -99,7 +99,7 @@ EtherBus::send(EtherInt *sndr, EthPacketPtr &pkt)
int delay = (int)ceil(((double)pkt->length * ticksPerByte) + 1.0);
DPRINTF(Ethernet, "scheduling packet: delay=%d, (rate=%f)\n",
delay, ticksPerByte);
- schedule(event, curTick + delay);
+ schedule(event, curTick() + delay);
return true;
}
diff --git a/src/dev/etherdump.cc b/src/dev/etherdump.cc
index 9cb15c4e8..21532bd59 100644
--- a/src/dev/etherdump.cc
+++ b/src/dev/etherdump.cc
@@ -94,8 +94,8 @@ void
EtherDump::dumpPacket(EthPacketPtr &packet)
{
pcap_pkthdr pkthdr;
- pkthdr.seconds = curTick / SimClock::Int::s;
- pkthdr.microseconds = (curTick / SimClock::Int::us) % ULL(1000000);
+ pkthdr.seconds = curTick() / SimClock::Int::s;
+ pkthdr.microseconds = (curTick() / SimClock::Int::us) % ULL(1000000);
pkthdr.caplen = std::min(packet->length, maxlen);
pkthdr.len = packet->length;
stream->write(reinterpret_cast<char *>(&pkthdr), sizeof(pkthdr));
diff --git a/src/dev/etherlink.cc b/src/dev/etherlink.cc
index f3f38fc20..9d8d8cfa8 100644
--- a/src/dev/etherlink.cc
+++ b/src/dev/etherlink.cc
@@ -154,7 +154,7 @@ EtherLink::Link::txDone()
if (linkDelay > 0) {
DPRINTF(Ethernet, "packet delayed: delay=%d\n", linkDelay);
Event *event = new LinkDelayEvent(this, packet);
- parent->schedule(event, curTick + linkDelay);
+ parent->schedule(event, curTick() + linkDelay);
} else {
txComplete(packet);
}
@@ -183,7 +183,7 @@ EtherLink::Link::transmit(EthPacketPtr pkt)
DPRINTF(Ethernet, "scheduling packet: delay=%d, (rate=%f)\n",
delay, ticksPerByte);
- parent->schedule(doneEvent, curTick + delay);
+ parent->schedule(doneEvent, curTick() + delay);
return true;
}
diff --git a/src/dev/ethertap.cc b/src/dev/ethertap.cc
index 85d6370be..76952e5c0 100644
--- a/src/dev/ethertap.cc
+++ b/src/dev/ethertap.cc
@@ -246,7 +246,7 @@ EtherTap::process(int revent)
DPRINTF(Ethernet, "bus busy...buffer for retransmission\n");
packetBuffer.push(packet);
if (!txEvent.scheduled())
- schedule(txEvent, curTick + retryTime);
+ schedule(txEvent, curTick() + retryTime);
} else if (dump) {
dump->dump(packet);
}
@@ -269,7 +269,7 @@ EtherTap::retransmit()
}
if (!packetBuffer.empty() && !txEvent.scheduled())
- schedule(txEvent, curTick + retryTime);
+ schedule(txEvent, curTick() + retryTime);
}
EtherInt*
diff --git a/src/dev/i8254xGBe.cc b/src/dev/i8254xGBe.cc
index 2a044ebbe..78a897815 100644
--- a/src/dev/i8254xGBe.cc
+++ b/src/dev/i8254xGBe.cc
@@ -695,11 +695,11 @@ IGbE::postInterrupt(IntTypes t, bool now)
Tick itr_interval = SimClock::Int::ns * 256 * regs.itr.interval();
DPRINTF(EthernetIntr,
- "EINT: postInterrupt() curTick: %d itr: %d interval: %d\n",
- curTick, regs.itr.interval(), itr_interval);
+ "EINT: postInterrupt() curTick(): %d itr: %d interval: %d\n",
+ curTick(), regs.itr.interval(), itr_interval);
if (regs.itr.interval() == 0 || now ||
- lastInterrupt + itr_interval <= curTick) {
+ lastInterrupt + itr_interval <= curTick()) {
if (interEvent.scheduled()) {
deschedule(interEvent);
}
@@ -763,7 +763,7 @@ IGbE::cpuPostInt()
intrPost();
- lastInterrupt = curTick;
+ lastInterrupt = curTick();
}
void
@@ -801,7 +801,7 @@ IGbE::chkInterrupt()
DPRINTF(Ethernet,
"Possibly scheduling interrupt because of imr write\n");
if (!interEvent.scheduled()) {
- Tick t = curTick + SimClock::Int::ns * 256 * regs.itr.interval();
+ Tick t = curTick() + SimClock::Int::ns * 256 * regs.itr.interval();
DPRINTF(Ethernet, "Scheduling for %d\n", t);
schedule(interEvent, t);
}
@@ -888,7 +888,7 @@ IGbE::DescCache<T>::writeback(Addr aMask)
wbOut = max_to_wb;
assert(!wbDelayEvent.scheduled());
- igbe->schedule(wbDelayEvent, curTick + igbe->wbDelay);
+ igbe->schedule(wbDelayEvent, curTick() + igbe->wbDelay);
igbe->anBegin(annSmWb, "Prepare Writeback Desc");
}
@@ -898,7 +898,7 @@ IGbE::DescCache<T>::writeback1()
{
// If we're draining delay issuing this DMA
if (igbe->getState() != SimObject::Running) {
- igbe->schedule(wbDelayEvent, curTick + igbe->wbDelay);
+ igbe->schedule(wbDelayEvent, curTick() + igbe->wbDelay);
return;
}
@@ -969,7 +969,7 @@ IGbE::DescCache<T>::fetchDescriptors()
curFetching = max_to_fetch;
assert(!fetchDelayEvent.scheduled());
- igbe->schedule(fetchDelayEvent, curTick + igbe->fetchDelay);
+ igbe->schedule(fetchDelayEvent, curTick() + igbe->fetchDelay);
igbe->anBegin(annSmFetch, "Prepare Fetch Desc");
}
@@ -979,7 +979,7 @@ IGbE::DescCache<T>::fetchDescriptors1()
{
// If we're draining delay issuing this DMA
if (igbe->getState() != SimObject::Running) {
- igbe->schedule(fetchDelayEvent, curTick + igbe->fetchDelay);
+ igbe->schedule(fetchDelayEvent, curTick() + igbe->fetchDelay);
return;
}
@@ -1440,14 +1440,14 @@ IGbE::RxDescCache::pktComplete()
if (igbe->regs.rdtr.delay()) {
Tick delay = igbe->regs.rdtr.delay() * igbe->intClock();
DPRINTF(EthernetSM, "RXS: Scheduling DTR for %d\n", delay);
- igbe->reschedule(igbe->rdtrEvent, curTick + delay);
+ igbe->reschedule(igbe->rdtrEvent, curTick() + delay);
}
if (igbe->regs.radv.idv()) {
Tick delay = igbe->regs.radv.idv() * igbe->intClock();
DPRINTF(EthernetSM, "RXS: Scheduling ADV for %d\n", delay);
if (!igbe->radvEvent.scheduled()) {
- igbe->schedule(igbe->radvEvent, curTick + delay);
+ igbe->schedule(igbe->radvEvent, curTick() + delay);
}
}
@@ -1880,14 +1880,14 @@ IGbE::TxDescCache::pktComplete()
if (igbe->regs.tidv.idv()) {
Tick delay = igbe->regs.tidv.idv() * igbe->intClock();
DPRINTF(EthernetDesc, "setting tidv\n");
- igbe->reschedule(igbe->tidvEvent, curTick + delay, true);
+ igbe->reschedule(igbe->tidvEvent, curTick() + delay, true);
}
if (igbe->regs.tadv.idv() && igbe->regs.tidv.idv()) {
Tick delay = igbe->regs.tadv.idv() * igbe->intClock();
DPRINTF(EthernetDesc, "setting tadv\n");
if (!igbe->tadvEvent.scheduled()) {
- igbe->schedule(igbe->tadvEvent, curTick + delay);
+ igbe->schedule(igbe->tadvEvent, curTick() + delay);
}
}
}
@@ -2039,7 +2039,7 @@ IGbE::restartClock()
{
if (!tickEvent.scheduled() && (rxTick || txTick || txFifoTick) &&
getState() == SimObject::Running)
- schedule(tickEvent, (curTick / ticks(1)) * ticks(1) + ticks(1));
+ schedule(tickEvent, (curTick() / ticks(1)) * ticks(1) + ticks(1));
}
unsigned int
@@ -2420,7 +2420,7 @@ IGbE::tick()
if (rxTick || txTick || txFifoTick)
- schedule(tickEvent, curTick + ticks(1));
+ schedule(tickEvent, curTick() + ticks(1));
}
void
diff --git a/src/dev/ide_disk.cc b/src/dev/ide_disk.cc
index fe93924f9..a30811d4e 100644
--- a/src/dev/ide_disk.cc
+++ b/src/dev/ide_disk.cc
@@ -323,7 +323,7 @@ IdeDisk::doDmaTransfer()
dmaState, devState);
if (ctrl->dmaPending() || ctrl->getState() != SimObject::Running) {
- schedule(dmaTransferEvent, curTick + DMA_BACKOFF_PERIOD);
+ schedule(dmaTransferEvent, curTick() + DMA_BACKOFF_PERIOD);
return;
} else
ctrl->dmaRead(curPrdAddr, sizeof(PrdEntry_t), &dmaPrdReadEvent,
@@ -357,7 +357,7 @@ IdeDisk::doDmaDataRead()
DPRINTF(IdeDisk, "doDmaRead, diskDelay: %d totalDiskDelay: %d\n",
diskDelay, totalDiskDelay);
- schedule(dmaReadWaitEvent, curTick + totalDiskDelay);
+ schedule(dmaReadWaitEvent, curTick() + totalDiskDelay);
}
void
@@ -403,7 +403,7 @@ IdeDisk::doDmaRead()
}
if (ctrl->dmaPending() || ctrl->getState() != SimObject::Running) {
- schedule(dmaReadWaitEvent, curTick + DMA_BACKOFF_PERIOD);
+ schedule(dmaReadWaitEvent, curTick() + DMA_BACKOFF_PERIOD);
return;
} else if (!dmaReadCG->done()) {
assert(dmaReadCG->complete() < MAX_DMA_SIZE);
@@ -465,7 +465,7 @@ IdeDisk::doDmaDataWrite()
cmdBytesLeft -= SectorSize;
}
- schedule(dmaWriteWaitEvent, curTick + totalDiskDelay);
+ schedule(dmaWriteWaitEvent, curTick() + totalDiskDelay);
}
void
@@ -478,7 +478,7 @@ IdeDisk::doDmaWrite()
curPrd.getByteCount(), TheISA::PageBytes);
}
if (ctrl->dmaPending() || ctrl->getState() != SimObject::Running) {
- schedule(dmaWriteWaitEvent, curTick + DMA_BACKOFF_PERIOD);
+ schedule(dmaWriteWaitEvent, curTick() + DMA_BACKOFF_PERIOD);
return;
} else if (!dmaWriteCG->done()) {
assert(dmaWriteCG->complete() < MAX_DMA_SIZE);
@@ -553,7 +553,7 @@ IdeDisk::startDma(const uint32_t &prdTableBase)
dmaState = Dma_Transfer;
// schedule dma transfer (doDmaTransfer)
- schedule(dmaTransferEvent, curTick + 1);
+ schedule(dmaTransferEvent, curTick() + 1);
}
void
diff --git a/src/dev/intel_8254_timer.cc b/src/dev/intel_8254_timer.cc
index 9e507b968..aee716c01 100644
--- a/src/dev/intel_8254_timer.cc
+++ b/src/dev/intel_8254_timer.cc
@@ -281,9 +281,9 @@ Intel8254Timer::Counter::CounterEvent::setTo(int clocks)
{
if (clocks == 0)
panic("Timer can't be set to go off instantly.\n");
- DPRINTF(Intel8254Timer, "Timer set to curTick + %d\n",
+ DPRINTF(Intel8254Timer, "Timer set to curTick() + %d\n",
clocks * interval);
- counter->parent->schedule(this, curTick + clocks * interval);
+ counter->parent->schedule(this, curTick() + clocks * interval);
}
int
@@ -291,7 +291,7 @@ Intel8254Timer::Counter::CounterEvent::clocksLeft()
{
if (!scheduled())
return -1;
- return (when() - curTick + interval - 1) / interval;
+ return (when() - curTick() + interval - 1) / interval;
}
const char *
diff --git a/src/dev/io_device.cc b/src/dev/io_device.cc
index 08269bf51..be97bc4ad 100644
--- a/src/dev/io_device.cc
+++ b/src/dev/io_device.cc
@@ -121,7 +121,7 @@ DmaPort::recvTiming(PacketPtr pkt)
else if (backoffTime < maxBackoffDelay)
backoffTime <<= 1;
- reschedule(backoffEvent, curTick + backoffTime, true);
+ reschedule(backoffEvent, curTick() + backoffTime, true);
DPRINTF(DMA, "Backoff time set to %d ticks\n", backoffTime);
@@ -144,7 +144,7 @@ DmaPort::recvTiming(PacketPtr pkt)
if (state->totBytes == state->numBytes) {
if (state->completionEvent) {
if (state->delay)
- schedule(state->completionEvent, curTick + state->delay);
+ schedule(state->completionEvent, curTick() + state->delay);
else
state->completionEvent->process();
}
@@ -212,9 +212,9 @@ DmaPort::recvRetry()
} while (!backoffTime && result && transmitList.size());
if (transmitList.size() && backoffTime && !inRetry) {
- DPRINTF(DMA, "Scheduling backoff for %d\n", curTick+backoffTime);
+ DPRINTF(DMA, "Scheduling backoff for %d\n", curTick()+backoffTime);
if (!backoffEvent.scheduled())
- schedule(backoffEvent, backoffTime + curTick);
+ schedule(backoffEvent, backoffTime + curTick());
}
DPRINTF(DMA, "TransmitList: %d, backoffTime: %d inRetry: %d es: %d\n",
transmitList.size(), backoffTime, inRetry,
@@ -299,8 +299,8 @@ DmaPort::sendDma()
if (transmitList.size() && backoffTime && !inRetry &&
!backoffEvent.scheduled()) {
DPRINTF(DMA, "-- Scheduling backoff timer for %d\n",
- backoffTime+curTick);
- schedule(backoffEvent, backoffTime + curTick);
+ backoffTime+curTick());
+ schedule(backoffEvent, backoffTime + curTick());
}
} else if (state == Enums::atomic) {
transmitList.pop_front();
@@ -322,7 +322,7 @@ DmaPort::sendDma()
if (state->totBytes == state->numBytes) {
if (state->completionEvent) {
assert(!state->completionEvent->scheduled());
- schedule(state->completionEvent, curTick + lat + state->delay);
+ schedule(state->completionEvent, curTick() + lat + state->delay);
}
delete state;
delete pkt->req;
diff --git a/src/dev/mc146818.cc b/src/dev/mc146818.cc
index 16ed58e46..987b1bcd3 100644
--- a/src/dev/mc146818.cc
+++ b/src/dev/mc146818.cc
@@ -214,9 +214,9 @@ MC146818::serialize(const string &base, ostream &os)
// save the timer tick and rtc clock tick values to correctly reschedule
// them during unserialize
//
- Tick rtcTimerInterruptTickOffset = event.when() - curTick;
+ Tick rtcTimerInterruptTickOffset = event.when() - curTick();
SERIALIZE_SCALAR(rtcTimerInterruptTickOffset);
- Tick rtcClockTickOffset = event.when() - curTick;
+ Tick rtcClockTickOffset = event.when() - curTick();
SERIALIZE_SCALAR(rtcClockTickOffset);
}
@@ -234,30 +234,30 @@ MC146818::unserialize(const string &base, Checkpoint *cp,
//
Tick rtcTimerInterruptTickOffset;
UNSERIALIZE_SCALAR(rtcTimerInterruptTickOffset);
- reschedule(event, curTick + rtcTimerInterruptTickOffset);
+ reschedule(event, curTick() + rtcTimerInterruptTickOffset);
Tick rtcClockTickOffset;
UNSERIALIZE_SCALAR(rtcClockTickOffset);
- reschedule(tickEvent, curTick + rtcClockTickOffset);
+ reschedule(tickEvent, curTick() + rtcClockTickOffset);
}
MC146818::RTCEvent::RTCEvent(MC146818 * _parent, Tick i)
: parent(_parent), interval(i)
{
DPRINTF(MC146818, "RTC Event Initilizing\n");
- parent->schedule(this, curTick + interval);
+ parent->schedule(this, curTick() + interval);
}
void
MC146818::RTCEvent::scheduleIntr()
{
- parent->schedule(this, curTick + interval);
+ parent->schedule(this, curTick() + interval);
}
void
MC146818::RTCEvent::process()
{
DPRINTF(MC146818, "RTC Timer Interrupt\n");
- parent->schedule(this, curTick + interval);
+ parent->schedule(this, curTick() + interval);
parent->handleEvent();
}
@@ -271,7 +271,7 @@ void
MC146818::RTCTickEvent::process()
{
DPRINTF(MC146818, "RTC clock tick\n");
- parent->schedule(this, curTick + SimClock::Int::s);
+ parent->schedule(this, curTick() + SimClock::Int::s);
parent->tickClock();
}
diff --git a/src/dev/mc146818.hh b/src/dev/mc146818.hh
index 699785199..576c4ab9f 100644
--- a/src/dev/mc146818.hh
+++ b/src/dev/mc146818.hh
@@ -71,7 +71,7 @@ class MC146818 : public EventManager
RTCTickEvent(MC146818 * _parent) : parent(_parent)
{
- parent->schedule(this, curTick + SimClock::Int::s);
+ parent->schedule(this, curTick() + SimClock::Int::s);
}
/** Event process to occur at interrupt*/
diff --git a/src/dev/ns_gige.cc b/src/dev/ns_gige.cc
index 86f081ec5..794fd5355 100644
--- a/src/dev/ns_gige.cc
+++ b/src/dev/ns_gige.cc
@@ -848,7 +848,7 @@ NSGigE::devIntrPost(uint32_t interrupts)
interrupts, regs.isr, regs.imr);
if ((regs.isr & regs.imr)) {
- Tick when = curTick;
+ Tick when = curTick();
if ((regs.isr & regs.imr & ISR_NODELAY) == 0)
when += intrDelay;
postedInterrupts++;
@@ -910,7 +910,7 @@ NSGigE::devIntrChangeMask()
regs.isr, regs.imr, regs.isr & regs.imr);
if (regs.isr & regs.imr)
- cpuIntrPost(curTick);
+ cpuIntrPost(curTick());
else
cpuIntrClear();
}
@@ -927,8 +927,8 @@ NSGigE::cpuIntrPost(Tick when)
* @todo this warning should be removed and the intrTick code should
* be fixed.
*/
- assert(when >= curTick);
- assert(intrTick >= curTick || intrTick == 0);
+ assert(when >= curTick());
+ assert(intrTick >= curTick() || intrTick == 0);
if (when > intrTick && intrTick != 0) {
DPRINTF(EthernetIntr, "don't need to schedule event...intrTick=%d\n",
intrTick);
@@ -936,9 +936,9 @@ NSGigE::cpuIntrPost(Tick when)
}
intrTick = when;
- if (intrTick < curTick) {
+ if (intrTick < curTick()) {
debug_break();
- intrTick = curTick;
+ intrTick = curTick();
}
DPRINTF(EthernetIntr, "going to schedule an interrupt for intrTick=%d\n",
@@ -953,7 +953,7 @@ NSGigE::cpuIntrPost(Tick when)
void
NSGigE::cpuInterrupt()
{
- assert(intrTick == curTick);
+ assert(intrTick == curTick());
// Whether or not there's a pending interrupt, we don't care about
// it anymore
@@ -1125,7 +1125,7 @@ NSGigE::rxKick()
next:
if (clock) {
- if (rxKickTick > curTick) {
+ if (rxKickTick > curTick()) {
DPRINTF(EthernetSM, "receive kick exiting, can't run till %d\n",
rxKickTick);
@@ -1133,7 +1133,7 @@ NSGigE::rxKick()
}
// Go to the next state machine clock tick.
- rxKickTick = curTick + ticks(1);
+ rxKickTick = curTick() + ticks(1);
}
switch(rxDmaState) {
@@ -1494,7 +1494,7 @@ NSGigE::transmit()
if (!txFifo.empty() && !txEvent.scheduled()) {
DPRINTF(Ethernet, "reschedule transmit\n");
- schedule(txEvent, curTick + retryTime);
+ schedule(txEvent, curTick() + retryTime);
}
}
@@ -1573,14 +1573,14 @@ NSGigE::txKick()
next:
if (clock) {
- if (txKickTick > curTick) {
+ if (txKickTick > curTick()) {
DPRINTF(EthernetSM, "transmit kick exiting, can't run till %d\n",
txKickTick);
goto exit;
}
// Go to the next state machine clock tick.
- txKickTick = curTick + ticks(1);
+ txKickTick = curTick() + ticks(1);
}
switch(txDmaState) {
@@ -2001,7 +2001,7 @@ NSGigE::transferDone()
DPRINTF(Ethernet, "transfer complete: data in txFifo...schedule xmit\n");
- reschedule(txEvent, curTick + ticks(1), true);
+ reschedule(txEvent, curTick() + ticks(1), true);
}
bool
@@ -2259,7 +2259,7 @@ NSGigE::serialize(ostream &os)
* If there's a pending transmit, store the time so we can
* reschedule it later
*/
- Tick transmitTick = txEvent.scheduled() ? txEvent.when() - curTick : 0;
+ Tick transmitTick = txEvent.scheduled() ? txEvent.when() - curTick() : 0;
SERIALIZE_SCALAR(transmitTick);
/*
@@ -2440,7 +2440,7 @@ NSGigE::unserialize(Checkpoint *cp, const std::string &section)
Tick transmitTick;
UNSERIALIZE_SCALAR(transmitTick);
if (transmitTick)
- schedule(txEvent, curTick + transmitTick);
+ schedule(txEvent, curTick() + transmitTick);
/*
* unserialize receive address filter settings
diff --git a/src/dev/sinic.cc b/src/dev/sinic.cc
index 2183d9d99..98a2426f5 100644
--- a/src/dev/sinic.cc
+++ b/src/dev/sinic.cc
@@ -618,7 +618,7 @@ Device::devIntrPost(uint32_t interrupts)
interrupts &= ~Regs::Intr_TxLow;
if (interrupts) {
- Tick when = curTick;
+ Tick when = curTick();
if ((interrupts & Regs::Intr_NoDelay) == 0)
when += intrDelay;
cpuIntrPost(when);
@@ -654,7 +654,7 @@ Device::devIntrChangeMask(uint32_t newmask)
regs.IntrStatus, regs.IntrMask, regs.IntrStatus & regs.IntrMask);
if (regs.IntrStatus & regs.IntrMask)
- cpuIntrPost(curTick);
+ cpuIntrPost(curTick());
else
cpuIntrClear();
}
@@ -671,8 +671,8 @@ Base::cpuIntrPost(Tick when)
* @todo this warning should be removed and the intrTick code should
* be fixed.
*/
- assert(when >= curTick);
- assert(intrTick >= curTick || intrTick == 0);
+ assert(when >= curTick());
+ assert(intrTick >= curTick() || intrTick == 0);
if (!cpuIntrEnable) {
DPRINTF(EthernetIntr, "interrupts not enabled.\n",
intrTick);
@@ -686,9 +686,9 @@ Base::cpuIntrPost(Tick when)
}
intrTick = when;
- if (intrTick < curTick) {
+ if (intrTick < curTick()) {
debug_break();
- intrTick = curTick;
+ intrTick = curTick();
}
DPRINTF(EthernetIntr, "going to schedule an interrupt for intrTick=%d\n",
@@ -703,7 +703,7 @@ Base::cpuIntrPost(Tick when)
void
Base::cpuInterrupt()
{
- assert(intrTick == curTick);
+ assert(intrTick == curTick());
// Whether or not there's a pending interrupt, we don't care about
// it anymore
@@ -759,7 +759,7 @@ Device::changeConfig(uint32_t newconf)
cpuIntrEnable = regs.Config & Regs::Config_IntEn;
if (cpuIntrEnable) {
if (regs.IntrStatus & regs.IntrMask)
- cpuIntrPost(curTick);
+ cpuIntrPost(curTick());
} else {
cpuIntrClear();
}
@@ -882,7 +882,7 @@ Device::rxKick()
DPRINTF(EthernetSM, "rxKick: rxState=%s (rxFifo.size=%d)\n",
RxStateStrings[rxState], rxFifo.size());
- if (rxKickTick > curTick) {
+ if (rxKickTick > curTick()) {
DPRINTF(EthernetSM, "rxKick: exiting, can't run till %d\n",
rxKickTick);
return;
@@ -1196,7 +1196,7 @@ Device::txKick()
DPRINTF(EthernetSM, "txKick: txState=%s (txFifo.size=%d)\n",
TxStateStrings[txState], txFifo.size());
- if (txKickTick > curTick) {
+ if (txKickTick > curTick()) {
DPRINTF(EthernetSM, "txKick: exiting, can't run till %d\n",
txKickTick);
return;
@@ -1317,7 +1317,7 @@ Device::transferDone()
DPRINTF(Ethernet, "transfer complete: data in txFifo...schedule xmit\n");
- reschedule(txEvent, curTick + ticks(1), true);
+ reschedule(txEvent, curTick() + ticks(1), true);
}
bool
@@ -1573,7 +1573,7 @@ Device::serialize(std::ostream &os)
* If there's a pending transmit, store the time so we can
* reschedule it later
*/
- Tick transmitTick = txEvent.scheduled() ? txEvent.when() - curTick : 0;
+ Tick transmitTick = txEvent.scheduled() ? txEvent.when() - curTick() : 0;
SERIALIZE_SCALAR(transmitTick);
}
@@ -1708,7 +1708,7 @@ Device::unserialize(Checkpoint *cp, const std::string &section)
Tick transmitTick;
UNSERIALIZE_SCALAR(transmitTick);
if (transmitTick)
- schedule(txEvent, curTick + transmitTick);
+ schedule(txEvent, curTick() + transmitTick);
pioPort->sendStatusChange(Port::RangeChange);
diff --git a/src/dev/uart8250.cc b/src/dev/uart8250.cc
index f8bd23311..f33a428f7 100644
--- a/src/dev/uart8250.cc
+++ b/src/dev/uart8250.cc
@@ -68,7 +68,7 @@ Uart8250::IntrEvent::process()
DPRINTF(Uart, "UART InterEvent, interrupting\n");
uart->platform->postConsoleInt();
uart->status |= intrBit;
- uart->lastTxInt = curTick;
+ uart->lastTxInt = curTick();
}
else
DPRINTF(Uart, "UART InterEvent, not interrupting\n");
@@ -92,11 +92,11 @@ Uart8250::IntrEvent::scheduleIntr()
{
static const Tick interval = 225 * SimClock::Int::ns;
DPRINTF(Uart, "Scheduling IER interrupt for %#x, at cycle %lld\n", intrBit,
- curTick + interval);
+ curTick() + interval);
if (!scheduled())
- uart->schedule(this, curTick + interval);
+ uart->schedule(this, curTick() + interval);
else
- uart->reschedule(this, curTick + interval);
+ uart->reschedule(this, curTick() + interval);
}
@@ -218,13 +218,13 @@ Uart8250::write(PacketPtr pkt)
if (UART_IER_THRI & IER)
{
DPRINTF(Uart, "IER: IER_THRI set, scheduling TX intrrupt\n");
- if (curTick - lastTxInt > 225 * SimClock::Int::ns) {
+ if (curTick() - lastTxInt > 225 * SimClock::Int::ns) {
DPRINTF(Uart, "-- Interrupting Immediately... %d,%d\n",
- curTick, lastTxInt);
+ curTick(), lastTxInt);
txIntrEvent.process();
} else {
DPRINTF(Uart, "-- Delaying interrupt... %d,%d\n",
- curTick, lastTxInt);
+ curTick(), lastTxInt);
txIntrEvent.scheduleIntr();
}
}
diff --git a/src/kern/kernel_stats.cc b/src/kern/kernel_stats.cc
index aa9342a13..d1de00fcd 100644
--- a/src/kern/kernel_stats.cc
+++ b/src/kern/kernel_stats.cc
@@ -120,8 +120,8 @@ Statistics::swpipl(int ipl)
return;
_iplGood[ipl]++;
- _iplTicks[iplLast] += curTick - iplLastTick;
- iplLastTick = curTick;
+ _iplTicks[iplLast] += curTick() - iplLastTick;
+ iplLastTick = curTick();
iplLast = ipl;
}
diff --git a/src/mem/bridge.cc b/src/mem/bridge.cc
index 668b492e8..4b8325088 100644
--- a/src/mem/bridge.cc
+++ b/src/mem/bridge.cc
@@ -158,7 +158,7 @@ Bridge::BridgePort::nackRequest(PacketPtr pkt)
pkt->setNacked();
//put it on the list to send
- Tick readyTime = curTick + nackDelay;
+ Tick readyTime = curTick() + nackDelay;
PacketBuffer *buf = new PacketBuffer(pkt, readyTime, true);
// nothing on the list, add it and we're done
@@ -221,7 +221,7 @@ Bridge::BridgePort::queueForSendTiming(PacketPtr pkt)
- Tick readyTime = curTick + delay;
+ Tick readyTime = curTick() + delay;
PacketBuffer *buf = new PacketBuffer(pkt, readyTime);
// If we're about to put this packet at the head of the queue, we
@@ -241,7 +241,7 @@ Bridge::BridgePort::trySend()
PacketBuffer *buf = sendQueue.front();
- assert(buf->ready <= curTick);
+ assert(buf->ready <= curTick());
PacketPtr pkt = buf->pkt;
@@ -283,7 +283,7 @@ Bridge::BridgePort::trySend()
if (!sendQueue.empty()) {
buf = sendQueue.front();
DPRINTF(BusBridge, "Scheduling next send\n");
- schedule(sendEvent, std::max(buf->ready, curTick + 1));
+ schedule(sendEvent, std::max(buf->ready, curTick() + 1));
}
} else {
DPRINTF(BusBridge, " unsuccessful\n");
@@ -301,7 +301,7 @@ Bridge::BridgePort::recvRetry()
{
inRetry = false;
Tick nextReady = sendQueue.front()->ready;
- if (nextReady <= curTick)
+ if (nextReady <= curTick())
trySend();
else
schedule(sendEvent, nextReady);
diff --git a/src/mem/bus.cc b/src/mem/bus.cc
index 39399017c..c84d9fc5e 100644
--- a/src/mem/bus.cc
+++ b/src/mem/bus.cc
@@ -142,10 +142,10 @@ Bus::calcPacketTiming(PacketPtr pkt)
// a cycle boundary to take up only the following cycle. Anything
// that happens later will have to "wait" for the end of that
// cycle, and then start using the bus after that.
- if (tickNextIdle < curTick) {
- tickNextIdle = curTick;
+ if (tickNextIdle < curTick()) {
+ tickNextIdle = curTick();
if (tickNextIdle % clock != 0)
- tickNextIdle = curTick - (curTick % clock) + clock;
+ tickNextIdle = curTick() - (curTick() % clock) + clock;
}
Tick headerTime = tickNextIdle + headerCycles * clock;
@@ -181,7 +181,7 @@ void Bus::occupyBus(Tick until)
reschedule(busIdle, tickNextIdle, true);
DPRINTF(Bus, "The bus is now occupied from tick %d to %d\n",
- curTick, tickNextIdle);
+ curTick(), tickNextIdle);
}
/** Function called by the port when the bus is receiving a Timing
@@ -205,7 +205,7 @@ Bus::recvTiming(PacketPtr pkt)
// If the bus is busy, or other devices are in line ahead of the current
// one, put this device on the retry list.
if (!pkt->isExpressSnoop() &&
- (tickNextIdle > curTick ||
+ (tickNextIdle > curTick() ||
(retryList.size() && (!inRetry || src_port != retryList.front()))))
{
addToRetryList(src_port);
@@ -295,7 +295,7 @@ void
Bus::recvRetry(int id)
{
// If there's anything waiting, and the bus isn't busy...
- if (retryList.size() && curTick >= tickNextIdle) {
+ if (retryList.size() && curTick() >= tickNextIdle) {
//retryingPort = retryList.front();
inRetry = true;
DPRINTF(Bus, "Sending a retry to %s\n", retryList.front()->getPeer()->name());
@@ -308,7 +308,7 @@ Bus::recvRetry(int id)
inRetry = false;
//Bring tickNextIdle up to the present
- while (tickNextIdle < curTick)
+ while (tickNextIdle < curTick())
tickNextIdle += clock;
//Burn a cycle for the missed grant.
@@ -318,7 +318,7 @@ Bus::recvRetry(int id)
}
}
//If we weren't able to drain before, we might be able to now.
- if (drainEvent && retryList.size() == 0 && curTick >= tickNextIdle) {
+ if (drainEvent && retryList.size() == 0 && curTick() >= tickNextIdle) {
drainEvent->process();
// Clear the drain event once we're done with it.
drainEvent = NULL;
@@ -435,7 +435,7 @@ Bus::recvAtomic(PacketPtr pkt)
}
// why do we have this packet field and the return value both???
- pkt->finishTime = curTick + response_latency;
+ pkt->finishTime = curTick() + response_latency;
return response_latency;
}
@@ -649,7 +649,7 @@ Bus::drain(Event * de)
//We should check that we're not "doing" anything, and that noone is
//waiting. We might be idle but have someone waiting if the device we
//contacted for a retry didn't actually retry.
- if (retryList.size() || (curTick < tickNextIdle && busIdle.scheduled())) {
+ if (retryList.size() || (curTick() < tickNextIdle && busIdle.scheduled())) {
drainEvent = de;
return 1;
}
@@ -659,8 +659,8 @@ Bus::drain(Event * de)
void
Bus::startup()
{
- if (tickNextIdle < curTick)
- tickNextIdle = (curTick / clock) * clock + clock;
+ if (tickNextIdle < curTick())
+ tickNextIdle = (curTick() / clock) * clock + clock;
}
Bus *
diff --git a/src/mem/cache/base.cc b/src/mem/cache/base.cc
index 70bc51cda..9166e1a09 100644
--- a/src/mem/cache/base.cc
+++ b/src/mem/cache/base.cc
@@ -124,7 +124,7 @@ BaseCache::CachePort::clearBlocked()
mustSendRetry = false;
SendRetryEvent *ev = new SendRetryEvent(this, true);
// @TODO: need to find a better time (next bus cycle?)
- schedule(ev, curTick + 1);
+ schedule(ev, curTick() + 1);
}
}
diff --git a/src/mem/cache/base.hh b/src/mem/cache/base.hh
index 867d77121..e8a644296 100644
--- a/src/mem/cache/base.hh
+++ b/src/mem/cache/base.hh
@@ -434,7 +434,7 @@ class BaseCache : public MemObject
uint8_t flag = 1 << cause;
if (blocked == 0) {
blocked_causes[cause]++;
- blockedCycle = curTick;
+ blockedCycle = curTick();
cpuSidePort->setBlocked();
}
blocked |= flag;
@@ -454,7 +454,7 @@ class BaseCache : public MemObject
blocked &= ~flag;
DPRINTF(Cache,"Unblocking for cause %d, mask=%d\n", cause, blocked);
if (blocked == 0) {
- blocked_cycles[cause] += curTick - blockedCycle;
+ blocked_cycles[cause] += curTick() - blockedCycle;
cpuSidePort->clearBlocked();
}
}
diff --git a/src/mem/cache/blk.hh b/src/mem/cache/blk.hh
index bf78a2268..6be09597c 100644
--- a/src/mem/cache/blk.hh
+++ b/src/mem/cache/blk.hh
@@ -89,7 +89,7 @@ class CacheBlk
/** The current status of this block. @sa CacheBlockStatusBits */
State status;
- /** Which curTick will this block be accessable */
+ /** Which curTick() will this block be accessible */
Tick whenReady;
/**
diff --git a/src/mem/cache/cache_impl.hh b/src/mem/cache/cache_impl.hh
index c5b7ca065..e4e4a3c92 100644
--- a/src/mem/cache/cache_impl.hh
+++ b/src/mem/cache/cache_impl.hh
@@ -412,7 +412,7 @@ Cache<TagStore>::timingAccess(PacketPtr pkt)
// MemDebug::cacheAccess(pkt);
// we charge hitLatency for doing just about anything here
- Tick time = curTick + hitLatency;
+ Tick time = curTick() + hitLatency;
if (pkt->isResponse()) {
// must be cache-to-cache response from upper to lower level
@@ -504,7 +504,7 @@ Cache<TagStore>::timingAccess(PacketPtr pkt)
if (satisfied) {
if (needsResponse) {
pkt->makeTimingResponse();
- cpuSidePort->respond(pkt, curTick+lat);
+ cpuSidePort->respond(pkt, curTick()+lat);
} else {
delete pkt;
}
@@ -532,7 +532,7 @@ Cache<TagStore>::timingAccess(PacketPtr pkt)
noTargetMSHR = mshr;
setBlocked(Blocked_NoTargets);
// need to be careful with this... if this mshr isn't
- // ready yet (i.e. time > curTick_, we don't want to
+ // ready yet (i.e. time > curTick()), we don't want to
// move it ahead of mshrs that are ready
// mshrQueue.moveToFront(mshr);
}
@@ -816,7 +816,7 @@ template<class TagStore>
void
Cache<TagStore>::handleResponse(PacketPtr pkt)
{
- Tick time = curTick + hitLatency;
+ Tick time = curTick() + hitLatency;
MSHR *mshr = dynamic_cast<MSHR*>(pkt->senderState);
bool is_error = pkt->isError();
@@ -848,7 +848,7 @@ Cache<TagStore>::handleResponse(PacketPtr pkt)
MSHR::Target *initial_tgt = mshr->getTarget();
BlkType *blk = tags->findBlock(pkt->getAddr());
int stats_cmd_idx = initial_tgt->pkt->cmdToIndex();
- Tick miss_latency = curTick - initial_tgt->recvTime;
+ Tick miss_latency = curTick() - initial_tgt->recvTime;
PacketList writebacks;
if (pkt->req->isUncacheable()) {
@@ -1159,7 +1159,7 @@ doTimingSupplyResponse(PacketPtr req_pkt, uint8_t *blk_data,
// invalidate it.
pkt->cmd = MemCmd::ReadRespWithInvalidate;
}
- memSidePort->respond(pkt, curTick + hitLatency);
+ memSidePort->respond(pkt, curTick() + hitLatency);
}
template<class TagStore>
@@ -1430,7 +1430,7 @@ Cache<TagStore>::getNextMSHR()
// (hwpf_mshr_misses)
mshr_misses[pkt->cmdToIndex()][0/*pkt->req->threadId()*/]++;
// Don't request bus, since we already have it
- return allocateMissBuffer(pkt, curTick, false);
+ return allocateMissBuffer(pkt, curTick(), false);
}
}
}
@@ -1461,7 +1461,7 @@ Cache<TagStore>::getTimingPacket()
pkt = new Packet(tgt_pkt);
pkt->cmd = MemCmd::UpgradeFailResp;
pkt->senderState = mshr;
- pkt->firstWordTime = pkt->finishTime = curTick;
+ pkt->firstWordTime = pkt->finishTime = curTick();
handleResponse(pkt);
return NULL;
} else if (mshr->isForwardNoResponse()) {
@@ -1679,7 +1679,7 @@ Cache<TagStore>::MemSidePort::sendPacket()
// @TODO: need to facotr in prefetch requests here somehow
if (nextReady != MaxTick) {
DPRINTF(CachePort, "more packets to send @ %d\n", nextReady);
- schedule(sendEvent, std::max(nextReady, curTick + 1));
+ schedule(sendEvent, std::max(nextReady, curTick() + 1));
} else {
// no more to send right now: if we're draining, we may be done
if (drainEvent && !sendEvent->scheduled()) {
diff --git a/src/mem/cache/mshr.cc b/src/mem/cache/mshr.cc
index 54977346f..292c11c6b 100644
--- a/src/mem/cache/mshr.cc
+++ b/src/mem/cache/mshr.cc
@@ -333,7 +333,7 @@ MSHR::handleSnoop(PacketPtr pkt, Counter _order)
// Actual target device (typ. PhysicalMemory) will delete the
// packet on reception, so we need to save a copy here.
PacketPtr cp_pkt = new Packet(pkt, true);
- targets->add(cp_pkt, curTick, _order, Target::FromSnoop,
+ targets->add(cp_pkt, curTick(), _order, Target::FromSnoop,
downstreamPending && targets->needsExclusive);
++ntargets;
@@ -378,7 +378,7 @@ MSHR::promoteDeferredTargets()
deferredTargets->resetFlags();
order = targets->front().order;
- readyTime = std::max(curTick, targets->front().readyTime);
+ readyTime = std::max(curTick(), targets->front().readyTime);
return true;
}
diff --git a/src/mem/cache/mshr.hh b/src/mem/cache/mshr.hh
index 9b55e70ef..7920ad717 100644
--- a/src/mem/cache/mshr.hh
+++ b/src/mem/cache/mshr.hh
@@ -72,7 +72,7 @@ class MSHR : public Packet::SenderState, public Printable
Target(PacketPtr _pkt, Tick _readyTime, Counter _order,
Source _source, bool _markedPending)
- : recvTime(curTick), readyTime(_readyTime), order(_order),
+ : recvTime(curTick()), readyTime(_readyTime), order(_order),
pkt(_pkt), source(_source), markedPending(_markedPending)
{}
};
diff --git a/src/mem/cache/mshr_queue.hh b/src/mem/cache/mshr_queue.hh
index d8c495679..5a8739fc7 100644
--- a/src/mem/cache/mshr_queue.hh
+++ b/src/mem/cache/mshr_queue.hh
@@ -199,7 +199,7 @@ class MSHRQueue
*/
MSHR *getNextMSHR() const
{
- if (readyList.empty() || readyList.front()->readyTime > curTick) {
+ if (readyList.empty() || readyList.front()->readyTime > curTick()) {
return NULL;
}
return readyList.front();
diff --git a/src/mem/cache/tags/fa_lru.cc b/src/mem/cache/tags/fa_lru.cc
index 4d1c2175f..873883c1b 100644
--- a/src/mem/cache/tags/fa_lru.cc
+++ b/src/mem/cache/tags/fa_lru.cc
@@ -220,7 +220,7 @@ FALRU::findVictim(Addr addr, PacketList &writebacks)
blk->isTouched = true;
if (!warmedUp && tagsInUse.value() >= warmupBound) {
warmedUp = true;
- warmupCycle = curTick;
+ warmupCycle = curTick();
}
}
//assert(check());
diff --git a/src/mem/cache/tags/iic.cc b/src/mem/cache/tags/iic.cc
index 1315a17ee..743c6894f 100644
--- a/src/mem/cache/tags/iic.cc
+++ b/src/mem/cache/tags/iic.cc
@@ -257,8 +257,8 @@ IIC::accessBlock(Addr addr, int &lat, int context_src)
hitDepthTotal += sets[set].depth;
tag_ptr->status |= BlkReferenced;
lat = set_lat;
- if (tag_ptr->whenReady > curTick && tag_ptr->whenReady - curTick > set_lat) {
- lat = tag_ptr->whenReady - curTick;
+ if (tag_ptr->whenReady > curTick() && tag_ptr->whenReady - curTick() > set_lat) {
+ lat = tag_ptr->whenReady - curTick();
}
tag_ptr->refCount += 1;
@@ -437,7 +437,7 @@ IIC::getFreeTag(int set, PacketList & writebacks)
tagsInUse++;
if (!warmedUp && tagsInUse.value() >= warmupBound) {
warmedUp = true;
- warmupCycle = curTick;
+ warmupCycle = curTick();
}
return tag_ptr;
diff --git a/src/mem/cache/tags/lru.cc b/src/mem/cache/tags/lru.cc
index 8a8b0d0d6..25e98d293 100644
--- a/src/mem/cache/tags/lru.cc
+++ b/src/mem/cache/tags/lru.cc
@@ -126,9 +126,9 @@ LRU::accessBlock(Addr addr, int &lat, int context_src)
sets[set].moveToHead(blk);
DPRINTF(CacheRepl, "set %x: moving blk %x to MRU\n",
set, regenerateBlkAddr(tag, set));
- if (blk->whenReady > curTick
- && blk->whenReady - curTick > hitLatency) {
- lat = blk->whenReady - curTick;
+ if (blk->whenReady > curTick()
+ && blk->whenReady - curTick() > hitLatency) {
+ lat = blk->whenReady - curTick();
}
blk->refCount += 1;
}
@@ -180,7 +180,7 @@ LRU::insertBlock(Addr addr, BlkType *blk, int context_src)
blk->isTouched = true;
if (!warmedUp && tagsInUse.value() >= warmupBound) {
warmedUp = true;
- warmupCycle = curTick;
+ warmupCycle = curTick();
}
}
diff --git a/src/mem/dram.cc b/src/mem/dram.cc
index ff01ab1dc..6253f9828 100644
--- a/src/mem/dram.cc
+++ b/src/mem/dram.cc
@@ -414,14 +414,14 @@ DRAMMemory::calculateLatency(PacketPtr pkt)
int SD_BEST_T_WRITE_READ_OBANK = (war_lat -1); /* WAR, row miss/hit, another bank */
int SD_BEST_T_WRITE_WRITE_OBANK = 0; /* WAW, row miss/hit, another bank */
- Tick time_since_last_access = curTick-time_last_access;
+ Tick time_since_last_access = curTick()-time_last_access;
Tick time_last_miss = 0; // used for keeping track of times between activations (page misses)
- //int was_idle = (curTick > busy_until);
+ //int was_idle = (curTick() > busy_until);
bool srow_flag = false;
int timing_correction = 0;
- int was_idle = (curTick > busy_until[current_bank]);
- cycles_nCKE[0] += was_idle ? MIN(curTick-busy_until[current_bank], time_since_last_access) : 0;
+ int was_idle = (curTick() > busy_until[current_bank]);
+ cycles_nCKE[0] += was_idle ? MIN(curTick()-busy_until[current_bank], time_since_last_access) : 0;
// bank is precharged
//active_row[current_bank] == DR_NUM_ROWS
@@ -441,7 +441,7 @@ DRAMMemory::calculateLatency(PacketPtr pkt)
if(all_precharged) {
if(was_idle) {
- cycles_all_precharge_nCKE[0] += MIN(curTick-busy_until[current_bank], time_since_last_access);
+ cycles_all_precharge_nCKE[0] += MIN(curTick()-busy_until[current_bank], time_since_last_access);
cycles_all_precharge_CKE[0] += MIN(0, busy_until[current_bank]-time_last_access);
}
else {
@@ -449,7 +449,7 @@ DRAMMemory::calculateLatency(PacketPtr pkt)
}
} else { // some bank is active
if(was_idle) {
- cycles_bank_active_nCKE[0] += MIN(curTick-busy_until[current_bank], time_since_last_access);
+ cycles_bank_active_nCKE[0] += MIN(curTick()-busy_until[current_bank], time_since_last_access);
}
else {
}
@@ -462,7 +462,7 @@ DRAMMemory::calculateLatency(PacketPtr pkt)
}
- time_last_access = curTick;
+ time_last_access = curTick();
////////////////////////////////////////////////////////////////////////////
if ((mem_type == "SDRAM") && (mem_actpolicy == "open"))
@@ -516,7 +516,7 @@ DRAMMemory::calculateLatency(PacketPtr pkt)
if (memctrlpipe_enable == true)
{
- overlap=(int)(busy_until[current_bank] - curTick);
+ overlap=(int)(busy_until[current_bank] - curTick());
}
else overlap = 0;
@@ -529,7 +529,7 @@ DRAMMemory::calculateLatency(PacketPtr pkt)
corrected_overlap = (int) (overlap/cpu_ratio);
}
- /*fprintf(stderr,"%10.0f %10.0f %4d %4d ",(double)busy_until, (double)curTick, overlap, corrected_overlap); debugging*/
+ /*fprintf(stderr,"%10.0f %10.0f %4d %4d ",(double)busy_until, (double)curTick(), overlap, corrected_overlap); debugging*/
if (cmdIsRead == lastCmdIsRead)/*same command*/
{
@@ -889,25 +889,25 @@ DRAMMemory::calculateLatency(PacketPtr pkt)
{
if (memctrlpipe_enable == true)
{
- busy_until[current_bank]=curTick+lat+
+ busy_until[current_bank]=curTick()+lat+
timing_correction;
}
else
{
- if (busy_until[current_bank] >= curTick)
+ if (busy_until[current_bank] >= curTick())
{
busy_until[current_bank]+=(lat+
timing_correction);
total_arb_latency += (busy_until[current_bank]
- - curTick - lat
+ - curTick() - lat
- timing_correction);
- lat=busy_until[current_bank] - curTick;
+ lat=busy_until[current_bank] - curTick();
}
- else busy_until[current_bank]=curTick+lat+
+ else busy_until[current_bank]=curTick()+lat+
timing_correction;
}
}
- else/*the memory request will be satisfied temp cycles after curTick*/
+ else/*the memory request will be satisfied temp cycles after curTick()*/
{
busy_until[current_bank] +=(lat+
timing_correction);
@@ -1001,7 +1001,7 @@ DRAMMemory::calculateLatency(PacketPtr pkt)
if (memctrlpipe_enable == true)
{
- overlap=(int)(busy_until[current_bank] - curTick);
+ overlap=(int)(busy_until[current_bank] - curTick());
}
else overlap=0;
@@ -1014,7 +1014,7 @@ DRAMMemory::calculateLatency(PacketPtr pkt)
corrected_overlap = (int) (overlap/cpu_ratio);
}
- /*fprintf(stderr,"%10.0f %10.0f %6d %6d %2d %2d ",(double)busy_until, (double)curTick, overlap, corrected_overlap,precharge,adjacent);debugging*/
+ /*fprintf(stderr,"%10.0f %10.0f %6d %6d %2d %2d ",(double)busy_until, (double)curTick(), overlap, corrected_overlap,precharge,adjacent);debugging*/
if (cmdIsRead == lastCmdIsRead)/*same command*/
{
@@ -2013,19 +2013,19 @@ DRAMMemory::calculateLatency(PacketPtr pkt)
{
if (memctrlpipe_enable == true)
{
- busy_until[current_bank] =curTick+lat;
+ busy_until[current_bank] =curTick()+lat;
}
else
{
- if (busy_until[current_bank] >= curTick)
+ if (busy_until[current_bank] >= curTick())
{
busy_until[current_bank] +=lat;
- lat=busy_until[current_bank] - curTick;
+ lat=busy_until[current_bank] - curTick();
}
- else busy_until[current_bank] = curTick+lat;
+ else busy_until[current_bank] = curTick()+lat;
}
}
- else/*the memory request will be satisfied temp cycles after curTick*/
+ else/*the memory request will be satisfied temp cycles after curTick()*/
{
busy_until[current_bank] +=lat;
command_overlapping++;
@@ -2073,7 +2073,7 @@ DRAMMemory::calculateLatency(PacketPtr pkt)
}
total_access++;
- overlap=(int)(busy_until[current_bank] - curTick);
+ overlap=(int)(busy_until[current_bank] - curTick());
if (current_bank == last_bank)/*same bank*/
{
@@ -2206,9 +2206,9 @@ DRAMMemory::calculateLatency(PacketPtr pkt)
if (overlap <= 0) /*memory interface is not busy*/
{
- busy_until[current_bank] = curTick+lat;
+ busy_until[current_bank] = curTick()+lat;
}
- else /*the memory request will be satisfied temp cycles after curTick*/
+ else /*the memory request will be satisfied temp cycles after curTick()*/
{
busy_until[current_bank] +=lat;
command_overlapping++;
@@ -2223,7 +2223,7 @@ DRAMMemory::calculateLatency(PacketPtr pkt)
- /*fprintf(stderr,"%10.0f %10.0f %4d %4d \n",(double)busy_until, (double)curTick, overlap, lat);debug*/
+ /*fprintf(stderr,"%10.0f %10.0f %4d %4d \n",(double)busy_until, (double)curTick(), overlap, lat);debug*/
// if((_cpu_num < num_cpus) && (_cpu_num >= 0))
// cout <<"cpu id = " << _cpu_num << "current_bank = " << current_bank << endl;
// bank_access_profile[_cpu_num][current_bank]++;
@@ -2269,7 +2269,7 @@ DRAMMemory::calculateLatency(PacketPtr pkt)
}
total_access++;
- overlap=(int)(busy_until[current_bank] - curTick);
+ overlap=(int)(busy_until[current_bank] - curTick());
if (cpu_ratio < 1.0)
{
@@ -2432,16 +2432,16 @@ DRAMMemory::calculateLatency(PacketPtr pkt)
if (overlap <= 0) /*memory interface is not busy*/
{
- busy_until[current_bank] = curTick+lat;
+ busy_until[current_bank] = curTick()+lat;
}
- else/*the memory request will be satisfied temp cycles after curTick*/
+ else/*the memory request will be satisfied temp cycles after curTick()*/
{
busy_until[current_bank] +=lat;
command_overlapping++;
lat+=overlap;
}
- /*fprintf(stderr,"%10.0f %10.0f %4d %4d \n",(double)busy_until, (double)curTick, overlap, lat);*/
+ /*fprintf(stderr,"%10.0f %10.0f %4d %4d \n",(double)busy_until, (double)curTick(), overlap, lat);*/
if (cmdIsRead)
{
@@ -2494,7 +2494,7 @@ DRAMMemory::calculateLatency(PacketPtr pkt)
total_access++;
lat += chunks;
- overlap=(int)(busy_until[current_bank] - curTick);
+ overlap=(int)(busy_until[current_bank] - curTick());
lastCmdIsRead=cmdIsRead;
if (cpu_ratio < 1.0)
@@ -2509,9 +2509,9 @@ DRAMMemory::calculateLatency(PacketPtr pkt)
if (overlap <= 0) /*memory interface is not busy*/
{
- busy_until[current_bank] = curTick+lat;
+ busy_until[current_bank] = curTick()+lat;
}
- else/*the memory request will be satisfied temp cycles after curTick*/
+ else/*the memory request will be satisfied temp cycles after curTick()*/
{
busy_until[current_bank] +=lat;
command_overlapping++;
@@ -2543,7 +2543,7 @@ DRAMMemory::calculateLatency(PacketPtr pkt)
lat = DR_T_RCD + DR_T_CWD + DR_T_PACKET; /* DR_T_RP + */
}
total_access++;
- overlap=(int)(busy_until[current_bank] - curTick);
+ overlap=(int)(busy_until[current_bank] - curTick());
lat += chunks * DR_T_PACKET; /*every 128 bit need DR_NUM_CYCLES*/
if (cpu_ratio < 1.0)
@@ -2560,9 +2560,9 @@ DRAMMemory::calculateLatency(PacketPtr pkt)
if (overlap <= 0) /*memory interface is not busy*/
{
- busy_until[current_bank] = curTick+lat;
+ busy_until[current_bank] = curTick()+lat;
}
- else/*the memory request will be satisfied temp cycles after curTick*/
+ else/*the memory request will be satisfied temp cycles after curTick()*/
{
busy_until[current_bank] += lat;
command_overlapping++;
diff --git a/src/mem/mport.cc b/src/mem/mport.cc
index 564c560c6..80393c81e 100644
--- a/src/mem/mport.cc
+++ b/src/mem/mport.cc
@@ -50,7 +50,7 @@ MessagePort::recvAtomic(PacketPtr pkt)
void
MessagePort::sendMessageTiming(PacketPtr pkt, Tick latency)
{
- schedSendTiming(pkt, curTick + latency);
+ schedSendTiming(pkt, curTick() + latency);
}
Tick
diff --git a/src/mem/packet.hh b/src/mem/packet.hh
index 41edef8a7..19fff7e3a 100644
--- a/src/mem/packet.hh
+++ b/src/mem/packet.hh
@@ -496,7 +496,7 @@ class Packet : public FastAlloc, public Printable
*/
Packet(Request *_req, MemCmd _cmd, NodeID _dest)
: flags(VALID_DST), cmd(_cmd), req(_req), data(NULL),
- dest(_dest), time(curTick), senderState(NULL)
+ dest(_dest), time(curTick()), senderState(NULL)
{
if (req->hasPaddr()) {
addr = req->getPaddr();
@@ -515,7 +515,7 @@ class Packet : public FastAlloc, public Printable
*/
Packet(Request *_req, MemCmd _cmd, NodeID _dest, int _blkSize)
: flags(VALID_DST), cmd(_cmd), req(_req), data(NULL),
- dest(_dest), time(curTick), senderState(NULL)
+ dest(_dest), time(curTick()), senderState(NULL)
{
if (req->hasPaddr()) {
addr = req->getPaddr() & ~(_blkSize - 1);
@@ -536,7 +536,7 @@ class Packet : public FastAlloc, public Printable
: cmd(pkt->cmd), req(pkt->req),
data(pkt->flags.isSet(STATIC_DATA) ? pkt->data : NULL),
addr(pkt->addr), size(pkt->size), src(pkt->src), dest(pkt->dest),
- time(curTick), senderState(pkt->senderState)
+ time(curTick()), senderState(pkt->senderState)
{
if (!clearFlags)
flags.set(pkt->flags & COPY_FLAGS);
diff --git a/src/mem/request.hh b/src/mem/request.hh
index 38daea266..ec1b8ba29 100644
--- a/src/mem/request.hh
+++ b/src/mem/request.hh
@@ -145,7 +145,7 @@ class Request : public FastAlloc
/**
* The time this request was started. Used to calculate
- * latencies. This field is set to curTick any time paddr or vaddr
+ * latencies. This field is set to curTick() any time paddr or vaddr
* is written.
*/
Tick _time;
@@ -179,7 +179,7 @@ class Request : public FastAlloc
/**
* Constructor for physical (e.g. device) requests. Initializes
- * just physical address, size, flags, and timestamp (to curTick).
+ * just physical address, size, flags, and timestamp (to curTick()).
* These fields are adequate to perform a request.
*/
Request(Addr paddr, int size, Flags flags)
@@ -240,7 +240,7 @@ class Request : public FastAlloc
void
setPhys(Addr paddr, int size, Flags flags)
{
- setPhys(paddr, size, flags, curTick);
+ setPhys(paddr, size, flags, curTick());
}
/**
@@ -255,7 +255,7 @@ class Request : public FastAlloc
_vaddr = vaddr;
_size = size;
_pc = pc;
- _time = curTick;
+ _time = curTick();
_flags.clear(~STICKY_FLAGS);
_flags.set(flags);
diff --git a/src/mem/ruby/eventqueue/RubyEventQueue.hh b/src/mem/ruby/eventqueue/RubyEventQueue.hh
index 6fa8b0ac3..3e2bc3f89 100644
--- a/src/mem/ruby/eventqueue/RubyEventQueue.hh
+++ b/src/mem/ruby/eventqueue/RubyEventQueue.hh
@@ -71,7 +71,7 @@ class RubyEventQueue : public EventManager
RubyEventQueue(EventQueue* eventq, Tick _clock);
~RubyEventQueue();
- Time getTime() const { return curTick/m_clock; }
+ Time getTime() const { return curTick()/m_clock; }
Tick getClock() const { return m_clock; }
void scheduleEvent(Consumer* consumer, Time timeDelta);
void scheduleEventAbsolute(Consumer* consumer, Time timeAbs);
diff --git a/src/mem/ruby/system/RubyPort.cc b/src/mem/ruby/system/RubyPort.cc
index f707af36f..ea1ff04f0 100644
--- a/src/mem/ruby/system/RubyPort.cc
+++ b/src/mem/ruby/system/RubyPort.cc
@@ -318,7 +318,7 @@ bool
RubyPort::M5Port::sendTiming(PacketPtr pkt)
{
//minimum latency, must be > 0
- schedSendTiming(pkt, curTick + (1 * g_eventQueue_ptr->getClock()));
+ schedSendTiming(pkt, curTick() + (1 * g_eventQueue_ptr->getClock()));
return true;
}
@@ -326,7 +326,7 @@ bool
RubyPort::PioPort::sendTiming(PacketPtr pkt)
{
//minimum latency, must be > 0
- schedSendTiming(pkt, curTick + (1 * g_eventQueue_ptr->getClock()));
+ schedSendTiming(pkt, curTick() + (1 * g_eventQueue_ptr->getClock()));
return true;
}
diff --git a/src/mem/ruby/system/Sequencer.cc b/src/mem/ruby/system/Sequencer.cc
index 1a0f8a66a..6357980f2 100644
--- a/src/mem/ruby/system/Sequencer.cc
+++ b/src/mem/ruby/system/Sequencer.cc
@@ -132,7 +132,7 @@ Sequencer::wakeup()
// If there are still outstanding requests, keep checking
schedule(deadlockCheckEvent,
m_deadlock_threshold * g_eventQueue_ptr->getClock() +
- curTick);
+ curTick());
}
}
@@ -223,7 +223,7 @@ Sequencer::insertRequest(SequencerRequest* request)
// See if we should schedule a deadlock check
if (deadlockCheckEvent.scheduled() == false) {
- schedule(deadlockCheckEvent, m_deadlock_threshold + curTick);
+ schedule(deadlockCheckEvent, m_deadlock_threshold + curTick());
}
Address line_addr(request->ruby_request.paddr);
diff --git a/src/mem/ruby/system/System.cc b/src/mem/ruby/system/System.cc
index a6d0d87d6..a704724ac 100644
--- a/src/mem/ruby/system/System.cc
+++ b/src/mem/ruby/system/System.cc
@@ -159,7 +159,7 @@ RubySystem::unserialize(Checkpoint *cp, const string &section)
//
// The main purpose for clearing stats in the unserialize process is so
// that the profiler can correctly set its start time to the unserialized
- // value of curTick
+ // value of curTick()
//
clearStats();
}
diff --git a/src/mem/tport.cc b/src/mem/tport.cc
index 4e89544e3..61f9e143c 100644
--- a/src/mem/tport.cc
+++ b/src/mem/tport.cc
@@ -95,7 +95,7 @@ SimpleTimingPort::recvTiming(PacketPtr pkt)
// recvAtomic() should already have turned packet into
// atomic response
assert(pkt->isResponse());
- schedSendTiming(pkt, curTick + latency);
+ schedSendTiming(pkt, curTick() + latency);
} else {
delete pkt;
}
@@ -107,8 +107,8 @@ SimpleTimingPort::recvTiming(PacketPtr pkt)
void
SimpleTimingPort::schedSendTiming(PacketPtr pkt, Tick when)
{
- assert(when > curTick);
- assert(when < curTick + SimClock::Int::ms);
+ assert(when > curTick());
+ assert(when < curTick() + SimClock::Int::ms);
// Nothing is on the list: add it and schedule an event
if (transmitList.empty() || when < transmitList.front().tick) {
@@ -152,7 +152,7 @@ SimpleTimingPort::sendDeferredPacket()
if (success) {
if (!transmitList.empty() && !sendEvent->scheduled()) {
Tick time = transmitList.front().tick;
- schedule(sendEvent, time <= curTick ? curTick+1 : time);
+ schedule(sendEvent, time <= curTick() ? curTick()+1 : time);
}
if (transmitList.empty() && drainEvent && !sendEvent->scheduled()) {
diff --git a/src/mem/tport.hh b/src/mem/tport.hh
index 7dfe60b72..91a8ab9a5 100644
--- a/src/mem/tport.hh
+++ b/src/mem/tport.hh
@@ -100,7 +100,7 @@ class SimpleTimingPort : public Port
/** Check whether we have a packet ready to go on the transmit list. */
bool deferredPacketReady()
- { return !transmitList.empty() && transmitList.front().tick <= curTick; }
+ { return !transmitList.empty() && transmitList.front().tick <= curTick(); }
Tick deferredPacketReadyTime()
{ return transmitList.empty() ? MaxTick : transmitList.front().tick; }
@@ -129,7 +129,7 @@ class SimpleTimingPort : public Port
/** Attempt to send the packet at the head of the deferred packet
* list. Caller must guarantee that the deferred packet list is
- * non-empty and that the head packet is scheduled for curTick (or
+ * non-empty and that the head packet is scheduled for curTick() (or
* earlier).
*/
void sendDeferredPacket();
diff --git a/src/python/m5/simulate.py b/src/python/m5/simulate.py
index 196f3b6b4..cbcc00497 100644
--- a/src/python/m5/simulate.py
+++ b/src/python/m5/simulate.py
@@ -127,7 +127,7 @@ def simulate(*args, **kwargs):
# Export curTick to user script.
def curTick():
- return internal.core.cvar.curTick
+ return internal.core.curTick()
# Python exit handlers happen in reverse order. We want to dump stats last.
atexit.register(internal.stats.dump)
diff --git a/src/python/swig/core.i b/src/python/swig/core.i
index 7201b03d8..b8f70039d 100644
--- a/src/python/swig/core.i
+++ b/src/python/swig/core.i
@@ -73,8 +73,7 @@ const bool flag_TRACING_ON;
void setClockFrequency(Tick ticksPerSecond);
-%immutable curTick;
-Tick curTick;
+Tick curTick();
class Checkpoint;
diff --git a/src/python/swig/stats.i b/src/python/swig/stats.i
index 67ce72173..1f1130937 100644
--- a/src/python/swig/stats.i
+++ b/src/python/swig/stats.i
@@ -37,6 +37,7 @@
#include "base/statistics.hh"
#include "base/stats/mysql.hh"
#include "base/stats/text.hh"
+#include "sim/core.hh"
#include "sim/stat_control.hh"
%}
@@ -51,7 +52,7 @@ void initMySQL(std::string host, std::string database, std::string user,
std::string sample);
void schedStatEvent(bool dump, bool reset,
- Tick when = curTick, Tick repeat = 0);
+ Tick when = curTick(), Tick repeat = 0);
void enable();
void prepare();
diff --git a/src/sim/core.cc b/src/sim/core.cc
index 1b7a034f0..ab75e1d9a 100644
--- a/src/sim/core.cc
+++ b/src/sim/core.cc
@@ -38,10 +38,10 @@
using namespace std;
-Tick curTick = 0;
+Tick _curTick = 0;
namespace SimClock {
-/// The simulated frequency of curTick. (In ticks per second)
+/// The simulated frequency of curTick(). (In ticks per second)
Tick Frequency;
namespace Float {
diff --git a/src/sim/core.hh b/src/sim/core.hh
index 074ce32b6..64640957e 100644
--- a/src/sim/core.hh
+++ b/src/sim/core.hh
@@ -37,11 +37,15 @@
#include "base/types.hh"
/// The universal simulation clock.
-extern Tick curTick;
+extern Tick _curTick;
+
+inline Tick curTick() { return _curTick; }
+inline void curTick(Tick newVal) { _curTick = newVal; }
+
const Tick retryTime = 1000;
namespace SimClock {
-/// The simulated frequency of curTick.
+/// The simulated frequency of curTick().
extern Tick Frequency;
namespace Float {
diff --git a/src/sim/eventq.cc b/src/sim/eventq.cc
index d46061064..71036fe7e 100644
--- a/src/sim/eventq.cc
+++ b/src/sim/eventq.cc
@@ -309,7 +309,7 @@ void
EventQueue::dump() const
{
cprintf("============================================================\n");
- cprintf("EventQueue Dump (cycle %d)\n", curTick);
+ cprintf("EventQueue Dump (cycle %d)\n", curTick());
cprintf("------------------------------------------------------------\n");
if (empty())
diff --git a/src/sim/eventq.hh b/src/sim/eventq.hh
index 3869ca287..e28c43bb7 100644
--- a/src/sim/eventq.hh
+++ b/src/sim/eventq.hh
@@ -136,7 +136,7 @@ class Event : public Serializable, public FastAlloc
queue = q;
#endif
#ifdef EVENTQ_DEBUG
- whenScheduled = curTick;
+ whenScheduled = curTick();
#endif
}
@@ -254,7 +254,7 @@ class Event : public Serializable, public FastAlloc
queue = NULL;
#endif
#ifdef EVENTQ_DEBUG
- whenCreated = curTick;
+ whenCreated = curTick();
whenScheduled = 0;
#endif
}
@@ -405,15 +405,15 @@ class EventQueue : public Serializable
}
}
- // default: process all events up to 'now' (curTick)
- void serviceEvents() { serviceEvents(curTick); }
+ // default: process all events up to 'now' (curTick())
+ void serviceEvents() { serviceEvents(curTick()); }
// return true if no events are queued
bool empty() const { return head == NULL; }
void dump() const;
- Tick nextEventTime() { return empty() ? curTick : head->when(); }
+ Tick nextEventTime() { return empty() ? curTick() : head->when(); }
bool debugVerify() const;
@@ -486,7 +486,7 @@ class EventManager
inline void
EventQueue::schedule(Event *event, Tick when)
{
- assert((UTick)when >= (UTick)curTick);
+ assert((UTick)when >= (UTick)curTick());
assert(!event->scheduled());
assert(event->initialized());
@@ -523,7 +523,7 @@ EventQueue::deschedule(Event *event)
inline void
EventQueue::reschedule(Event *event, Tick when, bool always)
{
- assert(when >= curTick);
+ assert(when >= curTick());
assert(always || event->scheduled());
assert(event->initialized());
diff --git a/src/sim/init.cc b/src/sim/init.cc
index bd835917e..a9aa905ee 100644
--- a/src/sim/init.cc
+++ b/src/sim/init.cc
@@ -75,7 +75,7 @@ exitNowHandler(int sigtype)
void
abortHandler(int sigtype)
{
- ccprintf(cerr, "Program aborted at cycle %d\n", curTick);
+ ccprintf(cerr, "Program aborted at cycle %d\n", curTick());
}
/*
diff --git a/src/sim/pseudo_inst.cc b/src/sim/pseudo_inst.cc
index df6ba1781..9ee473014 100644
--- a/src/sim/pseudo_inst.cc
+++ b/src/sim/pseudo_inst.cc
@@ -95,7 +95,7 @@ quiesceNs(ThreadContext *tc, uint64_t ns)
EndQuiesceEvent *quiesceEvent = tc->getQuiesceEvent();
- Tick resume = curTick + SimClock::Int::ns * ns;
+ Tick resume = curTick() + SimClock::Int::ns * ns;
cpu->reschedule(quiesceEvent, resume, true);
@@ -117,7 +117,7 @@ quiesceCycles(ThreadContext *tc, uint64_t cycles)
EndQuiesceEvent *quiesceEvent = tc->getQuiesceEvent();
- Tick resume = curTick + cpu->ticks(cycles);
+ Tick resume = curTick() + cpu->ticks(cycles);
cpu->reschedule(quiesceEvent, resume, true);
@@ -141,7 +141,7 @@ quiesceTime(ThreadContext *tc)
uint64_t
rpns(ThreadContext *tc)
{
- return curTick / SimClock::Int::ns;
+ return curTick() / SimClock::Int::ns;
}
void
@@ -156,7 +156,7 @@ wakeCPU(ThreadContext *tc, uint64_t cpuid)
void
m5exit(ThreadContext *tc, Tick delay)
{
- Tick when = curTick + delay * SimClock::Int::ns;
+ Tick when = curTick() + delay * SimClock::Int::ns;
exitSimLoop("m5_exit instruction encountered", 0, when);
}
@@ -233,7 +233,7 @@ resetstats(ThreadContext *tc, Tick delay, Tick period)
return;
- Tick when = curTick + delay * SimClock::Int::ns;
+ Tick when = curTick() + delay * SimClock::Int::ns;
Tick repeat = period * SimClock::Int::ns;
Stats::schedStatEvent(false, true, when, repeat);
@@ -246,7 +246,7 @@ dumpstats(ThreadContext *tc, Tick delay, Tick period)
return;
- Tick when = curTick + delay * SimClock::Int::ns;
+ Tick when = curTick() + delay * SimClock::Int::ns;
Tick repeat = period * SimClock::Int::ns;
Stats::schedStatEvent(true, false, when, repeat);
@@ -259,7 +259,7 @@ dumpresetstats(ThreadContext *tc, Tick delay, Tick period)
return;
- Tick when = curTick + delay * SimClock::Int::ns;
+ Tick when = curTick() + delay * SimClock::Int::ns;
Tick repeat = period * SimClock::Int::ns;
Stats::schedStatEvent(true, true, when, repeat);
@@ -271,7 +271,7 @@ m5checkpoint(ThreadContext *tc, Tick delay, Tick period)
if (!tc->getCpuPtr()->params()->do_checkpoint_insts)
return;
- Tick when = curTick + delay * SimClock::Int::ns;
+ Tick when = curTick() + delay * SimClock::Int::ns;
Tick repeat = period * SimClock::Int::ns;
exitSimLoop("checkpoint", 0, when, repeat);
diff --git a/src/sim/serialize.cc b/src/sim/serialize.cc
index aa343d0e9..b050027e2 100644
--- a/src/sim/serialize.cc
+++ b/src/sim/serialize.cc
@@ -400,7 +400,7 @@ void
Globals::serialize(ostream &os)
{
nameOut(os);
- SERIALIZE_SCALAR(curTick);
+ paramOut(os, "curTick", curTick());
nameOut(os, "MainEventQueue");
mainEventQueue.serialize(os);
@@ -410,7 +410,9 @@ void
Globals::unserialize(Checkpoint *cp)
{
const string &section = name();
- UNSERIALIZE_SCALAR(curTick);
+ Tick tick;
+ paramIn(cp, section, "curTick", tick);
+ curTick(tick);
mainEventQueue.unserialize(cp, "MainEventQueue");
}
@@ -533,10 +535,10 @@ string Checkpoint::currentDirectory;
string
Checkpoint::setDir(const string &name)
{
- // use csprintf to insert curTick into directory name if it
+ // use csprintf to insert curTick() into directory name if it
// appears to have a format placeholder in it.
currentDirectory = (name.find("%") != string::npos) ?
- csprintf(name, curTick) : name;
+ csprintf(name, curTick()) : name;
if (currentDirectory[currentDirectory.size() - 1] != '/')
currentDirectory += "/";
return currentDirectory;
diff --git a/src/sim/serialize.hh b/src/sim/serialize.hh
index d785605f3..5ea632ea4 100644
--- a/src/sim/serialize.hh
+++ b/src/sim/serialize.hh
@@ -250,7 +250,7 @@ class Checkpoint
public:
// Set the current directory. This function takes care of
- // inserting curTick if there's a '%d' in the argument, and
+ // inserting curTick() if there's a '%d' in the argument, and
// appends a '/' if necessary. The final name is returned.
static std::string setDir(const std::string &base_name);
diff --git a/src/sim/sim_events.cc b/src/sim/sim_events.cc
index 6d743cac6..aac844429 100644
--- a/src/sim/sim_events.cc
+++ b/src/sim/sim_events.cc
@@ -66,7 +66,7 @@ SimLoopExitEvent::process()
// but if you are doing this on intervals, don't forget to make another
if (repeat) {
assert(getFlags(IsMainQueue));
- mainEventQueue.schedule(this, curTick + repeat);
+ mainEventQueue.schedule(this, curTick() + repeat);
}
}
diff --git a/src/sim/sim_exit.hh b/src/sim/sim_exit.hh
index 589f6993e..da71cc912 100644
--- a/src/sim/sim_exit.hh
+++ b/src/sim/sim_exit.hh
@@ -47,10 +47,10 @@ class SimLoopExitEvent;
void registerExitCallback(Callback *);
/// Schedule an event to exit the simulation loop (returning to
-/// Python) at the end of the current cycle (curTick). The message
+/// Python) at the end of the current cycle (curTick()). The message
/// and exit_code parameters are saved in the SimLoopExitEvent to
/// indicate why the exit occurred.
void exitSimLoop(const std::string &message, int exit_code = 0,
- Tick when = curTick, Tick repeat = 0);
+ Tick when = curTick(), Tick repeat = 0);
#endif // __SIM_EXIT_HH__
diff --git a/src/sim/sim_object.hh b/src/sim/sim_object.hh
index 567288e96..d57a56ab8 100644
--- a/src/sim/sim_object.hh
+++ b/src/sim/sim_object.hh
@@ -131,7 +131,7 @@ class SimObject : public EventManager, public Serializable
/**
* startup() is the final initialization call before simulation.
* All state is initialized (including unserialized state, if any,
- * such as the curTick value), so this is the appropriate place to
+ * such as the curTick() value), so this is the appropriate place to
* schedule initial event(s) for objects that need them.
*/
virtual void startup();
diff --git a/src/sim/simulate.cc b/src/sim/simulate.cc
index de33cce1c..5e69f0ff4 100644
--- a/src/sim/simulate.cc
+++ b/src/sim/simulate.cc
@@ -47,14 +47,14 @@
SimLoopExitEvent *
simulate(Tick num_cycles)
{
- inform("Entering event queue @ %d. Starting simulation...\n", curTick);
+ inform("Entering event queue @ %d. Starting simulation...\n", curTick());
if (num_cycles < 0)
fatal("simulate: num_cycles must be >= 0 (was %d)\n", num_cycles);
- else if (curTick + num_cycles < 0) //Overflow
+ else if (curTick() + num_cycles < 0) //Overflow
num_cycles = MaxTick;
else
- num_cycles = curTick + num_cycles;
+ num_cycles = curTick() + num_cycles;
Event *limit_event =
new SimLoopExitEvent("simulate() limit reached", 0);
@@ -64,12 +64,12 @@ simulate(Tick num_cycles)
// there should always be at least one event (the SimLoopExitEvent
// we just scheduled) in the queue
assert(!mainEventQueue.empty());
- assert(curTick <= mainEventQueue.nextTick() &&
+ assert(curTick() <= mainEventQueue.nextTick() &&
"event scheduled in the past");
// forward current cycle to the time of the first event on the
// queue
- curTick = mainEventQueue.nextTick();
+ curTick(mainEventQueue.nextTick());
Event *exit_event = mainEventQueue.serviceOne();
if (exit_event != NULL) {
// hit some kind of exit event; return to Python
diff --git a/src/sim/stat_control.cc b/src/sim/stat_control.cc
index 5985eb0a4..f2c7c8a2e 100644
--- a/src/sim/stat_control.cc
+++ b/src/sim/stat_control.cc
@@ -66,7 +66,7 @@ struct SimTicksReset : public Callback
void process()
{
statTime.set();
- startTick = curTick;
+ startTick = curTick();
}
};
@@ -81,7 +81,7 @@ statElapsedTime()
Tick
statElapsedTicks()
{
- return curTick - startTick;
+ return curTick() - startTick;
}
SimTicksReset simTicksReset;
@@ -189,7 +189,7 @@ class StatEvent : public Event
Stats::reset();
if (repeat) {
- Stats::schedStatEvent(dump, reset, curTick + repeat, repeat);
+ Stats::schedStatEvent(dump, reset, curTick() + repeat, repeat);
}
}
};
diff --git a/src/sim/stat_control.hh b/src/sim/stat_control.hh
index ac3c44960..c41a9482a 100644
--- a/src/sim/stat_control.hh
+++ b/src/sim/stat_control.hh
@@ -34,7 +34,7 @@
namespace Stats {
void initSimStats();
-void schedStatEvent(bool dump, bool reset, Tick when = curTick,
+void schedStatEvent(bool dump, bool reset, Tick when = curTick(),
Tick repeat = 0);
} // namespace Stats
diff --git a/src/sim/syscall_emul.cc b/src/sim/syscall_emul.cc
index 60bb59790..e0469744e 100644
--- a/src/sim/syscall_emul.cc
+++ b/src/sim/syscall_emul.cc
@@ -59,7 +59,7 @@ SyscallDesc::doSyscall(int callnum, LiveProcess *process, ThreadContext *tc)
#endif
DPRINTFR(SyscallVerbose,
"%d: %s: syscall %s called w/arguments %d,%d,%d,%d\n",
- curTick, tc->getCpuPtr()->name(), name,
+ curTick(), tc->getCpuPtr()->name(), name,
process->getSyscallArg(tc, index),
process->getSyscallArg(tc, index),
process->getSyscallArg(tc, index),
@@ -68,7 +68,7 @@ SyscallDesc::doSyscall(int callnum, LiveProcess *process, ThreadContext *tc)
SyscallReturn retval = (*funcPtr)(this, callnum, process, tc);
DPRINTFR(SyscallVerbose, "%d: %s: syscall %s returns %d\n",
- curTick,tc->getCpuPtr()->name(), name, retval.value());
+ curTick(),tc->getCpuPtr()->name(), name, retval.value());
if (!(flags & SyscallDesc::SuppressReturnValue))
process->setSyscallReturn(tc, retval);
diff --git a/src/sim/syscall_emul.hh b/src/sim/syscall_emul.hh
index f2847b2e5..1dc51ad56 100644
--- a/src/sim/syscall_emul.hh
+++ b/src/sim/syscall_emul.hh
@@ -363,7 +363,7 @@ template <class T1, class T2>
void
getElapsedTime(T1 &sec, T2 &usec)
{
- int elapsed_usecs = curTick / SimClock::Int::us;
+ int elapsed_usecs = curTick() / SimClock::Int::us;
sec = elapsed_usecs / one_million;
usec = elapsed_usecs % one_million;
}
@@ -1190,7 +1190,7 @@ timesFunc(SyscallDesc *desc, int callnum, LiveProcess *process,
TypedBufferArg<typename OS::tms> bufp(process->getSyscallArg(tc, index));
// Fill in the time structure (in clocks)
- int64_t clocks = curTick * OS::M5_SC_CLK_TCK / SimClock::Int::s;
+ int64_t clocks = curTick() * OS::M5_SC_CLK_TCK / SimClock::Int::s;
bufp->tms_utime = clocks;
bufp->tms_stime = 0;
bufp->tms_cutime = 0;
diff --git a/src/unittest/stattest.cc b/src/unittest/stattest.cc
index 7c7b116a8..0c9a07c00 100644
--- a/src/unittest/stattest.cc
+++ b/src/unittest/stattest.cc
@@ -332,7 +332,7 @@ main(int argc, char *argv[])
s15[9].sample(1234);
s10.sample(1000000000);
- curTick += ULL(1000000);
+ curTick(curTick() + ULL(1000000));
s10.sample(100000);
s10.sample(100000);
s10.sample(100000);
@@ -407,59 +407,59 @@ main(int argc, char *argv[])
s15[0].sample(1234);
s15[1].sample(4134);
- curTick += ULL(1000000);
+ curTick(curTick() + ULL(1000000));
s15[4].sample(1213);
- curTick += ULL(1000000);
+ curTick(curTick() + ULL(1000000));
s15[3].sample(1124);
- curTick += ULL(1000000);
+ curTick(curTick() + ULL(1000000));
s15[2].sample(1243);
- curTick += ULL(1000000);
+ curTick(curTick() + ULL(1000000));
s15[7].sample(1244);
- curTick += ULL(1000000);
+ curTick(curTick() + ULL(1000000));
s15[4].sample(7234);
s15[2].sample(9234);
s15[3].sample(1764);
s15[7].sample(1564);
s15[3].sample(3234);
s15[1].sample(2234);
- curTick += ULL(1000000);
+ curTick(curTick() + ULL(1000000));
s15[5].sample(1234);
- curTick += ULL(1000000);
+ curTick(curTick() + ULL(1000000));
s15[9].sample(4334);
- curTick += ULL(1000000);
+ curTick(curTick() + ULL(1000000));
s15[2].sample(1234);
- curTick += ULL(1000000);
+ curTick(curTick() + ULL(1000000));
s15[4].sample(4334);
s15[6].sample(1234);
- curTick += ULL(1000000);
+ curTick(curTick() + ULL(1000000));
s15[8].sample(8734);
- curTick += ULL(1000000);
+ curTick(curTick() + ULL(1000000));
s15[1].sample(5234);
- curTick += ULL(1000000);
+ curTick(curTick() + ULL(1000000));
s15[3].sample(8234);
- curTick += ULL(1000000);
+ curTick(curTick() + ULL(1000000));
s15[7].sample(5234);
s15[4].sample(4434);
s15[3].sample(7234);
s15[2].sample(1934);
s15[1].sample(9234);
- curTick += ULL(1000000);
+ curTick(curTick() + ULL(1000000));
s15[5].sample(5634);
s15[3].sample(1264);
s15[7].sample(5223);
s15[0].sample(1234);
s15[0].sample(5434);
s15[3].sample(8634);
- curTick += ULL(1000000);
+ curTick(curTick() + ULL(1000000));
s15[1].sample(1234);
- s4 = curTick;
+ s4 = curTick();
s8[3] = 99999;
s3 = 12;
s3++;
- curTick += 9;
+ curTick(curTick() + 9);
s1 = 9;
s1 += 9;
@@ -534,8 +534,8 @@ main(int argc, char *argv[])
s9.sample(10);
s9.sample(10);
- curTick += 9;
- s4 = curTick;
+ curTick(curTick() + 9);
+ s4 = curTick();
s6.sample(100);
s6.sample(100);
s6.sample(100);