author      Andreas Hansson <andreas.hansson@arm.com>    2012-08-28 14:30:33 -0400
committer   Andreas Hansson <andreas.hansson@arm.com>    2012-08-28 14:30:33 -0400
commit      0cacf7e8178defce4063b7cfc8a592c595f56fa2 (patch)
tree        ac2a57952c3d8b87b1a2d0190d26ab149c12f65e /src
parent      d53d04473e0d6ca1765f1117072eec59187a7f7b (diff)
download    gem5-0cacf7e8178defce4063b7cfc8a592c595f56fa2.tar.xz
Clock: Add a Cycles wrapper class and use where applicable
This patch addresses the comments and feedback on the preceding patch
that reworks the clocks and now more clearly shows where cycles
(relative cycle counts) are used to express time.
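
As an illustration (not part of the original commit message), here is a minimal sketch of how the wrapper added in src/base/types.hh behaves; it is simplified from the class definition visible in the diff below, which also defines ++, --, += and > operators, and the activate() consumer here is hypothetical:

```cpp
#include <cstdint>

// Simplified sketch of the Cycles wrapper this patch adds in src/base/types.hh.
class Cycles
{
  private:
    uint64_t c;  // the underlying relative cycle count

  public:
    explicit Cycles(uint64_t _c) : c(_c) { }  // explicit: no silent int -> Cycles
    operator uint64_t() const { return c; }   // converting back out is allowed
};

// Hypothetical consumer, for illustration only.
void activate(Cycles delay) { }

int main()
{
    activate(Cycles(0));  // OK: the caller states that 0 is a cycle count
    // activate(0);       // would not compile: ints and Ticks no longer convert implicitly
    return 0;
}
```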
Instead of bumping the existing patch I chose to make this a separate
patch, merely to try and focus the discussion around a smaller set of
changes. The two patches will be pushed together though.
The changes done as part of this patch mostly follow directly
from the introduction of the wrapper class, and change enough code to
make things compile and run again. There are definitely more places
where int/uint/Tick is still used to represent cycles, and it will
take some time to chase them all down. Similarly, a lot of parameters
should be changed from Param.Tick and Param.Unsigned to
Param.Cycles.
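
The bulk of the diff below is this kind of mechanical call-site conversion. The example below is condensed from the src/arch/mips/isa.cc hunk; the surrounding gem5 declarations (BaseCPU, CP0Event, clockEdge and so on) are assumed from that file:

```cpp
// Before this patch: the delay is a bare int, converted to an absolute
// tick by hand at the call site.
//
//   void
//   ISA::scheduleCP0Update(BaseCPU *cpu, int delay)
//   {
//       ...
//       cpu->schedule(cp0_event, curTick() + cpu->ticks(delay));
//   }

// After this patch: the delay carries its unit in the type, and the
// clocked object turns the cycle count into a tick on its own clock.
void
ISA::scheduleCP0Update(BaseCPU *cpu, Cycles delay)
{
    if (!cp0Updated) {
        cp0Updated = true;

        // schedule the CP0 update event relative to the CPU's clock
        CP0Event *cp0_event = new CP0Event(this, cpu, UpdateCP0);
        cpu->schedule(cp0_event, cpu->clockEdge(delay));
    }
}
```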
In addition, the use of curTick is questionable as there should not be
an absolute cycle. Potential solutions can be built on top of this
patch. There is a similar situation in the o3 CPU where
lastRunningCycle is currently counting in Cycles, and is still an
absolute time. More discussion to be had in other words.
An additional change that would be appropriate in the future is to
perform a similar wrapping of Tick and probably also introduce a
Ticks class along with suitable operators for all these classes.
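
Purely as an illustration of that suggestion (nothing like this exists in this patch), a Ticks wrapper mirroring the Cycles class added in src/base/types.hh might look like:

```cpp
#include <cstdint>

// Hypothetical sketch only: a type-safe wrapper for absolute simulation
// time in ticks, following the same pattern as the Cycles class above.
class Ticks
{
  private:
    uint64_t t;  // the underlying tick value

  public:
    explicit Ticks(uint64_t _t) : t(_t) { }

    operator uint64_t() const { return t; }

    Ticks &operator+=(const Ticks &other)
    { t += other.t; return *this; }

    bool operator>(const Ticks &other) const
    { return t > other.t; }
};
```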
Diffstat (limited to 'src')
83 files changed, 340 insertions, 256 deletions
diff --git a/src/arch/alpha/mmapped_ipr.hh b/src/arch/alpha/mmapped_ipr.hh index 6c3403b33..24f7ce335 100644 --- a/src/arch/alpha/mmapped_ipr.hh +++ b/src/arch/alpha/mmapped_ipr.hh @@ -44,14 +44,14 @@ class ThreadContext; namespace AlphaISA { -inline Tick +inline Cycles handleIprRead(ThreadContext *xc, Packet *pkt) { panic("No handleIprRead implementation in Alpha\n"); } -inline Tick +inline Cycles handleIprWrite(ThreadContext *xc, Packet *pkt) { panic("No handleIprWrite implementation in Alpha\n"); diff --git a/src/arch/alpha/utility.hh b/src/arch/alpha/utility.hh index a9b5c4cba..1cd19cc95 100644 --- a/src/arch/alpha/utility.hh +++ b/src/arch/alpha/utility.hh @@ -67,7 +67,8 @@ void zeroRegisters(TC *tc); // Alpha IPR register accessors inline bool PcPAL(Addr addr) { return addr & 0x3; } -inline void startupCPU(ThreadContext *tc, int cpuId) { tc->activate(0); } +inline void startupCPU(ThreadContext *tc, int cpuId) +{ tc->activate(Cycles(0)); } //////////////////////////////////////////////////////////////////////// // diff --git a/src/arch/arm/mmapped_ipr.hh b/src/arch/arm/mmapped_ipr.hh index 0f90ac35d..474aacbcf 100644 --- a/src/arch/arm/mmapped_ipr.hh +++ b/src/arch/arm/mmapped_ipr.hh @@ -46,13 +46,13 @@ class ThreadContext; namespace ArmISA { -inline Tick +inline Cycles handleIprRead(ThreadContext *xc, Packet *pkt) { panic("No implementation for handleIprRead in ARM\n"); } -inline Tick +inline Cycles handleIprWrite(ThreadContext *xc, Packet *pkt) { panic("No implementation for handleIprWrite in ARM\n"); diff --git a/src/arch/arm/table_walker.cc b/src/arch/arm/table_walker.cc index ffa193fbe..77cc662b3 100644 --- a/src/arch/arm/table_walker.cc +++ b/src/arch/arm/table_walker.cc @@ -240,15 +240,16 @@ TableWalker::processWalk() if (currState->timing) { port.dmaAction(MemCmd::ReadReq, l1desc_addr, sizeof(uint32_t), &doL1DescEvent, (uint8_t*)&currState->l1Desc.data, - currState->tc->getCpuPtr()->ticks(1), flag); - DPRINTF(TLBVerbose, "Adding to walker fifo: queue size before adding: %d\n", + currState->tc->getCpuPtr()->clockPeriod(), flag); + DPRINTF(TLBVerbose, "Adding to walker fifo: queue size before " + "adding: %d\n", stateQueueL1.size()); stateQueueL1.push_back(currState); currState = NULL; } else if (!currState->functional) { port.dmaAction(MemCmd::ReadReq, l1desc_addr, sizeof(uint32_t), NULL, (uint8_t*)&currState->l1Desc.data, - currState->tc->getCpuPtr()->ticks(1), flag); + currState->tc->getCpuPtr()->clockPeriod(), flag); doL1Descriptor(); f = currState->fault; } else { @@ -588,12 +589,12 @@ TableWalker::doL1Descriptor() if (currState->timing) { currState->delayed = true; port.dmaAction(MemCmd::ReadReq, l2desc_addr, sizeof(uint32_t), - &doL2DescEvent, (uint8_t*)&currState->l2Desc.data, - currState->tc->getCpuPtr()->ticks(1)); + &doL2DescEvent, (uint8_t*)&currState->l2Desc.data, + currState->tc->getCpuPtr()->clockPeriod()); } else if (!currState->functional) { port.dmaAction(MemCmd::ReadReq, l2desc_addr, sizeof(uint32_t), - NULL, (uint8_t*)&currState->l2Desc.data, - currState->tc->getCpuPtr()->ticks(1)); + NULL, (uint8_t*)&currState->l2Desc.data, + currState->tc->getCpuPtr()->clockPeriod()); doL2Descriptor(); } else { RequestPtr req = new Request(l2desc_addr, sizeof(uint32_t), 0, @@ -758,7 +759,7 @@ void TableWalker::nextWalk(ThreadContext *tc) { if (pendingQueue.size()) - schedule(doProcessEvent, tc->getCpuPtr()->clockEdge(1)); + schedule(doProcessEvent, tc->getCpuPtr()->clockEdge(Cycles(1))); } diff --git a/src/arch/arm/utility.hh b/src/arch/arm/utility.hh index 
b3b400e3c..e4fc658e0 100644 --- a/src/arch/arm/utility.hh +++ b/src/arch/arm/utility.hh @@ -102,7 +102,7 @@ void zeroRegisters(TC *tc); inline void startupCPU(ThreadContext *tc, int cpuId) { - tc->activate(0); + tc->activate(Cycles(0)); } void copyRegs(ThreadContext *src, ThreadContext *dest); diff --git a/src/arch/mips/isa.cc b/src/arch/mips/isa.cc index 6a525ed3a..f6de102cd 100644 --- a/src/arch/mips/isa.cc +++ b/src/arch/mips/isa.cc @@ -482,7 +482,7 @@ ISA::setMiscReg(int misc_reg, const MiscReg &val, miscRegFile[misc_reg][reg_sel] = cp0_val; - scheduleCP0Update(tc->getCpuPtr(), 1); + scheduleCP0Update(tc->getCpuPtr(), Cycles(1)); } /** @@ -511,14 +511,14 @@ ISA::filterCP0Write(int misc_reg, int reg_sel, const MiscReg &val) } void -ISA::scheduleCP0Update(BaseCPU *cpu, int delay) +ISA::scheduleCP0Update(BaseCPU *cpu, Cycles delay) { if (!cp0Updated) { cp0Updated = true; //schedule UPDATE CP0Event *cp0_event = new CP0Event(this, cpu, UpdateCP0); - cpu->schedule(cp0_event, curTick() + cpu->ticks(delay)); + cpu->schedule(cp0_event, cpu->clockEdge(delay)); } } @@ -573,9 +573,9 @@ ISA::CP0Event::description() const } void -ISA::CP0Event::scheduleEvent(int delay) +ISA::CP0Event::scheduleEvent(Cycles delay) { - cpu->reschedule(this, curTick() + cpu->ticks(delay), true); + cpu->reschedule(this, cpu->clockEdge(delay), true); } void diff --git a/src/arch/mips/isa.hh b/src/arch/mips/isa.hh index 720c7725e..a313b4382 100644 --- a/src/arch/mips/isa.hh +++ b/src/arch/mips/isa.hh @@ -136,14 +136,14 @@ namespace MipsISA const char *description() const; /** Schedule This Event */ - void scheduleEvent(int delay); + void scheduleEvent(Cycles delay); /** Unschedule This Event */ void unscheduleEvent(); }; // Schedule a CP0 Update Event - void scheduleCP0Update(BaseCPU *cpu, int delay = 0); + void scheduleCP0Update(BaseCPU *cpu, Cycles delay = Cycles(0)); // If any changes have been made, then check the state for changes // and if necessary alert the CPU diff --git a/src/arch/mips/mmapped_ipr.hh b/src/arch/mips/mmapped_ipr.hh index 14d6e3f42..4c84d05f2 100644 --- a/src/arch/mips/mmapped_ipr.hh +++ b/src/arch/mips/mmapped_ipr.hh @@ -45,13 +45,13 @@ class ThreadContext; namespace MipsISA { -inline Tick +inline Cycles handleIprRead(ThreadContext *xc, Packet *pkt) { panic("No implementation for handleIprRead in MIPS\n"); } -inline Tick +inline Cycles handleIprWrite(ThreadContext *xc, Packet *pkt) { panic("No implementation for handleIprWrite in MIPS\n"); diff --git a/src/arch/mips/mt.hh b/src/arch/mips/mt.hh index f163d3240..02e98a170 100755 --- a/src/arch/mips/mt.hh +++ b/src/arch/mips/mt.hh @@ -96,7 +96,7 @@ restoreThread(TC *tc) // TODO: SET PC WITH AN EVENT INSTEAD OF INSTANTANEOUSLY tc->pcState(restartPC); - tc->activate(0); + tc->activate(Cycles(0)); warn("%i: Restoring thread %i in %s @ PC %x", curTick(), tc->threadId(), tc->getCpuPtr()->name(), restartPC); diff --git a/src/arch/mips/utility.cc b/src/arch/mips/utility.cc index 65432b4ea..f84819756 100644 --- a/src/arch/mips/utility.cc +++ b/src/arch/mips/utility.cc @@ -231,7 +231,7 @@ zeroRegisters(CPU *cpu) void startupCPU(ThreadContext *tc, int cpuId) { - tc->activate(0/*tc->threadId()*/); + tc->activate(Cycles(0)); } void diff --git a/src/arch/power/mmapped_ipr.hh b/src/arch/power/mmapped_ipr.hh index a55ef8f7d..142253462 100644 --- a/src/arch/power/mmapped_ipr.hh +++ b/src/arch/power/mmapped_ipr.hh @@ -49,13 +49,13 @@ class ThreadContext; namespace PowerISA { -inline Tick +inline Cycles handleIprRead(ThreadContext *xc, Packet *pkt) { panic("No 
implementation for handleIprRead in POWER\n"); } -inline Tick +inline Cycles handleIprWrite(ThreadContext *xc, Packet *pkt) { panic("No implementation for handleIprWrite in POWER\n"); diff --git a/src/arch/power/utility.hh b/src/arch/power/utility.hh index c3868c189..8d9f97436 100644 --- a/src/arch/power/utility.hh +++ b/src/arch/power/utility.hh @@ -59,7 +59,7 @@ void zeroRegisters(TC *tc); inline void startupCPU(ThreadContext *tc, int cpuId) { - tc->activate(0); + tc->activate(Cycles(0)); } void diff --git a/src/arch/sparc/mmapped_ipr.hh b/src/arch/sparc/mmapped_ipr.hh index c13fdc910..153944e9d 100644 --- a/src/arch/sparc/mmapped_ipr.hh +++ b/src/arch/sparc/mmapped_ipr.hh @@ -44,13 +44,13 @@ namespace SparcISA { -inline Tick +inline Cycles handleIprRead(ThreadContext *xc, Packet *pkt) { return xc->getDTBPtr()->doMmuRegRead(xc, pkt); } -inline Tick +inline Cycles handleIprWrite(ThreadContext *xc, Packet *pkt) { return xc->getDTBPtr()->doMmuRegWrite(xc, pkt); diff --git a/src/arch/sparc/tlb.cc b/src/arch/sparc/tlb.cc index 37f1479b0..9faf297d6 100644 --- a/src/arch/sparc/tlb.cc +++ b/src/arch/sparc/tlb.cc @@ -848,7 +848,7 @@ TLB::translateFunctional(RequestPtr req, ThreadContext *tc, Mode mode) return NoFault; } -Tick +Cycles TLB::doMmuRegRead(ThreadContext *tc, Packet *pkt) { Addr va = pkt->getAddr(); @@ -1030,10 +1030,10 @@ doMmuReadError: (uint32_t)asi, va); } pkt->makeAtomicResponse(); - return tc->getCpuPtr()->ticks(1); + return Cycles(1); } -Tick +Cycles TLB::doMmuRegWrite(ThreadContext *tc, Packet *pkt) { uint64_t data = pkt->get<uint64_t>(); @@ -1283,7 +1283,7 @@ doMmuWriteError: (uint32_t)pkt->req->getAsi(), pkt->getAddr(), data); } pkt->makeAtomicResponse(); - return tc->getCpuPtr()->ticks(1); + return Cycles(1); } void diff --git a/src/arch/sparc/tlb.hh b/src/arch/sparc/tlb.hh index 89a049a8b..abbe2df3c 100644 --- a/src/arch/sparc/tlb.hh +++ b/src/arch/sparc/tlb.hh @@ -168,8 +168,8 @@ class TLB : public BaseTLB * does not support the Checker model at the moment */ Fault translateFunctional(RequestPtr req, ThreadContext *tc, Mode mode); - Tick doMmuRegRead(ThreadContext *tc, Packet *pkt); - Tick doMmuRegWrite(ThreadContext *tc, Packet *pkt); + Cycles doMmuRegRead(ThreadContext *tc, Packet *pkt); + Cycles doMmuRegWrite(ThreadContext *tc, Packet *pkt); void GetTsbPtr(ThreadContext *tc, Addr addr, int ctx, Addr *ptrs); // Checkpointing diff --git a/src/arch/sparc/ua2005.cc b/src/arch/sparc/ua2005.cc index 5948e0713..d3708d861 100644 --- a/src/arch/sparc/ua2005.cc +++ b/src/arch/sparc/ua2005.cc @@ -114,7 +114,7 @@ ISA::setFSReg(int miscReg, const MiscReg &val, ThreadContext *tc) if (!(tick_cmpr & ~mask(63)) && time > 0) { if (tickCompare->scheduled()) cpu->deschedule(tickCompare); - cpu->schedule(tickCompare, curTick() + time * cpu->ticks(1)); + cpu->schedule(tickCompare, cpu->clockEdge(Cycles(time))); } panic("writing to TICK compare register %#X\n", val); break; @@ -130,7 +130,7 @@ ISA::setFSReg(int miscReg, const MiscReg &val, ThreadContext *tc) if (!(stick_cmpr & ~mask(63)) && time > 0) { if (sTickCompare->scheduled()) cpu->deschedule(sTickCompare); - cpu->schedule(sTickCompare, curTick() + time * cpu->ticks(1)); + cpu->schedule(sTickCompare, cpu->clockEdge(Cycles(time))); } DPRINTF(Timer, "writing to sTICK compare register value %#X\n", val); break; @@ -200,7 +200,7 @@ ISA::setFSReg(int miscReg, const MiscReg &val, ThreadContext *tc) if (!(hstick_cmpr & ~mask(63)) && time > 0) { if (hSTickCompare->scheduled()) cpu->deschedule(hSTickCompare); - cpu->schedule(hSTickCompare, 
curTick() + time * cpu->ticks(1)); + cpu->schedule(hSTickCompare, cpu->clockEdge(Cycles(time))); } DPRINTF(Timer, "writing to hsTICK compare register value %#X\n", val); break; @@ -329,19 +329,19 @@ ISA::processSTickCompare(ThreadContext *tc) // since our microcode instructions take two cycles we need to check if // we're actually at the correct cycle or we need to wait a little while // more - int ticks; - ticks = ((int64_t)(stick_cmpr & mask(63)) - (int64_t)stick) - + int delay; + delay = ((int64_t)(stick_cmpr & mask(63)) - (int64_t)stick) - cpu->instCount(); - assert(ticks >= 0 && "stick compare missed interrupt cycle"); + assert(delay >= 0 && "stick compare missed interrupt cycle"); - if (ticks == 0 || tc->status() == ThreadContext::Suspended) { + if (delay == 0 || tc->status() == ThreadContext::Suspended) { DPRINTF(Timer, "STick compare cycle reached at %#x\n", (stick_cmpr & mask(63))); if (!(tc->readMiscRegNoEffect(MISCREG_STICK_CMPR) & (ULL(1) << 63))) { setMiscReg(MISCREG_SOFTINT, softint | (ULL(1) << 16), tc); } } else { - cpu->schedule(sTickCompare, curTick() + ticks * cpu->ticks(1)); + cpu->schedule(sTickCompare, cpu->clockEdge(Cycles(delay))); } } @@ -353,15 +353,15 @@ ISA::processHSTickCompare(ThreadContext *tc) // since our microcode instructions take two cycles we need to check if // we're actually at the correct cycle or we need to wait a little while // more - int ticks; + int delay; if ( tc->status() == ThreadContext::Halted) return; - ticks = ((int64_t)(hstick_cmpr & mask(63)) - (int64_t)stick) - + delay = ((int64_t)(hstick_cmpr & mask(63)) - (int64_t)stick) - cpu->instCount(); - assert(ticks >= 0 && "hstick compare missed interrupt cycle"); + assert(delay >= 0 && "hstick compare missed interrupt cycle"); - if (ticks == 0 || tc->status() == ThreadContext::Suspended) { + if (delay == 0 || tc->status() == ThreadContext::Suspended) { DPRINTF(Timer, "HSTick compare cycle reached at %#x\n", (stick_cmpr & mask(63))); if (!(tc->readMiscRegNoEffect(MISCREG_HSTICK_CMPR) & (ULL(1) << 63))) { @@ -369,7 +369,7 @@ ISA::processHSTickCompare(ThreadContext *tc) } // Need to do something to cause interrupt to happen here !!! @todo } else { - cpu->schedule(hSTickCompare, curTick() + ticks * cpu->ticks(1)); + cpu->schedule(hSTickCompare, cpu->clockEdge(Cycles(delay))); } } diff --git a/src/arch/sparc/utility.hh b/src/arch/sparc/utility.hh index b8e3b3f0e..285a40c26 100644 --- a/src/arch/sparc/utility.hh +++ b/src/arch/sparc/utility.hh @@ -77,7 +77,7 @@ startupCPU(ThreadContext *tc, int cpuId) { // Other CPUs will get activated by IPIs if (cpuId == 0 || !FullSystem) - tc->activate(0); + tc->activate(Cycles(0)); } void copyRegs(ThreadContext *src, ThreadContext *dest); diff --git a/src/arch/x86/mmapped_ipr.hh b/src/arch/x86/mmapped_ipr.hh index f17b64cad..02c125171 100644 --- a/src/arch/x86/mmapped_ipr.hh +++ b/src/arch/x86/mmapped_ipr.hh @@ -53,7 +53,7 @@ namespace X86ISA { - inline Tick + inline Cycles handleIprRead(ThreadContext *xc, Packet *pkt) { Addr offset = pkt->getAddr() & mask(3); @@ -62,10 +62,10 @@ namespace X86ISA // Make sure we don't trot off the end of data. 
assert(offset + pkt->getSize() <= sizeof(MiscReg)); pkt->setData(((uint8_t *)&data) + offset); - return 1; + return Cycles(1); } - inline Tick + inline Cycles handleIprWrite(ThreadContext *xc, Packet *pkt) { Addr offset = pkt->getAddr() & mask(3); @@ -76,7 +76,7 @@ namespace X86ISA assert(offset + pkt->getSize() <= sizeof(MiscReg)); pkt->writeData(((uint8_t *)&data) + offset); xc->setMiscReg(index, gtoh(data)); - return 1; + return Cycles(1); } } diff --git a/src/arch/x86/utility.cc b/src/arch/x86/utility.cc index acca97c49..65c1a9d32 100644 --- a/src/arch/x86/utility.cc +++ b/src/arch/x86/utility.cc @@ -176,7 +176,7 @@ void initCPU(ThreadContext *tc, int cpuId) // @todo: Control the relative frequency, in this case 16:1, of // the clocks in the Python code - interrupts->setClock(tc->getCpuPtr()->ticks(16)); + interrupts->setClock(tc->getCpuPtr()->clockPeriod() * 16); // TODO Set the SMRAM base address (SMBASE) to 0x00030000 @@ -189,12 +189,12 @@ void initCPU(ThreadContext *tc, int cpuId) void startupCPU(ThreadContext *tc, int cpuId) { if (cpuId == 0 || !FullSystem) { - tc->activate(0); + tc->activate(Cycles(0)); } else { // This is an application processor (AP). It should be initialized to // look like only the BIOS POST has run on it and put then put it into // a halted state. - tc->suspend(0); + tc->suspend(Cycles(0)); } } diff --git a/src/base/types.hh b/src/base/types.hh index ba6d53ad7..4caf92c97 100644 --- a/src/base/types.hh +++ b/src/base/types.hh @@ -39,6 +39,8 @@ #include <inttypes.h> +#include <cassert> + /** uint64_t constant */ #define ULL(N) ((uint64_t)N##ULL) /** int64_t constant */ @@ -58,6 +60,61 @@ typedef uint64_t Tick; const Tick MaxTick = ULL(0xffffffffffffffff); /** + * Cycles is a wrapper class for representing cycle counts, i.e. a + * relative difference between two points in time, expressed in a + * number of clock cycles. + * + * The Cycles wrapper class is a type-safe alternative to a + * typedef, aiming to avoid unintentional mixing of cycles and ticks + * in the code base. + * + * Operators are defined inside an ifndef block to avoid swig touching + * them. Note that there is no overloading of the bool operator as the + * compiler is allowed to turn booleans into integers and this causes + * a whole range of issues in a handful locations. The solution to + * this problem would be to use the safe bool idiom, but for now we + * make do without the test and use the more elaborate comparison > + * Cycles(0). + */ +class Cycles +{ + + private: + + /** Member holding the actual value. */ + uint64_t c; + + public: + + /** Explicit constructor assigning a value. */ + explicit Cycles(uint64_t _c) : c(_c) { } + +#ifndef SWIG // keep the operators away from SWIG + + /** Converting back to the value type. */ + operator uint64_t() const { return c; } + + /** Prefix increment operator. */ + Cycles& operator++() + { ++c; return *this; } + + /** Prefix decrement operator. Is only temporarily used in the O3 CPU. */ + Cycles& operator--() + { assert(c != 0); --c; return *this; } + + /** In-place addition of cycles. */ + const Cycles& operator+=(const Cycles& cc) + { c += cc.c; return *this; } + + /** Greater than comparison used for > Cycles(0). */ + bool operator>(const Cycles& cc) const + { return c > cc.c; } + +#endif // SWIG not touching operators + +}; + +/** * Address type * This will probably be moved somewhere else in the near future. 
* This should be at least as big as the biggest address width in use diff --git a/src/cpu/BaseCPU.py b/src/cpu/BaseCPU.py index a4ad2fe5a..c27fd1c27 100644 --- a/src/cpu/BaseCPU.py +++ b/src/cpu/BaseCPU.py @@ -139,8 +139,8 @@ class BaseCPU(MemObject): "terminate when all threads have reached this load count") max_loads_any_thread = Param.Counter(0, "terminate when any thread reaches this load count") - progress_interval = Param.Tick(0, - "interval to print out the progress message") + progress_interval = Param.Frequency('0Hz', + "frequency to print out the progress message") defer_registration = Param.Bool(False, "defer registration with system (for sampling)") diff --git a/src/cpu/base.hh b/src/cpu/base.hh index aab6ac4ca..82864ae7b 100644 --- a/src/cpu/base.hh +++ b/src/cpu/base.hh @@ -246,7 +246,7 @@ class BaseCPU : public MemObject /// Notify the CPU that the indicated context is now active. The /// delay parameter indicates the number of ticks to wait before /// executing (typically 0 or 1). - virtual void activateContext(ThreadID thread_num, int delay) {} + virtual void activateContext(ThreadID thread_num, Cycles delay) {} /// Notify the CPU that the indicated context is now suspended. virtual void suspendContext(ThreadID thread_num) {} diff --git a/src/cpu/checker/thread_context.hh b/src/cpu/checker/thread_context.hh index 0da73a137..967f15572 100644 --- a/src/cpu/checker/thread_context.hh +++ b/src/cpu/checker/thread_context.hh @@ -156,13 +156,14 @@ class CheckerThreadContext : public ThreadContext /// Set the status to Active. Optional delay indicates number of /// cycles to wait before beginning execution. - void activate(int delay = 1) { actualTC->activate(delay); } + void activate(Cycles delay = Cycles(1)) + { actualTC->activate(delay); } /// Set the status to Suspended. - void suspend(int delay) { actualTC->suspend(delay); } + void suspend(Cycles delay) { actualTC->suspend(delay); } /// Set the status to Halted. - void halt(int delay) { actualTC->halt(delay); } + void halt(Cycles delay) { actualTC->halt(delay); } void dumpFuncProfile() { actualTC->dumpFuncProfile(); } diff --git a/src/cpu/inorder/cpu.cc b/src/cpu/inorder/cpu.cc index ec06f19f0..3dad7d1f4 100644 --- a/src/cpu/inorder/cpu.cc +++ b/src/cpu/inorder/cpu.cc @@ -209,7 +209,7 @@ InOrderCPU::CPUEvent::description() const } void -InOrderCPU::CPUEvent::scheduleEvent(int delay) +InOrderCPU::CPUEvent::scheduleEvent(Cycles delay) { assert(!scheduled() || squashed()); cpu->reschedule(this, cpu->clockEdge(delay), true); @@ -407,7 +407,7 @@ InOrderCPU::InOrderCPU(Params *params) lockFlag = false; // Schedule First Tick Event, CPU will reschedule itself from here on out. 
- scheduleTickEvent(0); + scheduleTickEvent(Cycles(0)); } InOrderCPU::~InOrderCPU() @@ -769,9 +769,9 @@ InOrderCPU::tick() } else { //Tick next_tick = curTick() + cycles(1); //tickEvent.schedule(next_tick); - schedule(&tickEvent, clockEdge(1)); + schedule(&tickEvent, clockEdge(Cycles(1))); DPRINTF(InOrderCPU, "Scheduled CPU for next tick @ %i.\n", - clockEdge(1)); + clockEdge(Cycles(1))); } } @@ -877,7 +877,7 @@ InOrderCPU::checkForInterrupts() // Schedule Squash Through-out Resource Pool resPool->scheduleEvent( (InOrderCPU::CPUEventType)ResourcePool::SquashAll, - dummyTrapInst[tid], 0); + dummyTrapInst[tid], Cycles(0)); // Finally, Setup Trap to happen at end of cycle trapContext(interrupt, tid, dummyTrapInst[tid]); @@ -912,7 +912,8 @@ InOrderCPU::processInterrupts(Fault interrupt) } void -InOrderCPU::trapContext(Fault fault, ThreadID tid, DynInstPtr inst, int delay) +InOrderCPU::trapContext(Fault fault, ThreadID tid, DynInstPtr inst, + Cycles delay) { scheduleCpuEvent(Trap, fault, tid, inst, delay); trapPending[tid] = true; @@ -926,7 +927,8 @@ InOrderCPU::trap(Fault fault, ThreadID tid, DynInstPtr inst) } void -InOrderCPU::squashFromMemStall(DynInstPtr inst, ThreadID tid, int delay) +InOrderCPU::squashFromMemStall(DynInstPtr inst, ThreadID tid, + Cycles delay) { scheduleCpuEvent(SquashFromMemStall, NoFault, tid, inst, delay); } @@ -954,7 +956,7 @@ InOrderCPU::squashDueToMemStall(int stage_num, InstSeqNum seq_num, void InOrderCPU::scheduleCpuEvent(CPUEventType c_event, Fault fault, ThreadID tid, DynInstPtr inst, - unsigned delay, CPUEventPri event_pri) + Cycles delay, CPUEventPri event_pri) { CPUEvent *cpu_event = new CPUEvent(this, c_event, fault, tid, inst, event_pri); @@ -967,7 +969,8 @@ InOrderCPU::scheduleCpuEvent(CPUEventType c_event, Fault fault, // Broadcast event to the Resource Pool // Need to reset tid just in case this is a dummy instruction inst->setTid(tid); - resPool->scheduleEvent(c_event, inst, 0, 0, tid); + // @todo: Is this really right? Should the delay not be passed on? 
+ resPool->scheduleEvent(c_event, inst, Cycles(0), 0, tid); } bool @@ -1071,7 +1074,7 @@ InOrderCPU::activateThreadInPipeline(ThreadID tid) } void -InOrderCPU::deactivateContext(ThreadID tid, int delay) +InOrderCPU::deactivateContext(ThreadID tid, Cycles delay) { DPRINTF(InOrderCPU,"[tid:%i]: Deactivating ...\n", tid); @@ -1153,7 +1156,7 @@ InOrderCPU::tickThreadStats() } void -InOrderCPU::activateContext(ThreadID tid, int delay) +InOrderCPU::activateContext(ThreadID tid, Cycles delay) { DPRINTF(InOrderCPU,"[tid:%i]: Activating ...\n", tid); @@ -1168,7 +1171,7 @@ InOrderCPU::activateContext(ThreadID tid, int delay) } void -InOrderCPU::activateNextReadyContext(int delay) +InOrderCPU::activateNextReadyContext(Cycles delay) { DPRINTF(InOrderCPU,"Activating next ready thread\n"); @@ -1719,7 +1722,8 @@ InOrderCPU::wakeup() } void -InOrderCPU::syscallContext(Fault fault, ThreadID tid, DynInstPtr inst, int delay) +InOrderCPU::syscallContext(Fault fault, ThreadID tid, DynInstPtr inst, + Cycles delay) { // Syscall must be non-speculative, so squash from last stage unsigned squash_stage = NumStages - 1; @@ -1730,7 +1734,8 @@ InOrderCPU::syscallContext(Fault fault, ThreadID tid, DynInstPtr inst, int delay // Schedule Squash Through-out Resource Pool resPool->scheduleEvent( - (InOrderCPU::CPUEventType)ResourcePool::SquashAll, inst, 0); + (InOrderCPU::CPUEventType)ResourcePool::SquashAll, inst, + Cycles(0)); scheduleCpuEvent(Syscall, fault, tid, inst, delay, Syscall_Pri); } diff --git a/src/cpu/inorder/cpu.hh b/src/cpu/inorder/cpu.hh index 9a0a62c87..a0fe834e8 100644 --- a/src/cpu/inorder/cpu.hh +++ b/src/cpu/inorder/cpu.hh @@ -201,7 +201,7 @@ class InOrderCPU : public BaseCPU TickEvent tickEvent; /** Schedule tick event, regardless of its current state. */ - void scheduleTickEvent(int delay) + void scheduleTickEvent(Cycles delay) { assert(!tickEvent.scheduled() || tickEvent.squashed()); reschedule(&tickEvent, clockEdge(delay), true); @@ -279,7 +279,7 @@ class InOrderCPU : public BaseCPU const char *description() const; /** Schedule Event */ - void scheduleEvent(int delay); + void scheduleEvent(Cycles delay); /** Unschedule This Event */ void unscheduleEvent(); @@ -287,7 +287,7 @@ class InOrderCPU : public BaseCPU /** Schedule a CPU Event */ void scheduleCpuEvent(CPUEventType cpu_event, Fault fault, ThreadID tid, - DynInstPtr inst, unsigned delay = 0, + DynInstPtr inst, Cycles delay = Cycles(0), CPUEventPri event_pri = InOrderCPU_Pri); public: @@ -479,19 +479,20 @@ class InOrderCPU : public BaseCPU /** Schedule a syscall on the CPU */ void syscallContext(Fault fault, ThreadID tid, DynInstPtr inst, - int delay = 0); + Cycles delay = Cycles(0)); /** Executes a syscall.*/ void syscall(int64_t callnum, ThreadID tid); /** Schedule a trap on the CPU */ - void trapContext(Fault fault, ThreadID tid, DynInstPtr inst, int delay = 0); + void trapContext(Fault fault, ThreadID tid, DynInstPtr inst, + Cycles delay = Cycles(0)); /** Perform trap to Handle Given Fault */ void trap(Fault fault, ThreadID tid, DynInstPtr inst); /** Schedule thread activation on the CPU */ - void activateContext(ThreadID tid, int delay = 0); + void activateContext(ThreadID tid, Cycles delay = Cycles(0)); /** Add Thread to Active Threads List. 
*/ void activateThread(ThreadID tid); @@ -500,13 +501,13 @@ class InOrderCPU : public BaseCPU void activateThreadInPipeline(ThreadID tid); /** Schedule Thread Activation from Ready List */ - void activateNextReadyContext(int delay = 0); + void activateNextReadyContext(Cycles delay = Cycles(0)); /** Add Thread From Ready List to Active Threads List. */ void activateNextReadyThread(); /** Schedule a thread deactivation on the CPU */ - void deactivateContext(ThreadID tid, int delay = 0); + void deactivateContext(ThreadID tid, Cycles delay = Cycles(0)); /** Remove from Active Thread List */ void deactivateThread(ThreadID tid); @@ -529,7 +530,8 @@ class InOrderCPU : public BaseCPU * squashDueToMemStall() - squashes pipeline * @note: maybe squashContext/squashThread would be better? */ - void squashFromMemStall(DynInstPtr inst, ThreadID tid, int delay = 0); + void squashFromMemStall(DynInstPtr inst, ThreadID tid, + Cycles delay = Cycles(0)); void squashDueToMemStall(int stage_num, InstSeqNum seq_num, ThreadID tid); void removePipelineStalls(ThreadID tid); diff --git a/src/cpu/inorder/pipeline_stage.cc b/src/cpu/inorder/pipeline_stage.cc index 4dec38629..d98fbb744 100644 --- a/src/cpu/inorder/pipeline_stage.cc +++ b/src/cpu/inorder/pipeline_stage.cc @@ -556,7 +556,7 @@ PipelineStage::activateThread(ThreadID tid) // prevent "double"-execution of instructions cpu->resPool->scheduleEvent((InOrderCPU::CPUEventType) ResourcePool::UpdateAfterContextSwitch, - inst, 0, 0, tid); + inst, Cycles(0), 0, tid); // Clear switchout buffer switchedOutBuffer[tid] = NULL; diff --git a/src/cpu/inorder/resource.cc b/src/cpu/inorder/resource.cc index 098a4e1b4..c732b8519 100644 --- a/src/cpu/inorder/resource.cc +++ b/src/cpu/inorder/resource.cc @@ -44,7 +44,7 @@ using namespace std; Resource::Resource(string res_name, int res_id, int res_width, - int res_latency, InOrderCPU *_cpu) + Cycles res_latency, InOrderCPU *_cpu) : resName(res_name), id(res_id), width(res_width), latency(res_latency), cpu(_cpu), resourceEvent(NULL) @@ -76,7 +76,7 @@ Resource::init() // If the resource has a zero-cycle (no latency) // function, then no reason to have events // that will process them for the right tick - if (latency > 0) + if (latency > Cycles(0)) resourceEvent = new ResourceEvent[width]; @@ -296,7 +296,8 @@ Resource::setupSquash(DynInstPtr inst, int stage_num, ThreadID tid) // Schedule Squash Through-out Resource Pool cpu->resPool->scheduleEvent( - (InOrderCPU::CPUEventType)ResourcePool::SquashAll, inst, 0); + (InOrderCPU::CPUEventType)ResourcePool::SquashAll, inst, + Cycles(0)); } void @@ -321,7 +322,7 @@ Resource::squash(DynInstPtr inst, int stage_num, InstSeqNum squash_seq_num, int req_slot_num = req_ptr->getSlot(); - if (latency > 0) { + if (latency > Cycles(0)) { if (resourceEvent[req_slot_num].scheduled()) unscheduleEvent(req_slot_num); } @@ -362,17 +363,10 @@ Resource::squashThenTrap(int stage_num, DynInstPtr inst) cpu->trapContext(inst->fault, tid, inst); } -Tick -Resource::ticks(int num_cycles) -{ - return cpu->ticks(num_cycles); -} - - void Resource::scheduleExecution(int slot_num) { - if (latency > 0) { + if (latency > Cycles(0)) { scheduleEvent(slot_num, latency); } else { execute(slot_num); @@ -380,17 +374,17 @@ Resource::scheduleExecution(int slot_num) } void -Resource::scheduleEvent(int slot_idx, int delay) +Resource::scheduleEvent(int slot_idx, Cycles delay) { DPRINTF(Resource, "[tid:%i]: Scheduling event for [sn:%i] on tick %i.\n", reqs[slot_idx]->inst->readTid(), reqs[slot_idx]->inst->seqNum, - 
cpu->ticks(delay) + curTick()); + cpu->clockEdge(delay)); resourceEvent[slot_idx].scheduleEvent(delay); } bool -Resource::scheduleEvent(DynInstPtr inst, int delay) +Resource::scheduleEvent(DynInstPtr inst, Cycles delay) { int slot_idx = findSlot(inst); @@ -521,9 +515,9 @@ ResourceEvent::description() const } void -ResourceEvent::scheduleEvent(int delay) +ResourceEvent::scheduleEvent(Cycles delay) { assert(!scheduled() || squashed()); resource->cpu->reschedule(this, - curTick() + resource->ticks(delay), true); + resource->cpu->clockEdge(delay), true); } diff --git a/src/cpu/inorder/resource.hh b/src/cpu/inorder/resource.hh index 3c1a8cc47..ef712d5c9 100644 --- a/src/cpu/inorder/resource.hh +++ b/src/cpu/inorder/resource.hh @@ -63,7 +63,7 @@ class Resource { public: Resource(std::string res_name, int res_id, int res_width, - int res_latency, InOrderCPU *_cpu); + Cycles res_latency, InOrderCPU *_cpu); virtual ~Resource(); @@ -178,11 +178,11 @@ class Resource { int slotsInUse(); /** Schedule resource event, regardless of its current state. */ - void scheduleEvent(int slot_idx, int delay); + void scheduleEvent(int slot_idx, Cycles delay); /** Find instruction in list, Schedule resource event, regardless of its * current state. */ - bool scheduleEvent(DynInstPtr inst, int delay); + bool scheduleEvent(DynInstPtr inst, Cycles delay); /** Unschedule resource event, regardless of its current state. */ void unscheduleEvent(int slot_idx); @@ -190,9 +190,6 @@ class Resource { /** Unschedule resource event, regardless of its current state. */ bool unscheduleEvent(DynInstPtr inst); - /** Return the number of cycles in 'Tick' format */ - Tick ticks(int numCycles); - /** Find the request that corresponds to this instruction */ virtual ResReqPtr findRequest(DynInstPtr inst); @@ -206,7 +203,7 @@ class Resource { /** Return Latency of Resource */ /* Can be overridden for complex cases */ - virtual int getLatency(int slot_num) { return latency; } + virtual Cycles getLatency(int slot_num) { return latency; } protected: /** The name of this resource */ @@ -226,7 +223,7 @@ class Resource { * Note: Dynamic latency resources set this to 0 and * manage the latency themselves */ - const int latency; + const Cycles latency; public: /** List of all Requests the Resource is Servicing. Each request @@ -287,7 +284,7 @@ class ResourceEvent : public Event void setSlot(int slot) { slotIdx = slot; } /** Schedule resource event, regardless of its current state. */ - void scheduleEvent(int delay); + void scheduleEvent(Cycles delay); /** Unschedule resource event, regardless of its current state. */ void unscheduleEvent() diff --git a/src/cpu/inorder/resource_pool.cc b/src/cpu/inorder/resource_pool.cc index 31511314e..c09f6c31d 100644 --- a/src/cpu/inorder/resource_pool.cc +++ b/src/cpu/inorder/resource_pool.cc @@ -64,54 +64,57 @@ ResourcePool::ResourcePool(InOrderCPU *_cpu, ThePipeline::Params *params) // name - id - bandwidth - latency - CPU - Parameters // -------------------------------------------------- resources.push_back(new FetchSeqUnit("fetch_seq_unit", FetchSeq, - stage_width * 2, 0, _cpu, params)); + stage_width * 2, Cycles(0), + _cpu, params)); // Keep track of the instruction fetch unit so we can easily // provide a pointer to it in the CPU. 
instUnit = new FetchUnit("icache_port", ICache, - stage_width * 2 + MaxThreads, 0, _cpu, + stage_width * 2 + MaxThreads, Cycles(0), _cpu, params); resources.push_back(instUnit); resources.push_back(new DecodeUnit("decode_unit", Decode, - stage_width, 0, _cpu, params)); + stage_width, Cycles(0), _cpu, + params)); resources.push_back(new BranchPredictor("branch_predictor", BPred, - stage_width, 0, _cpu, params)); + stage_width, Cycles(0), + _cpu, params)); resources.push_back(new InstBuffer("fetch_buffer_t0", FetchBuff, 4, - 0, _cpu, params)); + Cycles(0), _cpu, params)); resources.push_back(new UseDefUnit("regfile_manager", RegManager, - stage_width * 3, 0, _cpu, + stage_width * 3, Cycles(0), _cpu, params)); resources.push_back(new AGENUnit("agen_unit", AGEN, - stage_width, 0, _cpu, params)); + stage_width, Cycles(0), _cpu, + params)); resources.push_back(new ExecutionUnit("execution_unit", ExecUnit, - stage_width, 0, _cpu, params)); + stage_width, Cycles(0), _cpu, + params)); resources.push_back(new MultDivUnit("mult_div_unit", MDU, - stage_width * 2, - 0, - _cpu, - params)); + stage_width * 2, Cycles(0), + _cpu, params)); // Keep track of the data load/store unit so we can easily provide // a pointer to it in the CPU. dataUnit = new CacheUnit("dcache_port", DCache, - stage_width * 2 + MaxThreads, 0, _cpu, + stage_width * 2 + MaxThreads, Cycles(0), _cpu, params); resources.push_back(dataUnit); gradObjects.push_back(BPred); resources.push_back(new GraduationUnit("graduation_unit", Grad, - stage_width, 0, _cpu, + stage_width, Cycles(0), _cpu, params)); resources.push_back(new InstBuffer("fetch_buffer_t1", FetchBuff2, 4, - 0, _cpu, params)); + Cycles(0), _cpu, params)); } @@ -234,7 +237,7 @@ ResourcePool::slotsInUse(int res_idx) // to the event construction void ResourcePool::scheduleEvent(InOrderCPU::CPUEventType e_type, DynInstPtr inst, - int delay, int res_idx, ThreadID tid) + Cycles delay, int res_idx, ThreadID tid) { assert(delay >= 0); @@ -456,7 +459,7 @@ ResourcePool::ResPoolEvent::description() const /** Schedule resource event, regardless of its current state. */ void -ResourcePool::ResPoolEvent::scheduleEvent(int delay) +ResourcePool::ResPoolEvent::scheduleEvent(Cycles delay) { InOrderCPU *cpu = resPool->cpu; assert(!scheduled() || squashed()); diff --git a/src/cpu/inorder/resource_pool.hh b/src/cpu/inorder/resource_pool.hh index 9e0952236..207967d06 100644 --- a/src/cpu/inorder/resource_pool.hh +++ b/src/cpu/inorder/resource_pool.hh @@ -132,7 +132,7 @@ class ResourcePool { const char *description() const; /** Schedule Event */ - void scheduleEvent(int delay); + void scheduleEvent(Cycles delay); /** Unschedule This Event */ void unscheduleEvent(); @@ -206,7 +206,8 @@ class ResourcePool { /** Schedule resource event, regardless of its current state. */ void scheduleEvent(InOrderCPU::CPUEventType e_type, DynInstPtr inst = NULL, - int delay = 0, int res_idx = 0, ThreadID tid = 0); + Cycles delay = Cycles(0), int res_idx = 0, + ThreadID tid = 0); /** UnSchedule resource event, regardless of its current state. 
*/ void unscheduleEvent(int res_idx, DynInstPtr inst); diff --git a/src/cpu/inorder/resources/agen_unit.cc b/src/cpu/inorder/resources/agen_unit.cc index 7be8a23f2..f978b2fa7 100644 --- a/src/cpu/inorder/resources/agen_unit.cc +++ b/src/cpu/inorder/resources/agen_unit.cc @@ -33,7 +33,7 @@ #include "debug/InOrderAGEN.hh" AGENUnit::AGENUnit(std::string res_name, int res_id, int res_width, - int res_latency, InOrderCPU *_cpu, + Cycles res_latency, InOrderCPU *_cpu, ThePipeline::Params *params) : Resource(res_name, res_id, res_width, res_latency, _cpu) { } diff --git a/src/cpu/inorder/resources/agen_unit.hh b/src/cpu/inorder/resources/agen_unit.hh index f208ec680..5c67b4c2f 100644 --- a/src/cpu/inorder/resources/agen_unit.hh +++ b/src/cpu/inorder/resources/agen_unit.hh @@ -48,7 +48,8 @@ class AGENUnit : public Resource { public: AGENUnit(std::string res_name, int res_id, int res_width, - int res_latency, InOrderCPU *_cpu, ThePipeline::Params *params); + Cycles res_latency, InOrderCPU *_cpu, + ThePipeline::Params *params); enum Command { GenerateAddr diff --git a/src/cpu/inorder/resources/branch_predictor.cc b/src/cpu/inorder/resources/branch_predictor.cc index 65b95ff31..004cf8b63 100644 --- a/src/cpu/inorder/resources/branch_predictor.cc +++ b/src/cpu/inorder/resources/branch_predictor.cc @@ -39,8 +39,9 @@ using namespace std; using namespace TheISA; using namespace ThePipeline; -BranchPredictor::BranchPredictor(std::string res_name, int res_id, int res_width, - int res_latency, InOrderCPU *_cpu, +BranchPredictor::BranchPredictor(std::string res_name, int res_id, + int res_width, Cycles res_latency, + InOrderCPU *_cpu, ThePipeline::Params *params) : Resource(res_name, res_id, res_width, res_latency, _cpu), branchPred(this, params) diff --git a/src/cpu/inorder/resources/branch_predictor.hh b/src/cpu/inorder/resources/branch_predictor.hh index 72b216806..dde340ce7 100644 --- a/src/cpu/inorder/resources/branch_predictor.hh +++ b/src/cpu/inorder/resources/branch_predictor.hh @@ -54,7 +54,8 @@ class BranchPredictor : public Resource { public: BranchPredictor(std::string res_name, int res_id, int res_width, - int res_latency, InOrderCPU *_cpu, ThePipeline::Params *params); + Cycles res_latency, InOrderCPU *_cpu, + ThePipeline::Params *params); void regStats(); diff --git a/src/cpu/inorder/resources/cache_unit.cc b/src/cpu/inorder/resources/cache_unit.cc index 21d7bb6e2..e380c79d4 100644 --- a/src/cpu/inorder/resources/cache_unit.cc +++ b/src/cpu/inorder/resources/cache_unit.cc @@ -67,7 +67,8 @@ printMemData(uint8_t *data, unsigned size) #endif CacheUnit::CacheUnit(string res_name, int res_id, int res_width, - int res_latency, InOrderCPU *_cpu, ThePipeline::Params *params) + Cycles res_latency, InOrderCPU *_cpu, + ThePipeline::Params *params) : Resource(res_name, res_id, res_width, res_latency, _cpu), cachePort(NULL), cachePortBlocked(false) { diff --git a/src/cpu/inorder/resources/cache_unit.hh b/src/cpu/inorder/resources/cache_unit.hh index dda39a7a5..9a7faf9cd 100644 --- a/src/cpu/inorder/resources/cache_unit.hh +++ b/src/cpu/inorder/resources/cache_unit.hh @@ -58,7 +58,8 @@ class CacheUnit : public Resource public: CacheUnit(std::string res_name, int res_id, int res_width, - int res_latency, InOrderCPU *_cpu, ThePipeline::Params *params); + Cycles res_latency, InOrderCPU *_cpu, + ThePipeline::Params *params); enum Command { InitiateReadData, diff --git a/src/cpu/inorder/resources/decode_unit.cc b/src/cpu/inorder/resources/decode_unit.cc index d0cf7ffb2..7b7eccd0a 100644 --- 
a/src/cpu/inorder/resources/decode_unit.cc +++ b/src/cpu/inorder/resources/decode_unit.cc @@ -40,7 +40,7 @@ using namespace ThePipeline; using namespace std; DecodeUnit::DecodeUnit(std::string res_name, int res_id, int res_width, - int res_latency, InOrderCPU *_cpu, + Cycles res_latency, InOrderCPU *_cpu, ThePipeline::Params *params) : Resource(res_name, res_id, res_width, res_latency, _cpu) { diff --git a/src/cpu/inorder/resources/decode_unit.hh b/src/cpu/inorder/resources/decode_unit.hh index 084c0008f..65f82a94b 100644 --- a/src/cpu/inorder/resources/decode_unit.hh +++ b/src/cpu/inorder/resources/decode_unit.hh @@ -48,7 +48,8 @@ class DecodeUnit : public Resource { public: DecodeUnit(std::string res_name, int res_id, int res_width, - int res_latency, InOrderCPU *_cpu, ThePipeline::Params *params); + Cycles res_latency, InOrderCPU *_cpu, + ThePipeline::Params *params); enum Command { DecodeInst diff --git a/src/cpu/inorder/resources/execution_unit.cc b/src/cpu/inorder/resources/execution_unit.cc index 16f737308..296d5126f 100644 --- a/src/cpu/inorder/resources/execution_unit.cc +++ b/src/cpu/inorder/resources/execution_unit.cc @@ -44,7 +44,7 @@ using namespace std; using namespace ThePipeline; ExecutionUnit::ExecutionUnit(string res_name, int res_id, int res_width, - int res_latency, InOrderCPU *_cpu, + Cycles res_latency, InOrderCPU *_cpu, ThePipeline::Params *params) : Resource(res_name, res_id, res_width, res_latency, _cpu), lastExecuteTick(0), lastControlTick(0) diff --git a/src/cpu/inorder/resources/execution_unit.hh b/src/cpu/inorder/resources/execution_unit.hh index bebb69ca3..e87a05c27 100644 --- a/src/cpu/inorder/resources/execution_unit.hh +++ b/src/cpu/inorder/resources/execution_unit.hh @@ -51,7 +51,8 @@ class ExecutionUnit : public Resource { public: ExecutionUnit(std::string res_name, int res_id, int res_width, - int res_latency, InOrderCPU *_cpu, ThePipeline::Params *params); + Cycles res_latency, InOrderCPU *_cpu, + ThePipeline::Params *params); public: void regStats(); diff --git a/src/cpu/inorder/resources/fetch_seq_unit.cc b/src/cpu/inorder/resources/fetch_seq_unit.cc index 6bab9ea50..3d3e3cc9b 100644 --- a/src/cpu/inorder/resources/fetch_seq_unit.cc +++ b/src/cpu/inorder/resources/fetch_seq_unit.cc @@ -40,7 +40,7 @@ using namespace TheISA; using namespace ThePipeline; FetchSeqUnit::FetchSeqUnit(std::string res_name, int res_id, int res_width, - int res_latency, InOrderCPU *_cpu, + Cycles res_latency, InOrderCPU *_cpu, ThePipeline::Params *params) : Resource(res_name, res_id, res_width, res_latency, _cpu), instSize(sizeof(MachInst)) diff --git a/src/cpu/inorder/resources/fetch_seq_unit.hh b/src/cpu/inorder/resources/fetch_seq_unit.hh index 1cd0047e2..4cb18a1c7 100644 --- a/src/cpu/inorder/resources/fetch_seq_unit.hh +++ b/src/cpu/inorder/resources/fetch_seq_unit.hh @@ -54,7 +54,8 @@ class FetchSeqUnit : public Resource { public: FetchSeqUnit(std::string res_name, int res_id, int res_width, - int res_latency, InOrderCPU *_cpu, ThePipeline::Params *params); + Cycles res_latency, InOrderCPU *_cpu, + ThePipeline::Params *params); ~FetchSeqUnit(); void init(); diff --git a/src/cpu/inorder/resources/fetch_unit.cc b/src/cpu/inorder/resources/fetch_unit.cc index 07669ef2a..0ed59fe2d 100644 --- a/src/cpu/inorder/resources/fetch_unit.cc +++ b/src/cpu/inorder/resources/fetch_unit.cc @@ -53,7 +53,7 @@ using namespace TheISA; using namespace ThePipeline; FetchUnit::FetchUnit(string res_name, int res_id, int res_width, - int res_latency, InOrderCPU *_cpu, + Cycles res_latency, 
InOrderCPU *_cpu, ThePipeline::Params *params) : CacheUnit(res_name, res_id, res_width, res_latency, _cpu, params), instSize(sizeof(TheISA::MachInst)), fetchBuffSize(params->fetchBuffSize) diff --git a/src/cpu/inorder/resources/fetch_unit.hh b/src/cpu/inorder/resources/fetch_unit.hh index 82d5d99e0..d1c7b22c0 100644 --- a/src/cpu/inorder/resources/fetch_unit.hh +++ b/src/cpu/inorder/resources/fetch_unit.hh @@ -53,7 +53,8 @@ class FetchUnit : public CacheUnit { public: FetchUnit(std::string res_name, int res_id, int res_width, - int res_latency, InOrderCPU *_cpu, ThePipeline::Params *params); + Cycles res_latency, InOrderCPU *_cpu, + ThePipeline::Params *params); virtual ~FetchUnit(); diff --git a/src/cpu/inorder/resources/graduation_unit.cc b/src/cpu/inorder/resources/graduation_unit.cc index c69e55512..ea63527b6 100644 --- a/src/cpu/inorder/resources/graduation_unit.cc +++ b/src/cpu/inorder/resources/graduation_unit.cc @@ -35,7 +35,7 @@ using namespace ThePipeline; GraduationUnit::GraduationUnit(std::string res_name, int res_id, int res_width, - int res_latency, InOrderCPU *_cpu, + Cycles res_latency, InOrderCPU *_cpu, ThePipeline::Params *params) : Resource(res_name, res_id, res_width, res_latency, _cpu) { diff --git a/src/cpu/inorder/resources/graduation_unit.hh b/src/cpu/inorder/resources/graduation_unit.hh index 836b568a6..69d3322fe 100644 --- a/src/cpu/inorder/resources/graduation_unit.hh +++ b/src/cpu/inorder/resources/graduation_unit.hh @@ -52,7 +52,7 @@ class GraduationUnit : public Resource { public: GraduationUnit(std::string res_name, int res_id, int res_width, - int res_latency, InOrderCPU *_cpu, + Cycles res_latency, InOrderCPU *_cpu, ThePipeline::Params *params); void execute(int slot_num); diff --git a/src/cpu/inorder/resources/inst_buffer.cc b/src/cpu/inorder/resources/inst_buffer.cc index d64eb79f1..19011059f 100644 --- a/src/cpu/inorder/resources/inst_buffer.cc +++ b/src/cpu/inorder/resources/inst_buffer.cc @@ -45,7 +45,7 @@ using namespace TheISA; using namespace ThePipeline; InstBuffer::InstBuffer(string res_name, int res_id, int res_width, - int res_latency, InOrderCPU *_cpu, + Cycles res_latency, InOrderCPU *_cpu, ThePipeline::Params *params) : Resource(res_name, res_id, res_width, res_latency, _cpu) { } diff --git a/src/cpu/inorder/resources/inst_buffer.hh b/src/cpu/inorder/resources/inst_buffer.hh index d0047e013..78ef900c6 100644 --- a/src/cpu/inorder/resources/inst_buffer.hh +++ b/src/cpu/inorder/resources/inst_buffer.hh @@ -56,7 +56,8 @@ class InstBuffer : public Resource { public: InstBuffer(std::string res_name, int res_id, int res_width, - int res_latency, InOrderCPU *_cpu, ThePipeline::Params *params); + Cycles res_latency, InOrderCPU *_cpu, + ThePipeline::Params *params); void regStats(); diff --git a/src/cpu/inorder/resources/mem_dep_unit.hh b/src/cpu/inorder/resources/mem_dep_unit.hh index 387bee0b9..4e512de58 100644 --- a/src/cpu/inorder/resources/mem_dep_unit.hh +++ b/src/cpu/inorder/resources/mem_dep_unit.hh @@ -47,7 +47,7 @@ class MemDepUnit : public Resource { public: MemDepUnit(std::string res_name, int res_id, int res_width, - int res_latency, InOrderCPU *_cpu); + Cycles res_latency, InOrderCPU *_cpu); virtual ~MemDepUnit() {} virtual void execute(int slot_num); diff --git a/src/cpu/inorder/resources/mult_div_unit.cc b/src/cpu/inorder/resources/mult_div_unit.cc index ab0081787..5a4d4bb55 100644 --- a/src/cpu/inorder/resources/mult_div_unit.cc +++ b/src/cpu/inorder/resources/mult_div_unit.cc @@ -43,7 +43,7 @@ using namespace std; using namespace 
ThePipeline; MultDivUnit::MultDivUnit(string res_name, int res_id, int res_width, - int res_latency, InOrderCPU *_cpu, + Cycles res_latency, InOrderCPU *_cpu, ThePipeline::Params *params) : Resource(res_name, res_id, res_width, res_latency, _cpu), multRepeatRate(params->multRepeatRate), diff --git a/src/cpu/inorder/resources/mult_div_unit.hh b/src/cpu/inorder/resources/mult_div_unit.hh index 7d179bdce..d855dbb9d 100644 --- a/src/cpu/inorder/resources/mult_div_unit.hh +++ b/src/cpu/inorder/resources/mult_div_unit.hh @@ -56,7 +56,7 @@ class MultDivUnit : public Resource { public: MultDivUnit(std::string res_name, int res_id, int res_width, - int res_latency, InOrderCPU *_cpu, + Cycles res_latency, InOrderCPU *_cpu, ThePipeline::Params *params); public: @@ -86,23 +86,23 @@ class MultDivUnit : public Resource { protected: /** Latency & Repeat Rate for Multiply Insts */ unsigned multRepeatRate; - unsigned multLatency; + Cycles multLatency; /** Latency & Repeat Rate for 8-bit Divide Insts */ unsigned div8RepeatRate; - unsigned div8Latency; + Cycles div8Latency; /** Latency & Repeat Rate for 16-bit Divide Insts */ unsigned div16RepeatRate; - unsigned div16Latency; + Cycles div16Latency; /** Latency & Repeat Rate for 24-bit Divide Insts */ unsigned div24RepeatRate; - unsigned div24Latency; + Cycles div24Latency; /** Latency & Repeat Rate for 32-bit Divide Insts */ unsigned div32RepeatRate; - unsigned div32Latency; + Cycles div32Latency; /** Last cycle that MDU was used */ Tick lastMDUCycle; diff --git a/src/cpu/inorder/resources/tlb_unit.cc b/src/cpu/inorder/resources/tlb_unit.cc index c07f6ae5f..c2619f15e 100644 --- a/src/cpu/inorder/resources/tlb_unit.cc +++ b/src/cpu/inorder/resources/tlb_unit.cc @@ -44,7 +44,8 @@ using namespace TheISA; using namespace ThePipeline; TLBUnit::TLBUnit(string res_name, int res_id, int res_width, - int res_latency, InOrderCPU *_cpu, ThePipeline::Params *params) + Cycles res_latency, InOrderCPU *_cpu, + ThePipeline::Params *params) : Resource(res_name, res_id, res_width, res_latency, _cpu) { // Hard-Code Selection For Now diff --git a/src/cpu/inorder/resources/tlb_unit.hh b/src/cpu/inorder/resources/tlb_unit.hh index 6846bdc87..916f67559 100644 --- a/src/cpu/inorder/resources/tlb_unit.hh +++ b/src/cpu/inorder/resources/tlb_unit.hh @@ -55,7 +55,8 @@ class TLBUnit : public Resource public: TLBUnit(std::string res_name, int res_id, int res_width, - int res_latency, InOrderCPU *_cpu, ThePipeline::Params *params); + Cycles res_latency, InOrderCPU *_cpu, + ThePipeline::Params *params); virtual ~TLBUnit() {} void init(); diff --git a/src/cpu/inorder/resources/use_def.cc b/src/cpu/inorder/resources/use_def.cc index 38a2eb040..e10238758 100644 --- a/src/cpu/inorder/resources/use_def.cc +++ b/src/cpu/inorder/resources/use_def.cc @@ -45,7 +45,7 @@ using namespace TheISA; using namespace ThePipeline; UseDefUnit::UseDefUnit(string res_name, int res_id, int res_width, - int res_latency, InOrderCPU *_cpu, + Cycles res_latency, InOrderCPU *_cpu, ThePipeline::Params *params) : Resource(res_name, res_id, res_width, res_latency, _cpu) { @@ -107,7 +107,7 @@ void UseDefUnit::init() { // Set Up Resource Events to Appropriate Resource BandWidth - if (latency > 0) { + if (latency > Cycles(0)) { resourceEvent = new ResourceEvent[width]; } else { resourceEvent = NULL; diff --git a/src/cpu/inorder/resources/use_def.hh b/src/cpu/inorder/resources/use_def.hh index 9581bc5f5..9eb516345 100644 --- a/src/cpu/inorder/resources/use_def.hh +++ b/src/cpu/inorder/resources/use_def.hh @@ -56,7 +56,8 @@ 
class UseDefUnit : public Resource { public: UseDefUnit(std::string res_name, int res_id, int res_width, - int res_latency, InOrderCPU *_cpu, ThePipeline::Params *params); + Cycles res_latency, InOrderCPU *_cpu, + ThePipeline::Params *params); void init(); diff --git a/src/cpu/inorder/thread_context.cc b/src/cpu/inorder/thread_context.cc index 72592c299..16ffd5b0f 100644 --- a/src/cpu/inorder/thread_context.cc +++ b/src/cpu/inorder/thread_context.cc @@ -98,7 +98,7 @@ InOrderThreadContext::takeOverFrom(ThreadContext *old_context) } void -InOrderThreadContext::activate(int delay) +InOrderThreadContext::activate(Cycles delay) { DPRINTF(InOrderCPU, "Calling activate on Thread Context %d\n", getThreadNum()); @@ -113,7 +113,7 @@ InOrderThreadContext::activate(int delay) void -InOrderThreadContext::suspend(int delay) +InOrderThreadContext::suspend(Cycles delay) { DPRINTF(InOrderCPU, "Calling suspend on Thread Context %d\n", getThreadNum()); @@ -126,7 +126,7 @@ InOrderThreadContext::suspend(int delay) } void -InOrderThreadContext::halt(int delay) +InOrderThreadContext::halt(Cycles delay) { DPRINTF(InOrderCPU, "Calling halt on Thread Context %d\n", getThreadNum()); diff --git a/src/cpu/inorder/thread_context.hh b/src/cpu/inorder/thread_context.hh index 9b588cde0..2dd55582e 100644 --- a/src/cpu/inorder/thread_context.hh +++ b/src/cpu/inorder/thread_context.hh @@ -165,13 +165,13 @@ class InOrderThreadContext : public ThreadContext /** Set the status to Active. Optional delay indicates number of * cycles to wait before beginning execution. */ - void activate(int delay = 1); + void activate(Cycles delay = Cycles(1)); /** Set the status to Suspended. */ - void suspend(int delay = 0); + void suspend(Cycles delay = Cycles(0)); /** Set the status to Halted. */ - void halt(int delay = 0); + void halt(Cycles delay = Cycles(0)); /** Takes over execution of a thread from another CPU. */ void takeOverFrom(ThreadContext *old_context); @@ -259,7 +259,7 @@ class InOrderThreadContext : public ThreadContext int flattenFloatIndex(int reg) { return cpu->isa[thread->threadId()].flattenFloatIndex(reg); } - void activateContext(int delay) + void activateContext(Cycles delay) { cpu->activateContext(thread->threadId(), delay); } void deallocateContext() diff --git a/src/cpu/o3/commit.hh b/src/cpu/o3/commit.hh index 1119e8b50..d30097553 100644 --- a/src/cpu/o3/commit.hh +++ b/src/cpu/o3/commit.hh @@ -409,7 +409,7 @@ class DefaultCommit /** The latency to handle a trap. Used when scheduling trap * squash event. */ - uint trapLatency; + Cycles trapLatency; /** The interrupt fault. */ Fault interrupt; diff --git a/src/cpu/o3/cpu.cc b/src/cpu/o3/cpu.cc index 95683a77a..fdd45fdda 100644 --- a/src/cpu/o3/cpu.cc +++ b/src/cpu/o3/cpu.cc @@ -256,7 +256,8 @@ FullO3CPU<Impl>::FullO3CPU(DerivO3CPUParams *params) globalSeqNum(1), system(params->system), drainCount(0), - deferRegistration(params->defer_registration) + deferRegistration(params->defer_registration), + lastRunningCycle(curCycle()) { if (!deferRegistration) { _status = Running; @@ -386,8 +387,6 @@ FullO3CPU<Impl>::FullO3CPU(DerivO3CPUParams *params) // Setup the ROB for whichever stages need it. 
commit.setROB(&rob); - lastRunningCycle = curCycle(); - lastActivatedCycle = 0; #if 0 // Give renameMap & rename stage access to the freeList; @@ -629,7 +628,7 @@ FullO3CPU<Impl>::tick() lastRunningCycle = curCycle(); timesIdled++; } else { - schedule(tickEvent, clockEdge(1)); + schedule(tickEvent, clockEdge(Cycles(1))); DPRINTF(O3CPU, "Scheduling next tick!\n"); } } @@ -741,12 +740,12 @@ FullO3CPU<Impl>::totalOps() const template <class Impl> void -FullO3CPU<Impl>::activateContext(ThreadID tid, int delay) +FullO3CPU<Impl>::activateContext(ThreadID tid, Cycles delay) { // Needs to set each stage to running as well. if (delay){ DPRINTF(O3CPU, "[tid:%i]: Scheduling thread context to activate " - "on cycle %d\n", tid, curTick() + ticks(delay)); + "on cycle %d\n", tid, clockEdge(delay)); scheduleActivateThreadEvent(tid, delay); } else { activateThread(tid); @@ -762,7 +761,8 @@ FullO3CPU<Impl>::activateContext(ThreadID tid, int delay) activityRec.activity(); fetch.wakeFromQuiesce(); - Tick cycles = curCycle() - lastRunningCycle; + Cycles cycles(curCycle() - lastRunningCycle); + // @todo: This is an oddity that is only here to match the stats if (cycles != 0) --cycles; quiesceCycles += cycles; @@ -776,12 +776,12 @@ FullO3CPU<Impl>::activateContext(ThreadID tid, int delay) template <class Impl> bool FullO3CPU<Impl>::scheduleDeallocateContext(ThreadID tid, bool remove, - int delay) + Cycles delay) { // Schedule removal of thread data from CPU if (delay){ DPRINTF(O3CPU, "[tid:%i]: Scheduling thread context to deallocate " - "on cycle %d\n", tid, curTick() + ticks(delay)); + "on tick %d\n", tid, clockEdge(delay)); scheduleDeallocateContextEvent(tid, remove, delay); return false; } else { @@ -797,7 +797,7 @@ void FullO3CPU<Impl>::suspendContext(ThreadID tid) { DPRINTF(O3CPU,"[tid: %i]: Suspending Thread Context.\n", tid); - bool deallocated = scheduleDeallocateContext(tid, false, 1); + bool deallocated = scheduleDeallocateContext(tid, false, Cycles(1)); // If this was the last thread then unschedule the tick event. if ((activeThreads.size() == 1 && !deallocated) || activeThreads.size() == 0) @@ -814,7 +814,7 @@ FullO3CPU<Impl>::haltContext(ThreadID tid) { //For now, this is the same as deallocate DPRINTF(O3CPU,"[tid:%i]: Halt Context called. Deallocating", tid); - scheduleDeallocateContext(tid, true, 1); + scheduleDeallocateContext(tid, true, Cycles(1)); } template <class Impl> @@ -854,7 +854,7 @@ FullO3CPU<Impl>::insertThread(ThreadID tid) src_tc->setStatus(ThreadContext::Active); - activateContext(tid,1); + activateContext(tid, Cycles(1)); //Reset ROB/IQ/LSQ Entries commit.rob->resetEntries(); @@ -1672,7 +1672,8 @@ FullO3CPU<Impl>::wakeCPU() DPRINTF(Activity, "Waking up CPU\n"); - Tick cycles = curCycle() - lastRunningCycle; + Cycles cycles(curCycle() - lastRunningCycle); + // @todo: This is an oddity that is only here to match the stats if (cycles != 0) --cycles; idleCycles += cycles; diff --git a/src/cpu/o3/cpu.hh b/src/cpu/o3/cpu.hh index 5910f314d..076cce0fb 100644 --- a/src/cpu/o3/cpu.hh +++ b/src/cpu/o3/cpu.hh @@ -211,7 +211,7 @@ class FullO3CPU : public BaseO3CPU TickEvent tickEvent; /** Schedule tick event, regardless of its current state. */ - void scheduleTickEvent(int delay) + void scheduleTickEvent(Cycles delay) { if (tickEvent.squashed()) reschedule(tickEvent, clockEdge(delay)); @@ -251,7 +251,7 @@ class FullO3CPU : public BaseO3CPU /** Schedule thread to activate , regardless of its current state. 
    void
-    scheduleActivateThreadEvent(ThreadID tid, int delay)
+    scheduleActivateThreadEvent(ThreadID tid, Cycles delay)
    {
        // Schedule thread to activate, regardless of its current state.
        if (activateThreadEvent[tid].squashed())
@@ -314,7 +314,7 @@ class FullO3CPU : public BaseO3CPU

    /** Schedule cpu to deallocate thread context.*/
    void
-    scheduleDeallocateContextEvent(ThreadID tid, bool remove, int delay)
+    scheduleDeallocateContextEvent(ThreadID tid, bool remove, Cycles delay)
    {
        // Schedule thread to activate, regardless of its current state.
        if (deallocateContextEvent[tid].squashed())
@@ -392,7 +392,7 @@ class FullO3CPU : public BaseO3CPU
    virtual Counter totalOps() const;

    /** Add Thread to Active Threads List. */
-    void activateContext(ThreadID tid, int delay);
+    void activateContext(ThreadID tid, Cycles delay);

    /** Remove Thread from Active Threads List */
    void suspendContext(ThreadID tid);
@@ -400,7 +400,8 @@ class FullO3CPU : public BaseO3CPU
    /** Remove Thread from Active Threads List &&
     *  Possibly Remove Thread Context from CPU.
     */
-    bool scheduleDeallocateContext(ThreadID tid, bool remove, int delay = 1);
+    bool scheduleDeallocateContext(ThreadID tid, bool remove,
+                                   Cycles delay = Cycles(1));

    /** Remove Thread from Active Threads List &&
     *  Remove Thread Context from CPU.
@@ -748,7 +749,7 @@ class FullO3CPU : public BaseO3CPU
    std::list<int> cpuWaitList;

    /** The cycle that the CPU was last running, used for statistics. */
-    Tick lastRunningCycle;
+    Cycles lastRunningCycle;

    /** The cycle that the CPU was last activated by a new thread*/
    Tick lastActivatedCycle;
diff --git a/src/cpu/o3/fetch_impl.hh b/src/cpu/o3/fetch_impl.hh
index 9caf0c79b..33563f539 100644
--- a/src/cpu/o3/fetch_impl.hh
+++ b/src/cpu/o3/fetch_impl.hh
@@ -646,7 +646,8 @@ DefaultFetch<Impl>::finishTranslation(Fault fault, RequestPtr mem_req)
            assert(!finishTranslationEvent.scheduled());
            finishTranslationEvent.setFault(fault);
            finishTranslationEvent.setReq(mem_req);
-            cpu->schedule(finishTranslationEvent, cpu->clockEdge(1));
+            cpu->schedule(finishTranslationEvent,
+                          cpu->clockEdge(Cycles(1)));
            return;
        }
        DPRINTF(Fetch, "[tid:%i] Got back req with addr %#x but expected %#x\n",
diff --git a/src/cpu/o3/inst_queue_impl.hh b/src/cpu/o3/inst_queue_impl.hh
index b6c3bd239..a8f14287a 100644
--- a/src/cpu/o3/inst_queue_impl.hh
+++ b/src/cpu/o3/inst_queue_impl.hh
@@ -828,7 +828,8 @@ InstructionQueue<Impl>::scheduleReadyInsts()
                FUCompletion *execution = new FUCompletion(issuing_inst,
                                                           idx, this);

-                cpu->schedule(execution, cpu->clockEdge(op_latency - 1));
+                cpu->schedule(execution,
+                              cpu->clockEdge(Cycles(op_latency - 1)));

                // @todo: Enforce that issue_latency == 1 or op_latency
                if (issue_latency > 1) {
diff --git a/src/cpu/o3/lsq_unit.hh b/src/cpu/o3/lsq_unit.hh
index c567341c7..8eb33c297 100644
--- a/src/cpu/o3/lsq_unit.hh
+++ b/src/cpu/o3/lsq_unit.hh
@@ -607,7 +607,7 @@ LSQUnit<Impl>::read(Request *req, Request *sreqLow, Request *sreqHigh,
        load_inst->memData = new uint8_t[64];

        ThreadContext *thread = cpu->tcBase(lsqID);
-        Tick delay;
+        Cycles delay(0);
        PacketPtr data_pkt = new Packet(req, MemCmd::ReadReq);

        if (!TheISA::HasUnalignedMemAcc || !sreqLow) {
@@ -622,7 +622,7 @@ LSQUnit<Impl>::read(Request *req, Request *sreqLow, Request *sreqHigh,
            snd_data_pkt->dataStatic(load_inst->memData + sreqLow->getSize());

            delay = TheISA::handleIprRead(thread, fst_data_pkt);
-            unsigned delay2 = TheISA::handleIprRead(thread, snd_data_pkt);
+            Cycles delay2 = TheISA::handleIprRead(thread, snd_data_pkt);
            if (delay2 > delay)
                delay = delay2;
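The LSQ hunk above relies on Cycles behaving like a plain integer count in comparisons and assignments (delay = delay2 when delay2 > delay) while remaining a type distinct from Tick. As an illustration only, and not the project's actual definition of the wrapper, a minimal standalone class in that spirit could look like this:

    #include <cstdint>
    #include <iostream>

    // Hypothetical stand-in for the Cycles wrapper: a thin strong typedef
    // around a 64-bit count that still supports the comparisons and
    // assignments used in code such as LSQUnit<Impl>::read() above.
    class CyclesSketch
    {
      private:
        uint64_t c;

      public:
        explicit CyclesSketch(uint64_t _c = 0) : c(_c) { }

        // implicit conversion back to a raw count keeps arithmetic and
        // comparisons working with minimal churn at call sites
        operator uint64_t() const { return c; }

        CyclesSketch& operator++() { ++c; return *this; }
        CyclesSketch& operator--() { --c; return *this; }
    };

    int main()
    {
        CyclesSketch delay(0);
        CyclesSketch delay2(3);

        // mirrors "if (delay2 > delay) delay = delay2;"
        if (delay2 > delay)
            delay = delay2;

        std::cout << "combined IPR delay: " << uint64_t(delay) << " cycles\n";
        return 0;
    }

The explicit constructor is what forces call sites to write Cycles(1) rather than a bare 1, which is the point of the wrapper: cycle-valued and tick-valued arguments can no longer be mixed up silently.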
diff --git a/src/cpu/o3/thread_context.hh b/src/cpu/o3/thread_context.hh
index 5c236ee0c..520f07b0f 100755
--- a/src/cpu/o3/thread_context.hh
+++ b/src/cpu/o3/thread_context.hh
@@ -134,13 +134,13 @@ class O3ThreadContext : public ThreadContext
    /** Set the status to Active.  Optional delay indicates number of
     * cycles to wait before beginning execution. */
-    virtual void activate(int delay = 1);
+    virtual void activate(Cycles delay = Cycles(1));

    /** Set the status to Suspended. */
-    virtual void suspend(int delay = 0);
+    virtual void suspend(Cycles delay = Cycles(0));

    /** Set the status to Halted. */
-    virtual void halt(int delay = 0);
+    virtual void halt(Cycles delay = Cycles(0));

    /** Dumps the function profiling information.
     * @todo: Implement.
diff --git a/src/cpu/o3/thread_context_impl.hh b/src/cpu/o3/thread_context_impl.hh
index 13bfe32df..8a8ee636a 100755
--- a/src/cpu/o3/thread_context_impl.hh
+++ b/src/cpu/o3/thread_context_impl.hh
@@ -102,7 +102,7 @@ O3ThreadContext<Impl>::takeOverFrom(ThreadContext *old_context)

template <class Impl>
void
-O3ThreadContext<Impl>::activate(int delay)
+O3ThreadContext<Impl>::activate(Cycles delay)
{
    DPRINTF(O3CPU, "Calling activate on Thread Context %d\n",
            threadId());
@@ -119,7 +119,7 @@ O3ThreadContext<Impl>::activate(int delay)

template <class Impl>
void
-O3ThreadContext<Impl>::suspend(int delay)
+O3ThreadContext<Impl>::suspend(Cycles delay)
{
    DPRINTF(O3CPU, "Calling suspend on Thread Context %d\n",
            threadId());
@@ -136,7 +136,7 @@ O3ThreadContext<Impl>::suspend(int delay)

template <class Impl>
void
-O3ThreadContext<Impl>::halt(int delay)
+O3ThreadContext<Impl>::halt(Cycles delay)
{
    DPRINTF(O3CPU, "Calling halt on Thread Context %d\n",
            threadId());
diff --git a/src/cpu/simple/atomic.cc b/src/cpu/simple/atomic.cc
index d1b0391fc..2d7afd221 100644
--- a/src/cpu/simple/atomic.cc
+++ b/src/cpu/simple/atomic.cc
@@ -197,7 +197,7 @@ AtomicSimpleCPU::takeOverFrom(BaseCPU *oldCPU)

void
-AtomicSimpleCPU::activateContext(ThreadID thread_num, int delay)
+AtomicSimpleCPU::activateContext(ThreadID thread_num, Cycles delay)
{
    DPRINTF(SimpleCPU, "ActivateContext %d (%d cycles)\n", thread_num, delay);

@@ -208,7 +208,7 @@ AtomicSimpleCPU::activateContext(ThreadID thread_num, int delay)
    assert(!tickEvent.scheduled());

    notIdleFraction++;
-    numCycles += tickToCycle(thread->lastActivate - thread->lastSuspend);
+    numCycles += ticksToCycles(thread->lastActivate - thread->lastSuspend);

    //Make sure ticks are still on multiples of cycles
    schedule(tickEvent, clockEdge(delay));
@@ -518,13 +518,11 @@ AtomicSimpleCPU::tick()
            stall_ticks += dcache_latency;

        if (stall_ticks) {
-            Tick stall_cycles = stall_ticks / clockPeriod();
-            Tick aligned_stall_ticks = ticks(stall_cycles);
-
-            if (aligned_stall_ticks < stall_ticks)
-                aligned_stall_ticks += 1;
-
-            latency += aligned_stall_ticks;
+            // the atomic cpu does its accounting in ticks, so
+            // keep counting in ticks but round to the clock
+            // period
+            latency += divCeil(stall_ticks, clockPeriod()) *
+                clockPeriod();
        }

    }
diff --git a/src/cpu/simple/atomic.hh b/src/cpu/simple/atomic.hh
index e88c93cce..d67ab67a5 100644
--- a/src/cpu/simple/atomic.hh
+++ b/src/cpu/simple/atomic.hh
@@ -127,7 +127,7 @@ class AtomicSimpleCPU : public BaseSimpleCPU
    void switchOut();
    void takeOverFrom(BaseCPU *oldCPU);

-    virtual void activateContext(ThreadID thread_num, int delay);
+    virtual void activateContext(ThreadID thread_num, Cycles delay);
    virtual void suspendContext(ThreadID thread_num);

    Fault readMem(Addr addr, uint8_t *data, unsigned size, unsigned flags);
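The AtomicSimpleCPU hunk above replaces the hand-rolled alignment of stall_ticks with a rounding up to the next multiple of the clock period. A small standalone illustration of that arithmetic, with a local divCeil helper standing in for the one the simulator provides, might be:

    #include <cstdint>
    #include <iostream>

    typedef uint64_t Tick;

    // local stand-in for the simulator's integer ceiling-division helper
    static Tick divCeil(Tick a, Tick b) { return (a + b - 1) / b; }

    int main()
    {
        const Tick clock_period = 500;  // e.g. 500 ticks per cycle (2 GHz with 1 ps ticks)
        const Tick stall_ticks = 1250;  // raw stall time reported by the memory system

        // round the stall up to a whole number of clock periods, as the
        // patched AtomicSimpleCPU::tick() now does
        Tick latency = divCeil(stall_ticks, clock_period) * clock_period;

        std::cout << "stall of " << stall_ticks << " ticks charged as "
                  << latency << " ticks (" << latency / clock_period
                  << " cycles)\n";  // 1500 ticks, i.e. 3 cycles
        return 0;
    }

The atomic CPU keeps its accounting in ticks, so the result stays a Tick value; only the rounding is expressed in terms of the clock period.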
diff --git a/src/cpu/simple/timing.cc b/src/cpu/simple/timing.cc
index 5437e77aa..15b277d53 100644
--- a/src/cpu/simple/timing.cc
+++ b/src/cpu/simple/timing.cc
@@ -187,7 +187,7 @@ TimingSimpleCPU::takeOverFrom(BaseCPU *oldCPU)

void
-TimingSimpleCPU::activateContext(ThreadID thread_num, int delay)
+TimingSimpleCPU::activateContext(ThreadID thread_num, Cycles delay)
{
    DPRINTF(SimpleCPU, "ActivateContext %d (%d cycles)\n", thread_num, delay);

@@ -229,7 +229,7 @@ TimingSimpleCPU::handleReadPacket(PacketPtr pkt)
{
    RequestPtr req = pkt->req;
    if (req->isMmappedIpr()) {
-        Tick delay = TheISA::handleIprRead(thread->getTC(), pkt);
+        Cycles delay = TheISA::handleIprRead(thread->getTC(), pkt);
        new IprEvent(pkt, this, clockEdge(delay));
        _status = DcacheWaitResponse;
        dcache_pkt = NULL;
@@ -443,7 +443,7 @@ TimingSimpleCPU::handleWritePacket()
{
    RequestPtr req = dcache_pkt->req;
    if (req->isMmappedIpr()) {
-        Tick delay = TheISA::handleIprWrite(thread->getTC(), dcache_pkt);
+        Cycles delay = TheISA::handleIprWrite(thread->getTC(), dcache_pkt);
        new IprEvent(dcache_pkt, this, clockEdge(delay));
        _status = DcacheWaitResponse;
        dcache_pkt = NULL;
diff --git a/src/cpu/simple/timing.hh b/src/cpu/simple/timing.hh
index c4d1573af..19a4f818e 100644
--- a/src/cpu/simple/timing.hh
+++ b/src/cpu/simple/timing.hh
@@ -255,7 +255,7 @@ class TimingSimpleCPU : public BaseSimpleCPU
    void switchOut();
    void takeOverFrom(BaseCPU *oldCPU);

-    virtual void activateContext(ThreadID thread_num, int delay);
+    virtual void activateContext(ThreadID thread_num, Cycles delay);
    virtual void suspendContext(ThreadID thread_num);

    Fault readMem(Addr addr, uint8_t *data, unsigned size, unsigned flags);
diff --git a/src/cpu/simple_thread.cc b/src/cpu/simple_thread.cc
index c114d04ac..f887e7e48 100644
--- a/src/cpu/simple_thread.cc
+++ b/src/cpu/simple_thread.cc
@@ -210,7 +210,7 @@ SimpleThread::dumpFuncProfile()
}

void
-SimpleThread::activate(int delay)
+SimpleThread::activate(Cycles delay)
{
    if (status() == ThreadContext::Active)
        return;
diff --git a/src/cpu/simple_thread.hh b/src/cpu/simple_thread.hh
index 1595551fb..8594e4471 100644
--- a/src/cpu/simple_thread.hh
+++ b/src/cpu/simple_thread.hh
@@ -209,7 +209,7 @@ class SimpleThread : public ThreadState
    /// Set the status to Active.  Optional delay indicates number of
    /// cycles to wait before beginning execution.
-    void activate(int delay = 1);
+    void activate(Cycles delay = Cycles(1));

    /// Set the status to Suspended.
    void suspend();
diff --git a/src/cpu/testers/memtest/memtest.cc b/src/cpu/testers/memtest/memtest.cc
index f8a8b66bd..7ea6ad84b 100644
--- a/src/cpu/testers/memtest/memtest.cc
+++ b/src/cpu/testers/memtest/memtest.cc
@@ -246,7 +246,7 @@
void
MemTest::tick()
{
    if (!tickEvent.scheduled())
-        schedule(tickEvent, clockEdge(1));
+        schedule(tickEvent, clockEdge(Cycles(1)));

    if (++noResponseCycles >= 500000) {
        if (issueDmas) {
diff --git a/src/cpu/testers/networktest/networktest.cc b/src/cpu/testers/networktest/networktest.cc
index 05568b3c0..3f61a87d3 100644
--- a/src/cpu/testers/networktest/networktest.cc
+++ b/src/cpu/testers/networktest/networktest.cc
@@ -165,7 +165,7 @@ NetworkTest::tick()
        exitSimLoop("Network Tester completed simCycles");
    else {
        if (!tickEvent.scheduled())
-            schedule(tickEvent, clockEdge(1));
+            schedule(tickEvent, clockEdge(Cycles(1)));
    }
}
diff --git a/src/cpu/thread_context.hh b/src/cpu/thread_context.hh
index e186e2f83..e16bc3b39 100644
--- a/src/cpu/thread_context.hh
+++ b/src/cpu/thread_context.hh
@@ -163,13 +163,13 @@ class ThreadContext
    /// Set the status to Active.  Optional delay indicates number of
    /// cycles to wait before beginning execution.
-    virtual void activate(int delay = 1) = 0;
+    virtual void activate(Cycles delay = Cycles(1)) = 0;

    /// Set the status to Suspended.
-    virtual void suspend(int delay = 0) = 0;
+    virtual void suspend(Cycles delay = Cycles(0)) = 0;

    /// Set the status to Halted.
-    virtual void halt(int delay = 0) = 0;
+    virtual void halt(Cycles delay = Cycles(0)) = 0;

    virtual void dumpFuncProfile() = 0;
@@ -329,13 +329,14 @@ class ProxyThreadContext : public ThreadContext
    /// Set the status to Active.  Optional delay indicates number of
    /// cycles to wait before beginning execution.
-    void activate(int delay = 1) { actualTC->activate(delay); }
+    void activate(Cycles delay = Cycles(1))
+    { actualTC->activate(delay); }

    /// Set the status to Suspended.
-    void suspend(int delay = 0) { actualTC->suspend(); }
+    void suspend(Cycles delay = Cycles(0)) { actualTC->suspend(); }

    /// Set the status to Halted.
-    void halt(int delay = 0) { actualTC->halt(); }
+    void halt(Cycles delay = Cycles(0)) { actualTC->halt(); }

    void dumpFuncProfile() { actualTC->dumpFuncProfile(); }
diff --git a/src/dev/arm/pl111.cc b/src/dev/arm/pl111.cc
index 7990e02ed..22eba1458 100644
--- a/src/dev/arm/pl111.cc
+++ b/src/dev/arm/pl111.cc
@@ -475,7 +475,7 @@ Pl111::fillFifo()
void
Pl111::dmaDone()
{
-    Tick maxFrameTime = lcdTiming2.cpl * height;
+    Cycles maxFrameTime(lcdTiming2.cpl * height);

    --dmaPendingNum;
@@ -503,8 +503,11 @@ Pl111::dmaDone()
        // argument into a relative number of cycles in the future by
        // subtracting curCycle()
        if (lcdControl.lcden)
-            schedule(readEvent, clockEdge(startTime + maxFrameTime -
-                                          curCycle()));
+            // @todo: This is a terrible way of doing the time
+            // keeping, make it all relative
+            schedule(readEvent,
+                     clockEdge(Cycles(startTime - curCycle() +
+                                      maxFrameTime)));
    }

    if (dmaPendingNum > (maxOutstandingDma - waterMark))
diff --git a/src/dev/i8254xGBe.cc b/src/dev/i8254xGBe.cc
index 3ba140bce..71b88377d 100644
--- a/src/dev/i8254xGBe.cc
+++ b/src/dev/i8254xGBe.cc
@@ -2052,7 +2052,7 @@ IGbE::restartClock()
{
    if (!tickEvent.scheduled() && (rxTick || txTick || txFifoTick) &&
        getState() == SimObject::Running)
-        schedule(tickEvent, clockEdge(1));
+        schedule(tickEvent, clockEdge(Cycles(1)));
}

unsigned int
diff --git a/src/dev/sinic.cc b/src/dev/sinic.cc
index 623dcf2c1..cdfa844d9 100644
--- a/src/dev/sinic.cc
+++ b/src/dev/sinic.cc
@@ -1321,7 +1321,7 @@ Device::transferDone()

    DPRINTF(Ethernet, "transfer complete: data in txFifo...schedule xmit\n");

-    reschedule(txEvent, curTick() + ticks(1), true);
+    reschedule(txEvent, curTick() + clockPeriod(), true);
}

bool
diff --git a/src/mem/bridge.cc b/src/mem/bridge.cc
index ca3fde0ed..3a185a8eb 100644
--- a/src/mem/bridge.cc
+++ b/src/mem/bridge.cc
@@ -56,7 +56,7 @@
Bridge::BridgeSlavePort::BridgeSlavePort(const std::string& _name,
                                         Bridge& _bridge,
                                         BridgeMasterPort& _masterPort,
-                                         int _delay, int _resp_limit,
+                                         Cycles _delay, int _resp_limit,
                                         std::vector<Range<Addr> > _ranges)
    : SlavePort(_name, &_bridge), bridge(_bridge), masterPort(_masterPort),
      delay(_delay), ranges(_ranges.begin(), _ranges.end()),
@@ -68,7 +68,7 @@ Bridge::BridgeSlavePort::BridgeSlavePort(const std::string& _name,
Bridge::BridgeMasterPort::BridgeMasterPort(const std::string& _name,
                                           Bridge& _bridge,
                                           BridgeSlavePort& _slavePort,
-                                           int _delay, int _req_limit)
+                                           Cycles _delay, int _req_limit)
    : MasterPort(_name, &_bridge), bridge(_bridge), slavePort(_slavePort),
      delay(_delay), reqQueueLimit(_req_limit), sendEvent(*this)
{
@@ -76,9 +76,10 @@ Bridge::BridgeMasterPort::BridgeMasterPort(const std::string& _name,

Bridge::Bridge(Params *p)
    : MemObject(p),
-      slavePort(p->name + ".slave", *this, masterPort, p->delay, p->resp_size,
-                p->ranges),
-      masterPort(p->name + ".master", *this, slavePort, p->delay, p->req_size)
+      slavePort(p->name + ".slave", *this, masterPort,
+                ticksToCycles(p->delay), p->resp_size, p->ranges),
+      masterPort(p->name + ".master", *this, slavePort,
+                 ticksToCycles(p->delay), p->req_size)
{
}
@@ -140,7 +141,7 @@ Bridge::BridgeMasterPort::recvTimingResp(PacketPtr pkt)

    DPRINTF(Bridge, "Request queue size: %d\n", transmitList.size());

-    slavePort.schedTimingResp(pkt, curTick() + delay);
+    slavePort.schedTimingResp(pkt, bridge.clockEdge(delay));

    return true;
}
@@ -170,7 +171,7 @@ Bridge::BridgeSlavePort::recvTimingReq(PacketPtr pkt)
        assert(outstandingResponses != respQueueLimit);
        ++outstandingResponses;
        retryReq = false;
-        masterPort.schedTimingReq(pkt, curTick() + delay);
+        masterPort.schedTimingReq(pkt, bridge.clockEdge(delay));
    }
}
@@ -352,7 +353,7 @@ Bridge::BridgeSlavePort::recvRetry()
Tick
Bridge::BridgeSlavePort::recvAtomic(PacketPtr pkt)
{
-    return delay + masterPort.sendAtomic(pkt);
+    return delay * bridge.clockPeriod() + masterPort.sendAtomic(pkt);
}

void
diff --git a/src/mem/bridge.hh b/src/mem/bridge.hh
index cf7673c47..c52146463 100644
--- a/src/mem/bridge.hh
+++ b/src/mem/bridge.hh
@@ -140,7 +140,7 @@ class Bridge : public MemObject
        BridgeMasterPort& masterPort;

        /** Minimum request delay though this bridge. */
-        Tick delay;
+        Cycles delay;

        /** Address ranges to pass through the bridge */
        AddrRangeList ranges;
@@ -187,12 +187,12 @@ class Bridge : public MemObject
         * @param _name the port name including the owner
         * @param _bridge the structural owner
         * @param _masterPort the master port on the other side of the bridge
-         * @param _delay the delay from seeing a response to sending it
+         * @param _delay the delay in cycles from receiving to sending
         * @param _resp_limit the size of the response queue
         * @param _ranges a number of address ranges to forward
         */
        BridgeSlavePort(const std::string& _name, Bridge& _bridge,
-                        BridgeMasterPort& _masterPort, int _delay,
+                        BridgeMasterPort& _masterPort, Cycles _delay,
                        int _resp_limit, std::vector<Range<Addr> > _ranges);

        /**
@@ -255,7 +255,7 @@ class Bridge : public MemObject
        BridgeSlavePort& slavePort;

        /** Minimum delay though this bridge. */
-        Tick delay;
+        Cycles delay;

        /**
         * Request packet queue. Request packets are held in this
@@ -286,11 +286,11 @@ class Bridge : public MemObject
         * @param _name the port name including the owner
         * @param _bridge the structural owner
         * @param _slavePort the slave port on the other side of the bridge
-         * @param _delay the delay from seeing a request to sending it
+         * @param _delay the delay in cycles from receiving to sending
         * @param _req_limit the size of the request queue
         */
        BridgeMasterPort(const std::string& _name, Bridge& _bridge,
-                         BridgeSlavePort& _slavePort, int _delay,
+                         BridgeSlavePort& _slavePort, Cycles _delay,
                         int _req_limit);

        /**
diff --git a/src/python/m5/params.py b/src/python/m5/params.py
index 3dcbecd2f..5c40a9c64 100644
--- a/src/python/m5/params.py
+++ b/src/python/m5/params.py
@@ -463,6 +463,8 @@ class CheckedInt(NumericParamValue):
        # most derived types require this, so we just do it here once
        code('%import "stdint.i"')
        code('%import "base/types.hh"')
+        # ignore the case operator for Cycles
+        code('%ignore *::operator uint64_t() const;')

    def getValue(self):
        return long(self.value)
@@ -480,6 +482,7 @@
class Int64(CheckedInt):    cxx_type = 'int64_t';  size = 64; unsigned = False
class UInt64(CheckedInt):   cxx_type = 'uint64_t'; size = 64; unsigned = True
class Counter(CheckedInt):  cxx_type = 'Counter';  size = 64; unsigned = True
+class Cycles(CheckedInt):   cxx_type = 'Cycles';   size = 64; unsigned = True
class Tick(CheckedInt):     cxx_type = 'Tick';     size = 64; unsigned = True
class TcpPort(CheckedInt):  cxx_type = 'uint16_t'; size = 16; unsigned = True
class UdpPort(CheckedInt):  cxx_type = 'uint16_t'; size = 16; unsigned = True
diff --git a/src/sim/clocked_object.hh b/src/sim/clocked_object.hh
index 050a15a74..78539c9c9 100644
--- a/src/sim/clocked_object.hh
+++ b/src/sim/clocked_object.hh
@@ -64,7 +64,7 @@ class ClockedObject : public SimObject

    // The cycle counter value corresponding to the current value of
    // 'tick'
-    mutable Tick cycle;
+    mutable Cycles cycle;

    /**
     * Prevent inadvertent use of the copy constructor and assignment
@@ -96,7 +96,7 @@ class ClockedObject : public SimObject
            // if not, we have to recalculate the cycle and tick, we
            // perform the calculations in terms of relative cycles to
            // allow changes to the clock period in the future
-            Tick elapsedCycles = divCeil(curTick() - tick, clock);
+            Cycles elapsedCycles(divCeil(curTick() - tick, clock));
            cycle += elapsedCycles;
            tick += elapsedCycles * clock;
        }
@@ -130,22 +130,22 @@ class ClockedObject : public SimObject
     *
     * @return The tick when the clock edge occurs
     */
-    inline Tick clockEdge(int cycles = 0) const
+    inline Tick clockEdge(Cycles cycles = Cycles(0)) const
    {
        // align tick to the next clock edge
        update();

        // figure out when this future cycle is
-        return tick + ticks(cycles);
+        return tick + clock * cycles;
    }

    /**
     * Determine the current cycle, corresponding to a tick aligned to
     * a clock edge.
     *
-     * @return The current cycle
+     * @return The current cycle count
     */
-    inline Tick curCycle() const
+    inline Cycles curCycle() const
    {
        // align cycle to the next clock edge.
        update();
@@ -162,13 +162,12 @@ class ClockedObject : public SimObject
    Tick nextCycle() const
    { return clockEdge(); }

-    inline Tick frequency() const { return SimClock::Frequency / clock; }
-
-    inline Tick ticks(int cycles) const { return clock * cycles; }
+    inline uint64_t frequency() const { return SimClock::Frequency / clock; }

    inline Tick clockPeriod() const { return clock; }

-    inline Tick tickToCycle(Tick tick) const { return tick / clock; }
+    inline Cycles ticksToCycles(Tick tick) const
+    { return Cycles(tick / clock); }
};
diff --git a/src/sim/process.cc b/src/sim/process.cc
index f92fb91e2..08636b2c4 100644
--- a/src/sim/process.cc
+++ b/src/sim/process.cc
@@ -245,7 +245,7 @@ Process::initState()
    ThreadContext *tc = system->getThreadContext(contextIds[0]);

    // mark this context as active so it will start ticking.
-    tc->activate(0);
+    tc->activate(Cycles(0));
}

// map simulator fd sim_fd to target fd tgt_fd
diff --git a/src/sim/pseudo_inst.cc b/src/sim/pseudo_inst.cc
index 8a7f0c469..aafa5672b 100644
--- a/src/sim/pseudo_inst.cc
+++ b/src/sim/pseudo_inst.cc
@@ -172,7 +172,7 @@ quiesceCycles(ThreadContext *tc, uint64_t cycles)

    EndQuiesceEvent *quiesceEvent = tc->getQuiesceEvent();

-    Tick resume = curTick() + cpu->ticks(cycles);
+    Tick resume = cpu->clockEdge(Cycles(cycles));

    cpu->reschedule(quiesceEvent, resume, true);
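Taken together, the clocked_object.hh and pseudo_inst.cc hunks show the intended usage pattern: latencies are carried around as relative cycle counts and only turned into an absolute tick at the point where an event is scheduled. A self-contained sketch of that pattern, using hypothetical simplified names and ignoring the clock-edge alignment that the real update() performs, could be:

    #include <cstdint>
    #include <iostream>

    typedef uint64_t Tick;
    typedef uint64_t Cycles;   // simplified stand-in for the wrapper class

    // Minimal model of a clocked object: it owns a clock period in ticks
    // and converts between relative cycles and absolute ticks on demand.
    class ClockedSketch
    {
      private:
        Tick clock;   // clock period in ticks
        Tick tick;    // tick of the most recent clock edge

      public:
        ClockedSketch(Tick period) : clock(period), tick(0) { }

        // absolute tick of the clock edge 'cycles' cycles into the future
        Tick clockEdge(Cycles cycles = 0) const { return tick + clock * cycles; }

        Tick clockPeriod() const { return clock; }

        Cycles ticksToCycles(Tick t) const { return t / clock; }
    };

    int main()
    {
        ClockedSketch cpu(500);   // 500-tick clock period

        // schedule a quiesce-style wake-up 100 cycles from now, in the same
        // spirit as the patched quiesceCycles() using clockEdge(Cycles(cycles))
        Tick resume = cpu.clockEdge(100);
        std::cout << "resume at tick " << resume << "\n";            // 50000

        // and convert a raw tick interval back into whole cycles
        std::cout << cpu.ticksToCycles(1250) << " whole cycles\n";   // 2
        return 0;
    }

Keeping the conversion inside the clocked object is what leaves the door open for per-object clock periods, and eventually frequency changes, without touching the callers again.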