Diffstat (limited to 'src')
-rw-r--r--  src/cpu/inorder/cpu.cc                    |  2 +-
-rw-r--r--  src/cpu/o3/cpu.cc                         |  2 +-
-rw-r--r--  src/cpu/simple/timing.cc                  |  8 ++++----
-rw-r--r--  src/dev/arm/hdlcd.cc                      |  8 ++++----
-rw-r--r--  src/dev/arm/pl111.cc                      |  6 +++---
-rw-r--r--  src/mem/bridge.cc                         |  4 ++--
-rw-r--r--  src/mem/bus.cc                            |  2 +-
-rw-r--r--  src/mem/ruby/system/RubyMemoryControl.cc  |  2 +-
-rw-r--r--  src/sim/clocked_object.hh                 |  7 ++++---
9 files changed, 21 insertions(+), 20 deletions(-)
diff --git a/src/cpu/inorder/cpu.cc b/src/cpu/inorder/cpu.cc
index 5c07621e3..2c6b49d82 100644
--- a/src/cpu/inorder/cpu.cc
+++ b/src/cpu/inorder/cpu.cc
@@ -1715,7 +1715,7 @@ InOrderCPU::wakeCPU()
numCycles += extra_cycles;
- schedule(&tickEvent, nextCycle());
+ schedule(&tickEvent, clockEdge());
}
// Lots of copied full system code...place into BaseCPU class?
diff --git a/src/cpu/o3/cpu.cc b/src/cpu/o3/cpu.cc
index 9caa49ad6..99beaa176 100644
--- a/src/cpu/o3/cpu.cc
+++ b/src/cpu/o3/cpu.cc
@@ -1720,7 +1720,7 @@ FullO3CPU<Impl>::wakeCPU()
idleCycles += cycles;
numCycles += cycles;
- schedule(tickEvent, nextCycle());
+ schedule(tickEvent, clockEdge());
}
template <class Impl>
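In both wakeCPU() hunks the tick event is now scheduled at clockEdge() rather than nextCycle(). With the redefinition of nextCycle() at the end of this patch (it can never return the current tick), a CPU woken exactly on a clock edge would otherwise lose a full cycle before ticking; clockEdge() preserves the old behaviour of ticking in the current cycle. A minimal sketch of the distinction, where CpuSketch and wake() are illustrative stand-ins rather than gem5 classes:

    #include <cstdint>
    #include <iostream>

    typedef uint64_t Tick;

    // Illustrative stand-in for a clocked CPU model; wake() and the
    // printed ticks are assumptions for this sketch, not gem5 API.
    struct CpuSketch
    {
        Tick period;
        Tick now;   // stand-in for curTick()

        // Patched semantics: clockEdge() is this cycle's edge (and may
        // equal `now`); nextCycle() is always strictly in the future.
        Tick clockEdge() const { return (now + period - 1) / period * period; }
        Tick nextCycle() const { return clockEdge() + period; }

        void wake() const
        {
            std::cout << "woken at " << now
                      << ": clockEdge() = " << clockEdge()   // tick this cycle
                      << ", nextCycle() = " << nextCycle()   // a cycle later
                      << '\n';
        }
    };

    int main()
    {
        CpuSketch cpu{500, 1000};  // woken exactly on an edge
        cpu.wake();                // clockEdge() = 1000, nextCycle() = 1500
    }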
diff --git a/src/cpu/simple/timing.cc b/src/cpu/simple/timing.cc
index ab4ea9256..1f453ca63 100644
--- a/src/cpu/simple/timing.cc
+++ b/src/cpu/simple/timing.cc
@@ -120,7 +120,7 @@ TimingSimpleCPU::drain(DrainManager *drain_manager)
// succeed on the first attempt. We need to reschedule it if
// the CPU is waiting for a microcode routine to complete.
if (_status == BaseSimpleCPU::Running && !fetchEvent.scheduled())
- schedule(fetchEvent, nextCycle());
+ schedule(fetchEvent, clockEdge());
return 1;
}
@@ -616,7 +616,7 @@ TimingSimpleCPU::advanceInst(Fault fault)
if (fault != NoFault) {
advancePC(fault);
DPRINTF(SimpleCPU, "Fault occured, scheduling fetch event\n");
- reschedule(fetchEvent, nextCycle(), true);
+ reschedule(fetchEvent, clockEdge(), true);
_status = Faulting;
return;
}
@@ -715,7 +715,7 @@ TimingSimpleCPU::IcachePort::recvTimingResp(PacketPtr pkt)
{
DPRINTF(SimpleCPU, "Received timing response %#x\n", pkt->getAddr());
// delay processing of returned data until next CPU clock edge
- Tick next_tick = cpu->nextCycle();
+ Tick next_tick = cpu->clockEdge();
if (next_tick == curTick())
cpu->completeIfetch(pkt);
@@ -807,7 +807,7 @@ bool
TimingSimpleCPU::DcachePort::recvTimingResp(PacketPtr pkt)
{
// delay processing of returned data until next CPU clock edge
- Tick next_tick = cpu->nextCycle();
+ Tick next_tick = cpu->clockEdge();
if (next_tick == curTick()) {
cpu->completeDataAccess(pkt);
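The recvTimingResp() hunks depend on clockEdge() being able to return the current tick: a response that arrives exactly on a CPU clock edge is completed immediately, and only mid-cycle arrivals are deferred to the next edge. Renaming the call keeps that behaviour now that nextCycle() is strictly in the future. A hedged sketch of the pattern, with completeAccess() and the free-standing clockEdge() as simplified stand-ins:

    #include <cstdint>
    #include <iostream>

    typedef uint64_t Tick;

    // Next clock edge at or after `now` for a clock of the given period.
    Tick clockEdge(Tick now, Tick period)
    { return (now + period - 1) / period * period; }

    void completeAccess(Tick when) { std::cout << "completed at " << when << '\n'; }

    // Mirrors the shape of TimingSimpleCPU::DcachePort::recvTimingResp() above.
    void recvTimingResp(Tick now, Tick period)
    {
        Tick next_tick = clockEdge(now, period);
        if (next_tick == now)
            completeAccess(now);        // on-edge: handle within this cycle
        else
            completeAccess(next_tick);  // mid-cycle: defer (gem5 schedules an event)
    }

    int main()
    {
        recvTimingResp(1000, 500);  // on an edge: completed at 1000
        recvTimingResp(1210, 500);  // mid-cycle:  completed at 1500
    }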
diff --git a/src/dev/arm/hdlcd.cc b/src/dev/arm/hdlcd.cc
index 14128c730..c5daebc9b 100644
--- a/src/dev/arm/hdlcd.cc
+++ b/src/dev/arm/hdlcd.cc
@@ -282,7 +282,7 @@ HDLcd::write(PacketPtr pkt)
if (new_command.enable) {
doUpdateParams = true;
if (!frameUnderway) {
- schedule(startFrameEvent, nextCycle());
+ schedule(startFrameEvent, clockEdge());
}
}
}
@@ -514,7 +514,7 @@ HDLcd::renderPixel()
frameUnderrun = true;
int_rawstat.underrun = 1;
if (!intEvent.scheduled())
- schedule(intEvent, nextCycle());
+ schedule(intEvent, clockEdge());
} else {
// emulate the pixel read from the internal buffer
pixelBufferSize -= bytesPerPixel() * count;
@@ -524,7 +524,7 @@ HDLcd::renderPixel()
// the DMA may have previously stalled due to the buffer being full;
// give it a kick; it knows not to fill if at end of frame, underrun, etc
if (!fillPixelBufferEvent.scheduled())
- schedule(fillPixelBufferEvent, nextCycle());
+ schedule(fillPixelBufferEvent, clockEdge());
// schedule the next pixel read according to where it is in the frame
pixelIndex += count;
@@ -597,7 +597,7 @@ HDLcd::dmaDone(DmaDoneEvent *event)
if ((dmaCurAddr < dmaMaxAddr) &&
(bytesFreeInPixelBuffer() + targetTransSize < PIXEL_BUFFER_CAPACITY) &&
!fillPixelBufferEvent.scheduled()) {
- schedule(fillPixelBufferEvent, nextCycle());
+ schedule(fillPixelBufferEvent, clockEdge());
}
}
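hdlcd.cc, pl111.cc below, and RubyMemoryControl.cc all use one idiom: kick an event at the clock edge only if it is not already pending, so that several triggers within a cycle coalesce into a single event. A small sketch of that idiom, with Event and schedule() as simplified stand-ins for gem5's event machinery:

    #include <cstdint>
    #include <iostream>

    typedef uint64_t Tick;

    struct Event
    {
        bool pending = false;
        Tick when = 0;
        bool scheduled() const { return pending; }
    };

    void schedule(Event &ev, Tick when)
    {
        ev.pending = true;
        ev.when = when;
        std::cout << "event scheduled for tick " << when << '\n';
    }

    Tick clockEdge(Tick now, Tick period)
    { return (now + period - 1) / period * period; }

    int main()
    {
        Event intEvent;
        Tick now = 1210, period = 500;

        // Mirrors: if (!intEvent.scheduled()) schedule(intEvent, clockEdge());
        for (int trigger = 0; trigger < 3; ++trigger)
            if (!intEvent.scheduled())
                schedule(intEvent, clockEdge(now, period));  // fires once, at 1500
    }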
diff --git a/src/dev/arm/pl111.cc b/src/dev/arm/pl111.cc
index 8460010f6..5929da07c 100644
--- a/src/dev/arm/pl111.cc
+++ b/src/dev/arm/pl111.cc
@@ -441,7 +441,7 @@ Pl111::readFramebuffer()
// Updating base address, interrupt if we're supposed to
lcdRis.baseaddr = 1;
if (!intEvent.scheduled())
- schedule(intEvent, nextCycle());
+ schedule(intEvent, clockEdge());
curAddr = 0;
startTime = curTick();
@@ -492,7 +492,7 @@ Pl111::dmaDone()
" have taken %d\n", curTick() - startTime, maxFrameTime);
lcdRis.underflow = 1;
if (!intEvent.scheduled())
- schedule(intEvent, nextCycle());
+ schedule(intEvent, clockEdge());
}
assert(!readEvent.scheduled());
@@ -522,7 +522,7 @@ Pl111::dmaDone()
return;
if (!fillFifoEvent.scheduled())
- schedule(fillFifoEvent, nextCycle());
+ schedule(fillFifoEvent, clockEdge());
}
void
diff --git a/src/mem/bridge.cc b/src/mem/bridge.cc
index 1a8437aa1..91bef2757 100644
--- a/src/mem/bridge.cc
+++ b/src/mem/bridge.cc
@@ -277,7 +277,7 @@ Bridge::BridgeMasterPort::trySendTiming()
req = transmitList.front();
DPRINTF(Bridge, "Scheduling next send\n");
bridge.schedule(sendEvent, std::max(req.tick,
- bridge.nextCycle()));
+ bridge.clockEdge()));
}
// if we have stalled a request due to a full request queue,
@@ -318,7 +318,7 @@ Bridge::BridgeSlavePort::trySendTiming()
resp = transmitList.front();
DPRINTF(Bridge, "Scheduling next send\n");
bridge.schedule(sendEvent, std::max(resp.tick,
- bridge.nextCycle()));
+ bridge.clockEdge()));
}
// if there is space in the request queue and we were stalling
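Both bridge ports schedule the next send at std::max(req.tick, bridge.clockEdge()): no earlier than the packet's own ready time, and no earlier than the bridge's next clock edge, which may be the current tick when the bridge is already aligned. A worked example with assumed numbers:

    #include <algorithm>
    #include <cstdint>
    #include <iostream>

    typedef uint64_t Tick;

    Tick clockEdge(Tick now, Tick period)
    { return (now + period - 1) / period * period; }

    int main()
    {
        Tick period = 1000;

        // Packet ready in the past, bridge mid-cycle: wait for the edge.
        std::cout << std::max<Tick>(1500, clockEdge(2300, period)) << '\n';  // 3000

        // Packet ready far in the future: the packet's time dominates.
        std::cout << std::max<Tick>(9000, clockEdge(2300, period)) << '\n';  // 9000
    }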
diff --git a/src/mem/bus.cc b/src/mem/bus.cc
index 368d49c86..8546df565 100644
--- a/src/mem/bus.cc
+++ b/src/mem/bus.cc
@@ -135,7 +135,7 @@ BaseBus::calcPacketTiming(PacketPtr pkt)
// the bus will be called at a time that is not necessarily
// coinciding with its own clock, so start by determining how long
// until the next clock edge (could be zero)
- Tick offset = nextCycle() - curTick();
+ Tick offset = clockEdge() - curTick();
// determine how many cycles are needed to send the data
unsigned dataCycles = pkt->hasData() ? divCeil(pkt->getSize(), width) : 0;
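calcPacketTiming() measures the distance from the current tick to the bus's next clock edge; as the comment notes, that offset can be zero, which only holds for clockEdge(), since the redefined nextCycle() is always strictly ahead of curTick(). A sketch of the arithmetic, with the period, payload size, and width as assumed values:

    #include <cstdint>
    #include <iostream>

    typedef uint64_t Tick;

    Tick divCeil(Tick a, Tick b) { return (a + b - 1) / b; }
    Tick clockEdge(Tick now, Tick period) { return divCeil(now, period) * period; }

    int main()
    {
        Tick period = 500, now = 1250, width = 8;  // width: bytes per cycle

        Tick offset = clockEdge(now, period) - now;            // 250; zero on an edge
        Tick dataCycles = divCeil(64 /* payload bytes */, width);  // 8 cycles

        std::cout << "offset " << offset
                  << ", data time " << dataCycles * period << " ticks\n";
    }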
diff --git a/src/mem/ruby/system/RubyMemoryControl.cc b/src/mem/ruby/system/RubyMemoryControl.cc
index 75e6e1b06..5ffc60e2b 100644
--- a/src/mem/ruby/system/RubyMemoryControl.cc
+++ b/src/mem/ruby/system/RubyMemoryControl.cc
@@ -307,7 +307,7 @@ RubyMemoryControl::enqueueMemRef(MemoryNode& memRef)
m_input_queue.push_back(memRef);
if (!m_event.scheduled()) {
- schedule(m_event, nextCycle());
+ schedule(m_event, clockEdge());
}
}
diff --git a/src/sim/clocked_object.hh b/src/sim/clocked_object.hh
index bf132bee1..d836c48cc 100644
--- a/src/sim/clocked_object.hh
+++ b/src/sim/clocked_object.hh
@@ -172,13 +172,14 @@ class ClockedObject : public SimObject
}
/**
- * Based on the clock of the object, determine the tick when the
- * next cycle begins, in other words, return the next clock edge.
+ * Based on the clock of the object, determine the tick when the next
+ * cycle begins, in other words, return the next clock edge.
+ * (This can never be the current tick.)
*
* @return The tick when the next cycle starts
*/
Tick nextCycle() const
- { return clockEdge(); }
+ { return clockEdge(Cycles(1)); }
inline uint64_t frequency() const { return SimClock::Frequency / clock; }
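This header change is the crux of the patch: clockEdge() (with its default argument of Cycles(0)) returns the edge of the current cycle and may therefore equal curTick(), while nextCycle() now returns clockEdge(Cycles(1)) and can never be the current tick. Every call site above that meant "this edge, possibly right now" was switched to clockEdge(), so behaviour is preserved while nextCycle() takes on its documented strictly-future meaning. A self-contained sketch of the contract, using a simplified alignment rule in place of gem5's cached update() bookkeeping:

    #include <cassert>
    #include <cstdint>

    typedef uint64_t Tick;
    typedef uint64_t Cycles;

    struct ClockedSketch
    {
        Tick period;  // clock period in ticks
        Tick now;     // stand-in for curTick()

        // Edge of the current cycle plus `n` further cycles; with n == 0
        // this may equal the current tick when `now` lies on an edge.
        Tick clockEdge(Cycles n = 0) const
        {
            Tick aligned = (now + period - 1) / period * period;
            return aligned + n * period;
        }

        // As redefined above: the strictly future edge where the next
        // cycle begins.
        Tick nextCycle() const { return clockEdge(Cycles(1)); }
    };

    int main()
    {
        ClockedSketch c{500, 1000};     // exactly on an edge
        assert(c.clockEdge() == 1000);  // equal to the current tick
        assert(c.nextCycle() == 1500);  // never the current tick

        c.now = 1210;                   // mid-cycle
        assert(c.clockEdge() == 1500);
        assert(c.nextCycle() == 2000);
        return 0;
    }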