author    Andreas Sandberg <Andreas.Sandberg@ARM.com>  2013-01-07 13:05:37 -0500
committer Andreas Sandberg <Andreas.Sandberg@ARM.com>  2013-01-07 13:05:37 -0500
commit    63f1d0516d8a4e5a688cc3000b5a3597518d3dc7 (patch)
tree      a1a33ec62895dd6f8fa2de234a6186112e33b40c /src/dev/arm/pl111.cc
parent    fffdc6a45019639cd8f899fa81eeb732db3e6f8c (diff)
download  gem5-63f1d0516d8a4e5a688cc3000b5a3597518d3dc7.tar.xz
arm: Fix DMA event handling bug in the PL111 model
The PL111 model currently maintains a list of pre-allocated DmaDoneEvents
to prevent unnecessary heap allocations. This list effectively works like
a stack where the top element is the latest scheduled event. When an event
triggers, the top pointer is moved down the stack. This breaks because
events usually retire from the bottom (they don't necessarily retire in
order), which triggers the following assertion:

gem5.debug: build/ARM/dev/arm/pl111.cc:460: void Pl111::fillFifo(): \
    Assertion `!dmaDoneEvent[dmaPendingNum-1].scheduled()' failed.

This changeset adds a vector listing the currently unused events. This
vector acts like a stack: an element is popped off when a new event is
needed and pushed back on when the event triggers.
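
The fix amounts to the classic pre-allocated object pool with an explicit
free list. The sketch below is a minimal, self-contained illustration of
that idiom in plain C++; the FakeEvent and EventPool names are invented for
the example and are not the gem5 DmaDoneEvent or EventManager interfaces.
It shows why a free list keeps working when events retire in arbitrary
order, which is exactly the case the old top-of-stack index mishandled.

// Minimal sketch of a pre-allocated event pool with a free list.
// FakeEvent and EventPool are hypothetical names for illustration only.
#include <cassert>
#include <cstdio>
#include <vector>

struct FakeEvent {
    int id = 0;
    bool scheduled = false;
};

class EventPool {
  public:
    explicit EventPool(int max_outstanding)
        : all(max_outstanding)
    {
        // Allocate every event once and put it on the free list, mirroring
        // dmaDoneEventAll / dmaDoneEventFree in the patch. The 'all' vector
        // is never resized, so the stored pointers stay valid.
        for (int i = 0; i < max_outstanding; ++i) {
            all[i].id = i;
            freeList.push_back(&all[i]);
        }
    }

    // Take an unused event; corresponds to the back()/pop_back() pair
    // added to Pl111::fillFifo().
    FakeEvent *acquire() {
        assert(!freeList.empty());
        FakeEvent *ev = freeList.back();
        freeList.pop_back();
        assert(!ev->scheduled);
        ev->scheduled = true;
        return ev;
    }

    // Hand an event back when it triggers. Events may come back in any
    // order, which is exactly what broke the old top-of-stack counter.
    void release(FakeEvent *ev) {
        ev->scheduled = false;
        freeList.push_back(ev);
    }

  private:
    std::vector<FakeEvent> all;         // owns the pre-allocated events
    std::vector<FakeEvent *> freeList;  // events not currently in flight
};

int main()
{
    EventPool pool(3);
    FakeEvent *first = pool.acquire();
    FakeEvent *second = pool.acquire();

    // Retire out of order: the first event finishes while the second is
    // still pending, and the pool reuses it without touching the second.
    pool.release(first);
    FakeEvent *reused = pool.acquire();
    std::printf("reused event %d while event %d is still pending\n",
                reused->id, second->id);

    pool.release(second);
    pool.release(reused);
    return 0;
}

Because the events are allocated once up front and only pointers move
between the pool and its users, nothing is heap-allocated on the hot path,
which preserves the original intent of the pre-allocated list.
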
Diffstat (limited to 'src/dev/arm/pl111.cc')
-rw-r--r--  src/dev/arm/pl111.cc  24
1 file changed, 18 insertions, 6 deletions
diff --git a/src/dev/arm/pl111.cc b/src/dev/arm/pl111.cc
index 2973cda27..cd1f00272 100644
--- a/src/dev/arm/pl111.cc
+++ b/src/dev/arm/pl111.cc
@@ -68,7 +68,9 @@ Pl111::Pl111(const Params *p)
       vnc(p->vnc), bmp(NULL), width(LcdMaxWidth), height(LcdMaxHeight),
       bytesPerPixel(4), startTime(0), startAddr(0), maxAddr(0), curAddr(0),
       waterMark(0), dmaPendingNum(0), readEvent(this), fillFifoEvent(this),
-      dmaDoneEvent(maxOutstandingDma, this), intEvent(this)
+      dmaDoneEventAll(maxOutstandingDma, this),
+      dmaDoneEventFree(maxOutstandingDma),
+      intEvent(this)
 {
     pioSize = 0xFFFF;
@@ -81,6 +83,9 @@ Pl111::Pl111(const Params *p)
     memset(cursorImage, 0, sizeof(cursorImage));
     memset(dmaBuffer, 0, buffer_size);
 
+    for (int i = 0; i < maxOutstandingDma; ++i)
+        dmaDoneEventFree[i] = &dmaDoneEventAll[i];
+
     if (vnc)
         vnc->setFramebufferAddr(dmaBuffer);
 }
@@ -458,14 +463,17 @@ Pl111::fillFifo()
         // due to assertion in scheduling state
         ++dmaPendingNum;
 
-        assert(!dmaDoneEvent[dmaPendingNum-1].scheduled());
+        assert(!dmaDoneEventFree.empty());
+        DmaDoneEvent *event(dmaDoneEventFree.back());
+        dmaDoneEventFree.pop_back();
+        assert(!event->scheduled());
 
         // We use a uncachable request here because the requests from the CPU
         // will be uncacheable as well. If we have uncacheable and cacheable
         // requests in the memory system for the same address it won't be
         // pleased
         dmaPort.dmaAction(MemCmd::ReadReq, curAddr + startAddr, dmaSize,
-                          &dmaDoneEvent[dmaPendingNum-1], curAddr + dmaBuffer,
+                          event, curAddr + dmaBuffer,
                           0, Request::UNCACHEABLE);
         curAddr += dmaSize;
     }
@@ -599,8 +607,8 @@ Pl111::serialize(std::ostream &os)
     vector<Tick> dma_done_event_tick;
     dma_done_event_tick.resize(maxOutstandingDma);
     for (int x = 0; x < maxOutstandingDma; x++) {
-        dma_done_event_tick[x] = dmaDoneEvent[x].scheduled() ?
-            dmaDoneEvent[x].when() : 0;
+        dma_done_event_tick[x] = dmaDoneEventAll[x].scheduled() ?
+            dmaDoneEventAll[x].when() : 0;
     }
     arrayParamOut(os, "dma_done_event_tick", dma_done_event_tick);
 }
@@ -701,10 +709,14 @@ Pl111::unserialize(Checkpoint *cp, const std::string &section)
     vector<Tick> dma_done_event_tick;
     dma_done_event_tick.resize(maxOutstandingDma);
     arrayParamIn(cp, section, "dma_done_event_tick", dma_done_event_tick);
+    dmaDoneEventFree.clear();
     for (int x = 0; x < maxOutstandingDma; x++) {
         if (dma_done_event_tick[x])
-            schedule(dmaDoneEvent[x], dma_done_event_tick[x]);
+            schedule(dmaDoneEventAll[x], dma_done_event_tick[x]);
+        else
+            dmaDoneEventFree.push_back(&dmaDoneEventAll[x]);
     }
+    assert(maxOutstandingDma - dmaDoneEventFree.size() == dmaPendingNum);
 
     if (lcdControl.lcdpwr) {
         updateVideoParams();