author    David Hashe <david.hashe@amd.com>  2015-07-20 09:15:18 -0500
committer David Hashe <david.hashe@amd.com>  2015-07-20 09:15:18 -0500
commit    63a9f10de80a2a117aa06858e65dee2b6654762f (patch)
tree      ee333f5d8539196fc1c1688f508b1f9101831347 /src
parent    fbb220b4ae4a608a4954e7cef9007f37f1612b42 (diff)
download  gem5-63a9f10de80a2a117aa06858e65dee2b6654762f.tar.xz
ruby: Fix for stallAndWait bug
It was previously possible for a stalled message to be reordered after an incoming message. This patch ensures that any stalled message stays in its original request order.
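
To make the intended ordering concrete, below is a minimal standalone sketch, not gem5 code: it assumes a simplified message record ordered by (enqueue tick, arrival counter), mirroring how the patch re-enqueues stalled messages at the current tick so they sort ahead of any younger message arriving in the same cycle. All names here (Msg, Later, curTick) are illustrative only.

#include <cstdint>
#include <iostream>
#include <queue>
#include <vector>

struct Msg {
    uint64_t tick;     // cycle at which the message becomes visible
    uint64_t counter;  // global arrival order; breaks ties within a tick
    const char *name;
};

// Comparator giving a min-heap on (tick, counter): earlier ticks first,
// then earlier arrivals within the same tick.
struct Later {
    bool operator()(const Msg &a, const Msg &b) const {
        return a.tick != b.tick ? a.tick > b.tick : a.counter > b.counter;
    }
};

int main() {
    uint64_t counter = 0;
    const uint64_t curTick = 100;
    std::priority_queue<Msg, std::vector<Msg>, Later> heap;

    // A previously stalled request is put back at the *current* tick...
    heap.push({curTick, ++counter, "stalled request"});
    // ...then a younger message arrives later in the same cycle.
    heap.push({curTick, ++counter, "younger message"});

    // The stalled request is observed first, preserving request order.
    while (!heap.empty()) {
        std::cout << heap.top().name << " at tick " << heap.top().tick << "\n";
        heap.pop();
    }
    return 0;
}
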
Diffstat (limited to 'src')
-rw-r--r--  src/mem/ruby/network/MessageBuffer.cc               | 26
-rw-r--r--  src/mem/ruby/slicc_interface/AbstractController.cc   |  8
-rw-r--r--  src/mem/ruby/slicc_interface/AbstractController.hh   |  1
3 files changed, 23 insertions, 12 deletions
diff --git a/src/mem/ruby/network/MessageBuffer.cc b/src/mem/ruby/network/MessageBuffer.cc
index 0555f9b59..d823c0a1f 100644
--- a/src/mem/ruby/network/MessageBuffer.cc
+++ b/src/mem/ruby/network/MessageBuffer.cc
@@ -86,7 +86,7 @@ MessageBuffer::areNSlotsAvailable(unsigned int n)
// determine the correct size for the current cycle
// pop operations shouldn't effect the network's visible size
- // until next cycle, but enqueue operations effect the visible
+ // until schd cycle, but enqueue operations effect the visible
// size immediately
unsigned int current_size = 0;
@@ -234,7 +234,7 @@ MessageBuffer::dequeue()
m_receiver->ticksToCycles(message->getDelayedTicks());
// record previous size and time so the current buffer size isn't
- // adjusted until next cycle
+ // adjusted until schd cycle
if (m_time_last_time_pop < m_receiver->clockEdge()) {
m_size_at_cycle_start = m_prio_heap.size();
m_time_last_time_pop = m_receiver->clockEdge();
@@ -275,19 +275,19 @@ MessageBuffer::recycle()
}
void
-MessageBuffer::reanalyzeList(list<MsgPtr> &lt, Tick nextTick)
+MessageBuffer::reanalyzeList(list<MsgPtr> &lt, Tick schdTick)
{
while(!lt.empty()) {
m_msg_counter++;
MsgPtr m = lt.front();
- m->setLastEnqueueTime(nextTick);
+ m->setLastEnqueueTime(schdTick);
m->setMsgCounter(m_msg_counter);
m_prio_heap.push_back(m);
push_heap(m_prio_heap.begin(), m_prio_heap.end(),
greater<MsgPtr>());
- m_consumer->scheduleEventAbsolute(nextTick);
+ m_consumer->scheduleEventAbsolute(schdTick);
lt.pop_front();
}
}
@@ -297,13 +297,15 @@ MessageBuffer::reanalyzeMessages(const Address& addr)
{
DPRINTF(RubyQueue, "ReanalyzeMessages\n");
assert(m_stall_msg_map.count(addr) > 0);
- Tick nextTick = m_receiver->clockEdge(Cycles(1));
+ Tick curTick = m_receiver->clockEdge();
//
// Put all stalled messages associated with this address back on the
- // prio heap
+ // prio heap. The reanalyzeList call will make sure the consumer is
+ // scheduled for the current cycle so that the previously stalled messages
+ // will be observed before any younger messages that may arrive this cycle
//
- reanalyzeList(m_stall_msg_map[addr], nextTick);
+ reanalyzeList(m_stall_msg_map[addr], curTick);
m_stall_msg_map.erase(addr);
}
@@ -311,15 +313,17 @@ void
MessageBuffer::reanalyzeAllMessages()
{
DPRINTF(RubyQueue, "ReanalyzeAllMessages\n");
- Tick nextTick = m_receiver->clockEdge(Cycles(1));
+ Tick curTick = m_receiver->clockEdge();
//
// Put all stalled messages associated with this address back on the
- // prio heap
+ // prio heap. The reanalyzeList call will make sure the consumer is
+ // scheduled for the current cycle so that the previously stalled messages
+ // will be observed before any younger messages that may arrive this cycle.
//
for (StallMsgMapType::iterator map_iter = m_stall_msg_map.begin();
map_iter != m_stall_msg_map.end(); ++map_iter) {
- reanalyzeList(map_iter->second, nextTick);
+ reanalyzeList(map_iter->second, curTick);
}
m_stall_msg_map.clear();
}
diff --git a/src/mem/ruby/slicc_interface/AbstractController.cc b/src/mem/ruby/slicc_interface/AbstractController.cc
index a6d05fd3a..1ac99c882 100644
--- a/src/mem/ruby/slicc_interface/AbstractController.cc
+++ b/src/mem/ruby/slicc_interface/AbstractController.cc
@@ -154,6 +154,7 @@ AbstractController::wakeUpAllBuffers()
//
std::vector<MsgVecType*> wokeUpMsgVecs;
+ MsgBufType wokeUpMsgBufs;
if(m_waiting_buffers.size() > 0) {
for (WaitingBufType::iterator buf_iter = m_waiting_buffers.begin();
@@ -162,8 +163,13 @@ AbstractController::wakeUpAllBuffers()
for (MsgVecType::iterator vec_iter = buf_iter->second->begin();
vec_iter != buf_iter->second->end();
++vec_iter) {
- if (*vec_iter != NULL) {
+ //
+ // Make sure the MessageBuffer has not already be reanalyzed
+ //
+ if (*vec_iter != NULL &&
+ (wokeUpMsgBufs.count(*vec_iter) == 0)) {
(*vec_iter)->reanalyzeAllMessages();
+ wokeUpMsgBufs.insert(*vec_iter);
}
}
wokeUpMsgVecs.push_back(buf_iter->second);
diff --git a/src/mem/ruby/slicc_interface/AbstractController.hh b/src/mem/ruby/slicc_interface/AbstractController.hh
index e01a2a824..aadf03bd8 100644
--- a/src/mem/ruby/slicc_interface/AbstractController.hh
+++ b/src/mem/ruby/slicc_interface/AbstractController.hh
@@ -150,6 +150,7 @@ class AbstractController : public MemObject, public Consumer
std::map<Address, MessageBuffer*> m_block_map;
typedef std::vector<MessageBuffer*> MsgVecType;
+ typedef std::set<MessageBuffer*> MsgBufType;
typedef std::map< Address, MsgVecType* > WaitingBufType;
WaitingBufType m_waiting_buffers;
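
The AbstractController change above guards against reanalyzing the same MessageBuffer twice when several stalled addresses map to one buffer. Below is a hedged, self-contained sketch of that deduplication pattern using a std::set; Buffer and the waiting map are placeholders for illustration, not the gem5 classes.

#include <iostream>
#include <map>
#include <set>
#include <string>
#include <vector>

struct Buffer {
    std::string name;
    void reanalyzeAllMessages() { std::cout << "reanalyze " << name << "\n"; }
};

int main() {
    Buffer l1{"L1 request queue"};
    Buffer dir{"directory request queue"};

    // Two waiting addresses happen to reference the same buffer (l1).
    std::map<int, std::vector<Buffer*>> waiting = {
        {0x40, {&l1, &dir}},
        {0x80, {&l1}},
    };

    std::set<Buffer*> wokeUp;  // buffers already reanalyzed during this wakeup
    for (auto &entry : waiting) {
        for (Buffer *buf : entry.second) {
            if (buf != nullptr && wokeUp.count(buf) == 0) {
                buf->reanalyzeAllMessages();  // runs once per distinct buffer
                wokeUp.insert(buf);
            }
        }
    }
    return 0;
}
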