author     Brad Beckmann <Brad.Beckmann@amd.com>   2010-08-20 11:46:14 -0700
committer  Brad Beckmann <Brad.Beckmann@amd.com>   2010-08-20 11:46:14 -0700
commit     e7f2da517adbc9ba4ed1b33de102126260a0d587 (patch)
tree       dc45a1acf1843da774f55fb1a5e27333804c4910 /src/mem/ruby/buffers/MessageBuffer.cc
parent     af6b97e3ee2d73fcb2d4bcdbdffc9a6534dfdac8 (diff)
download   gem5-e7f2da517adbc9ba4ed1b33de102126260a0d587.tar.xz
ruby: Stall and wait input messages instead of recycling
This patch allows messages to be stalled in their input buffers, where they wait until a corresponding address changes state. To make this work, all in_ports must be ranked in order of dependence, and those in_ports that may unblock an address must wake up the stalled messages. A lot of this complexity is handled in slicc; the specification files simply annotate the in_ports.

--HG--
rename : src/mem/slicc/ast/CheckAllocateStatementAST.py => src/mem/slicc/ast/StallAndWaitStatementAST.py
rename : src/mem/slicc/ast/CheckAllocateStatementAST.py => src/mem/slicc/ast/WakeUpDependentsStatementAST.py
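The diff below adds two new MessageBuffer methods, stallMessage() and reanalyzeMessages(). As a rough illustration of the intended call pattern, here is a minimal hand-written sketch; the HypotheticalControllerPort type, its field, and its method names are invented for illustration and are not part of the patch or of the code slicc generates.

#include "mem/ruby/buffers/MessageBuffer.hh"
#include "mem/ruby/common/Address.hh"

// Hypothetical helper (not generated slicc output) showing how a controller
// is expected to drive the two methods added by this patch.
struct HypotheticalControllerPort
{
    MessageBuffer* m_in_buffer_ptr;

    // Called when the message at the head of the buffer cannot be serviced
    // because its cache block is in a transient state. addr must be
    // block-aligned (stallMessage asserts getOffset() == 0).
    void blockOn(const Address& addr)
    {
        // Pops the head message and parks it in m_stall_msg_map[addr].
        // No wakeup event is scheduled, so the controller must later call
        // reanalyzeMessages() for this address.
        m_in_buffer_ptr->stallMessage(addr);
    }

    // Called by a higher-ranked in_port once it has changed the block's state.
    void unblock(const Address& addr)
    {
        // Pushes every message stalled on addr back onto the priority heap
        // and schedules the buffer's consumer to examine them again.
        m_in_buffer_ptr->reanalyzeMessages(addr);
    }
};

In the actual protocol specification files this pairing is presumably expressed through the stall_and_wait and wakeUpDependents statements, judging from the renamed slicc AST files listed above, rather than through hand-written C++ like the sketch.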
Diffstat (limited to 'src/mem/ruby/buffers/MessageBuffer.cc')
-rw-r--r--  src/mem/ruby/buffers/MessageBuffer.cc  43
1 file changed, 43 insertions, 0 deletions
diff --git a/src/mem/ruby/buffers/MessageBuffer.cc b/src/mem/ruby/buffers/MessageBuffer.cc
index 9cd1dd47b..7d28cef22 100644
--- a/src/mem/ruby/buffers/MessageBuffer.cc
+++ b/src/mem/ruby/buffers/MessageBuffer.cc
@@ -334,6 +334,49 @@ MessageBuffer::recycle()
g_eventQueue_ptr->getTime() + m_recycle_latency);
}
+void
+MessageBuffer::reanalyzeMessages(const Address& addr)
+{
+ DEBUG_MSG(QUEUE_COMP, MedPrio, "reanalyzeMessages " + m_name);
+ assert(m_stall_msg_map.count(addr) > 0);
+
+ //
+ // Put all stalled messages associated with this address back on the
+ // prio heap
+ //
+ while(!m_stall_msg_map[addr].empty()) {
+ m_msg_counter++;
+ MessageBufferNode msgNode(g_eventQueue_ptr->getTime() + 1,
+ m_msg_counter,
+ m_stall_msg_map[addr].front());
+
+ m_prio_heap.push_back(msgNode);
+ push_heap(m_prio_heap.begin(), m_prio_heap.end(),
+ greater<MessageBufferNode>());
+
+ g_eventQueue_ptr->scheduleEventAbsolute(m_consumer_ptr, msgNode.m_time);
+ m_stall_msg_map[addr].pop_front();
+ }
+}
+
+void
+MessageBuffer::stallMessage(const Address& addr)
+{
+ DEBUG_MSG(QUEUE_COMP, MedPrio, "stalling " + m_name);
+ assert(isReady());
+ assert(addr.getOffset() == 0);
+ MsgPtr message = m_prio_heap.front().m_msgptr;
+
+ pop();
+
+ //
+ // Note: no event is scheduled to analyze the map at a later time.
+ // Instead the controller is responsible to call reanalyzeMessages when
+ // these addresses change state.
+ //
+ (m_stall_msg_map[addr]).push_back(message);
+}
+
int
MessageBuffer::setAndReturnDelayCycles(MsgPtr msg_ptr)
{