summaryrefslogtreecommitdiff
path: root/src/mem/ruby/buffers
diff options
context:
space:
mode:
authorBrad Beckmann <Brad.Beckmann@amd.com>2010-08-20 11:46:14 -0700
committerBrad Beckmann <Brad.Beckmann@amd.com>2010-08-20 11:46:14 -0700
commite7f2da517adbc9ba4ed1b33de102126260a0d587 (patch)
treedc45a1acf1843da774f55fb1a5e27333804c4910 /src/mem/ruby/buffers
parentaf6b97e3ee2d73fcb2d4bcdbdffc9a6534dfdac8 (diff)
downloadgem5-e7f2da517adbc9ba4ed1b33de102126260a0d587.tar.xz
ruby: Stall and wait input messages instead of recycling
This patch allows messages to be stalled in their input buffers and wait until a corresponding address changes state. In order to make this work, all in_ports must be ranked in order of dependence and those in_ports that may unblock an address, must wake up the stalled messages. A lot of this complexity is handled in slicc and the specification files simply annotate the in_ports. --HG-- rename : src/mem/slicc/ast/CheckAllocateStatementAST.py => src/mem/slicc/ast/StallAndWaitStatementAST.py rename : src/mem/slicc/ast/CheckAllocateStatementAST.py => src/mem/slicc/ast/WakeUpDependentsStatementAST.py
Diffstat (limited to 'src/mem/ruby/buffers')
-rw-r--r--src/mem/ruby/buffers/MessageBuffer.cc43
-rw-r--r--src/mem/ruby/buffers/MessageBuffer.hh9
2 files changed, 52 insertions, 0 deletions
diff --git a/src/mem/ruby/buffers/MessageBuffer.cc b/src/mem/ruby/buffers/MessageBuffer.cc
index 9cd1dd47b..7d28cef22 100644
--- a/src/mem/ruby/buffers/MessageBuffer.cc
+++ b/src/mem/ruby/buffers/MessageBuffer.cc
@@ -334,6 +334,49 @@ MessageBuffer::recycle()
g_eventQueue_ptr->getTime() + m_recycle_latency);
}
+void
+MessageBuffer::reanalyzeMessages(const Address& addr)
+{
+ DEBUG_MSG(QUEUE_COMP, MedPrio, "reanalyzeMessages " + m_name);
+ assert(m_stall_msg_map.count(addr) > 0);
+
+ //
+ // Put all stalled messages associated with this address back on the
+ // prio heap
+ //
+ while(!m_stall_msg_map[addr].empty()) {
+ m_msg_counter++;
+ MessageBufferNode msgNode(g_eventQueue_ptr->getTime() + 1,
+ m_msg_counter,
+ m_stall_msg_map[addr].front());
+
+ m_prio_heap.push_back(msgNode);
+ push_heap(m_prio_heap.begin(), m_prio_heap.end(),
+ greater<MessageBufferNode>());
+
+ g_eventQueue_ptr->scheduleEventAbsolute(m_consumer_ptr, msgNode.m_time);
+ m_stall_msg_map[addr].pop_front();
+ }
+}
+
+void
+MessageBuffer::stallMessage(const Address& addr)
+{
+ DEBUG_MSG(QUEUE_COMP, MedPrio, "stalling " + m_name);
+ assert(isReady());
+ assert(addr.getOffset() == 0);
+ MsgPtr message = m_prio_heap.front().m_msgptr;
+
+ pop();
+
+ //
+ // Note: no event is scheduled to analyze the map at a later time.
+ // Instead the controller is responsible to call reanalyzeMessages when
+ // these addresses change state.
+ //
+ (m_stall_msg_map[addr]).push_back(message);
+}
+
int
MessageBuffer::setAndReturnDelayCycles(MsgPtr msg_ptr)
{
diff --git a/src/mem/ruby/buffers/MessageBuffer.hh b/src/mem/ruby/buffers/MessageBuffer.hh
index e4bee5cf6..e09abeb7d 100644
--- a/src/mem/ruby/buffers/MessageBuffer.hh
+++ b/src/mem/ruby/buffers/MessageBuffer.hh
@@ -45,6 +45,7 @@
#include "mem/ruby/common/Global.hh"
#include "mem/ruby/eventqueue/RubyEventQueue.hh"
#include "mem/ruby/slicc_interface/Message.hh"
+#include "mem/ruby/common/Address.hh"
class MessageBuffer
{
@@ -58,6 +59,9 @@ class MessageBuffer
m_recycle_latency = recycle_latency;
}
+ void reanalyzeMessages(const Address& addr);
+ void stallMessage(const Address& addr);
+
// TRUE if head of queue timestamp <= SystemTime
bool
isReady() const
@@ -150,6 +154,11 @@ class MessageBuffer
// Data Members (m_ prefix)
Consumer* m_consumer_ptr; // Consumer to signal a wakeup(), can be NULL
std::vector<MessageBufferNode> m_prio_heap;
+
+ typedef m5::hash_map< Address, std::list<MsgPtr> > StallMsgMapType;
+ typedef std::vector<MsgPtr>::iterator MsgListIter;
+
+ StallMsgMapType m_stall_msg_map;
std::string m_name;
int m_max_size;