Diffstat (limited to 'src/mem/ruby/network')
-rw-r--r--  src/mem/ruby/network/simple/PerfectSwitch.cc  281
-rw-r--r--  src/mem/ruby/network/simple/PerfectSwitch.hh    2
2 files changed, 151 insertions, 132 deletions
diff --git a/src/mem/ruby/network/simple/PerfectSwitch.cc b/src/mem/ruby/network/simple/PerfectSwitch.cc
index 7229c724f..5c461c63f 100644
--- a/src/mem/ruby/network/simple/PerfectSwitch.cc
+++ b/src/mem/ruby/network/simple/PerfectSwitch.cc
@@ -54,6 +54,11 @@ PerfectSwitch::PerfectSwitch(SwitchID sid, SimpleNetwork* network_ptr)
m_round_robin_start = 0;
m_network_ptr = network_ptr;
m_wakeups_wo_switch = 0;
+
+ for (int i = 0; i < m_virtual_networks; ++i)
+ {
+ m_pending_message_count.push_back(0);
+ }
}
void
@@ -62,12 +67,15 @@ PerfectSwitch::addInPort(const vector<MessageBuffer*>& in)
assert(in.size() == m_virtual_networks);
NodeID port = m_in.size();
m_in.push_back(in);
+
for (int j = 0; j < m_virtual_networks; j++) {
m_in[port][j]->setConsumer(this);
string desc = csprintf("[Queue from port %s %s %s to PerfectSwitch]",
NodeIDToString(m_switch_id), NodeIDToString(port),
NodeIDToString(j));
m_in[port][j]->setDescription(desc);
+ m_in[port][j]->setIncomingLink(port);
+ m_in[port][j]->setVnet(j);
}
}
@@ -154,161 +162,170 @@ PerfectSwitch::wakeup()
m_round_robin_start = 0;
}
- // for all input ports, use round robin scheduling
- for (int counter = 0; counter < m_in.size(); counter++) {
- // Round robin scheduling
- incoming++;
- if (incoming >= m_in.size()) {
- incoming = 0;
- }
+ if (m_pending_message_count[vnet] > 0) {
+ // for all input ports, use round robin scheduling
+ for (int counter = 0; counter < m_in.size(); counter++) {
+ // Round robin scheduling
+ incoming++;
+ if (incoming >= m_in.size()) {
+ incoming = 0;
+ }
- // temporary vectors to store the routing results
- vector<LinkID> output_links;
- vector<NetDest> output_link_destinations;
-
- // Is there a message waiting?
- while (m_in[incoming][vnet]->isReady()) {
- DPRINTF(RubyNetwork, "incoming: %d\n", incoming);
-
- // Peek at message
- msg_ptr = m_in[incoming][vnet]->peekMsgPtr();
- net_msg_ptr = safe_cast<NetworkMessage*>(msg_ptr.get());
- DPRINTF(RubyNetwork, "Message: %s\n", (*net_msg_ptr));
-
- output_links.clear();
- output_link_destinations.clear();
- NetDest msg_dsts =
- net_msg_ptr->getInternalDestination();
-
- // Unfortunately, the token-protocol sends some
- // zero-destination messages, so this assert isn't valid
- // assert(msg_dsts.count() > 0);
-
- assert(m_link_order.size() == m_routing_table.size());
- assert(m_link_order.size() == m_out.size());
-
- if (m_network_ptr->getAdaptiveRouting()) {
- if (m_network_ptr->isVNetOrdered(vnet)) {
- // Don't adaptively route
- for (int out = 0; out < m_out.size(); out++) {
- m_link_order[out].m_link = out;
- m_link_order[out].m_value = 0;
- }
- } else {
- // Find how clogged each link is
- for (int out = 0; out < m_out.size(); out++) {
- int out_queue_length = 0;
- for (int v = 0; v < m_virtual_networks; v++) {
- out_queue_length += m_out[out][v]->getSize();
+ // temporary vectors to store the routing results
+ vector<LinkID> output_links;
+ vector<NetDest> output_link_destinations;
+
+ // Is there a message waiting?
+ while (m_in[incoming][vnet]->isReady()) {
+ DPRINTF(RubyNetwork, "incoming: %d\n", incoming);
+
+ // Peek at message
+ msg_ptr = m_in[incoming][vnet]->peekMsgPtr();
+ net_msg_ptr = safe_cast<NetworkMessage*>(msg_ptr.get());
+ DPRINTF(RubyNetwork, "Message: %s\n", (*net_msg_ptr));
+
+ output_links.clear();
+ output_link_destinations.clear();
+ NetDest msg_dsts =
+ net_msg_ptr->getInternalDestination();
+
+ // Unfortunately, the token-protocol sends some
+ // zero-destination messages, so this assert isn't valid
+ // assert(msg_dsts.count() > 0);
+
+ assert(m_link_order.size() == m_routing_table.size());
+ assert(m_link_order.size() == m_out.size());
+
+ if (m_network_ptr->getAdaptiveRouting()) {
+ if (m_network_ptr->isVNetOrdered(vnet)) {
+ // Don't adaptively route
+ for (int out = 0; out < m_out.size(); out++) {
+ m_link_order[out].m_link = out;
+ m_link_order[out].m_value = 0;
+ }
+ } else {
+ // Find how clogged each link is
+ for (int out = 0; out < m_out.size(); out++) {
+ int out_queue_length = 0;
+ for (int v = 0; v < m_virtual_networks; v++) {
+ out_queue_length += m_out[out][v]->getSize();
+ }
+ int value =
+ (out_queue_length << 8) | (random() & 0xff);
+ m_link_order[out].m_link = out;
+ m_link_order[out].m_value = value;
}
- int value =
- (out_queue_length << 8) | (random() & 0xff);
- m_link_order[out].m_link = out;
- m_link_order[out].m_value = value;
+
+ // Look at the most empty link first
+ sort(m_link_order.begin(), m_link_order.end());
}
+ }
- // Look at the most empty link first
- sort(m_link_order.begin(), m_link_order.end());
+ for (int i = 0; i < m_routing_table.size(); i++) {
+ // pick the next link to look at
+ int link = m_link_order[i].m_link;
+ NetDest dst = m_routing_table[link];
+ DPRINTF(RubyNetwork, "dst: %s\n", dst);
+
+ if (!msg_dsts.intersectionIsNotEmpty(dst))
+ continue;
+
+ // Remember what link we're using
+ output_links.push_back(link);
+
+ // Need to remember which destinations need this
+ // message in another vector. This Set is the
+ // intersection of the routing_table entry and the
+ // current destination set. The intersection must
+ // not be empty, since we passed the intersection check above.
+ output_link_destinations.push_back(msg_dsts.AND(dst));
+
+ // Next, we update the msg_destination not to
+ // include those nodes that were already handled
+ // by this link
+ msg_dsts.removeNetDest(dst);
}
- }
- for (int i = 0; i < m_routing_table.size(); i++) {
- // pick the next link to look at
- int link = m_link_order[i].m_link;
- NetDest dst = m_routing_table[link];
- DPRINTF(RubyNetwork, "dst: %s\n", dst);
-
- if (!msg_dsts.intersectionIsNotEmpty(dst))
- continue;
-
- // Remember what link we're using
- output_links.push_back(link);
-
- // Need to remember which destinations need this
- // message in another vector. This Set is the
- // intersection of the routing_table entry and the
- // current destination set. The intersection must
- // not be empty, since we are inside "if"
- output_link_destinations.push_back(msg_dsts.AND(dst));
-
- // Next, we update the msg_destination not to
- // include those nodes that were already handled
- // by this link
- msg_dsts.removeNetDest(dst);
- }
+ assert(msg_dsts.count() == 0);
+ //assert(output_links.size() > 0);
+
+ // Check for resources - for all outgoing queues
+ bool enough = true;
+ for (int i = 0; i < output_links.size(); i++) {
+ int outgoing = output_links[i];
+ if (!m_out[outgoing][vnet]->areNSlotsAvailable(1))
+ enough = false;
+ DPRINTF(RubyNetwork, "Checking if node is blocked\n"
+ "outgoing: %d, vnet: %d, enough: %d\n",
+ outgoing, vnet, enough);
+ }
- assert(msg_dsts.count() == 0);
- //assert(output_links.size() > 0);
-
- // Check for resources - for all outgoing queues
- bool enough = true;
- for (int i = 0; i < output_links.size(); i++) {
- int outgoing = output_links[i];
- if (!m_out[outgoing][vnet]->areNSlotsAvailable(1))
- enough = false;
- DPRINTF(RubyNetwork, "Checking if node is blocked\n"
- "outgoing: %d, vnet: %d, enough: %d\n",
- outgoing, vnet, enough);
- }
+ // There were not enough resources
+ if (!enough) {
+ g_eventQueue_ptr->scheduleEvent(this, 1);
+ DPRINTF(RubyNetwork, "Can't deliver message since a node "
+ "is blocked\n"
+ "Message: %s\n", (*net_msg_ptr));
+ break; // go to next incoming port
+ }
- // There were not enough resources
- if (!enough) {
- g_eventQueue_ptr->scheduleEvent(this, 1);
- DPRINTF(RubyNetwork, "Can't deliver message since a node "
- "is blocked\n"
- "Message: %s\n", (*net_msg_ptr));
- break; // go to next incoming port
- }
+ MsgPtr unmodified_msg_ptr;
- MsgPtr unmodified_msg_ptr;
+ if (output_links.size() > 1) {
+ // If we are sending this message down more than
+ // one link (size > 1), we need to make a copy of
+ // the message so each branch can have a different
+ // internal destination. We also need to keep an
+ // unmodified MsgPtr, because the MessageBuffer
+ // enqueue function will modify the message.
- if (output_links.size() > 1) {
- // If we are sending this message down more than
- // one link (size>1), we need to make a copy of
- // the message so each branch can have a different
- // internal destination we need to create an
- // unmodified MsgPtr because the MessageBuffer
- // enqueue func will modify the message
+ // This magic line creates a private copy of the
+ // message
+ unmodified_msg_ptr = msg_ptr->clone();
+ }
- // This magic line creates a private copy of the
- // message
- unmodified_msg_ptr = msg_ptr->clone();
- }
+ // Enqueue it - for all outgoing queues
+ for (int i = 0; i < output_links.size(); i++) {
+ int outgoing = output_links[i];
- // Enqueue it - for all outgoing queues
- for (int i=0; i<output_links.size(); i++) {
- int outgoing = output_links[i];
+ if (i > 0) {
+ // create a private copy of the unmodified
+ // message
+ msg_ptr = unmodified_msg_ptr->clone();
+ }
- if (i > 0) {
- // create a private copy of the unmodified
- // message
- msg_ptr = unmodified_msg_ptr->clone();
- }
+ // Change the internal destination set of the
+ // message so it knows which destinations this
+ // link is responsible for.
+ net_msg_ptr = safe_cast<NetworkMessage*>(msg_ptr.get());
+ net_msg_ptr->getInternalDestination() =
+ output_link_destinations[i];
- // Change the internal destination set of the
- // message so it knows which destinations this
- // link is responsible for.
- net_msg_ptr = safe_cast<NetworkMessage*>(msg_ptr.get());
- net_msg_ptr->getInternalDestination() =
- output_link_destinations[i];
+ // Enqueue msg
+ DPRINTF(RubyNetwork, "Switch: %d enqueuing net msg from "
+ "inport[%d][%d] to outport [%d][%d] time: %lld.\n",
+ m_switch_id, incoming, vnet, outgoing, vnet,
+ g_eventQueue_ptr->getTime());
- // Enqeue msg
- DPRINTF(RubyNetwork, "Switch: %d enqueuing net msg from "
- "inport[%d][%d] to outport [%d][%d] time: %lld.\n",
- m_switch_id, incoming, vnet, outgoing, vnet,
- g_eventQueue_ptr->getTime());
+ m_out[outgoing][vnet]->enqueue(msg_ptr);
+ }
- m_out[outgoing][vnet]->enqueue(msg_ptr);
+ // Dequeue msg
+ m_in[incoming][vnet]->pop();
+ m_pending_message_count[vnet]--;
}
-
- // Dequeue msg
- m_in[incoming][vnet]->pop();
}
}
}
}
void
+PerfectSwitch::storeEventInfo(int info)
+{
+ m_pending_message_count[info]++;
+}
+
+void
PerfectSwitch::printStats(std::ostream& out) const
{
out << "PerfectSwitch printStats" << endl;
diff --git a/src/mem/ruby/network/simple/PerfectSwitch.hh b/src/mem/ruby/network/simple/PerfectSwitch.hh
index a7e577df0..cd0219fd9 100644
--- a/src/mem/ruby/network/simple/PerfectSwitch.hh
+++ b/src/mem/ruby/network/simple/PerfectSwitch.hh
@@ -69,6 +69,7 @@ class PerfectSwitch : public Consumer
int getOutLinks() const { return m_out.size(); }
void wakeup();
+ void storeEventInfo(int info);
void printStats(std::ostream& out) const;
void clearStats();
@@ -92,6 +93,7 @@ class PerfectSwitch : public Consumer
int m_round_robin_start;
int m_wakeups_wo_switch;
SimpleNetwork* m_network_ptr;
+ std::vector<int> m_pending_message_count;
};
inline std::ostream&
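
The storeEventInfo()/m_pending_message_count mechanism added above amounts to one counter per virtual network: an input MessageBuffer notifies the switch when a message arrives on a vnet, wakeup() decrements the counter after popping a message, and wakeup() skips any vnet whose counter is zero. The following is a minimal standalone sketch of that bookkeeping; ToySwitch, messageDequeued() and hasPendingMessages() are illustrative names for this sketch, not gem5 code.

#include <cassert>
#include <vector>

// Hypothetical stand-in for the per-vnet counting done by PerfectSwitch.
class ToySwitch
{
  public:
    explicit ToySwitch(int virtual_networks)
        : m_pending_message_count(virtual_networks, 0) {}

    // Analogous to PerfectSwitch::storeEventInfo(int info): an input
    // buffer reports that a message arrived on virtual network 'vnet'.
    void storeEventInfo(int vnet) { m_pending_message_count[vnet]++; }

    // Called once a message on 'vnet' has been routed and popped,
    // mirroring the m_pending_message_count[vnet]-- in wakeup().
    void messageDequeued(int vnet)
    {
        assert(m_pending_message_count[vnet] > 0);
        m_pending_message_count[vnet]--;
    }

    // wakeup() only scans the input ports of a vnet with work pending.
    bool hasPendingMessages(int vnet) const
    {
        return m_pending_message_count[vnet] > 0;
    }

  private:
    std::vector<int> m_pending_message_count;  // one counter per vnet
};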