author     Nilay Vaish <nilay@cs.wisc.edu>    2014-09-01 16:55:48 -0500
committer  Nilay Vaish <nilay@cs.wisc.edu>    2014-09-01 16:55:48 -0500
commit     b4dade6fb273baaf86d316ca90fba5ebc7d4b717 (patch)
tree       fb74926d6c1fb64b400e7dd65517369f13dab693 /src/mem/ruby
parent     7a0d5aafe4b845a2d1cff6210d7c6ee66e8aba61 (diff)
download   gem5-b4dade6fb273baaf86d316ca90fba5ebc7d4b717.tar.xz
ruby: PerfectSwitch: moves code to a per-vnet helper function
This patch moves code from the wakeup() function into a new per-vnet helper, operateVnet(). The aim is to improve the readability of the code.
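
For orientation before the diff: the sketch below is a hypothetical, self-contained illustration of the resulting structure, using simplified placeholder names (ToySwitch, m_virtual_networks) rather than gem5's actual classes. wakeup() keeps only the vnet iteration and priority handling, and the per-vnet work lives in a helper; the real implementation in the diff also periodically inverts the priority order to avoid starvation and does the routing and enqueueing inside operateVnet().

#include <cstdio>

// Toy stand-in for PerfectSwitch; names and members are illustrative only.
class ToySwitch
{
  public:
    explicit ToySwitch(int vnets) : m_virtual_networks(vnets) {}

    // wakeup() now only decides the order in which virtual networks are
    // serviced and delegates the actual work to operateVnet().
    void wakeup()
    {
        // Highest-numbered vnet gets priority here; the real code also
        // periodically inverts this order to avoid starvation.
        for (int vnet = m_virtual_networks - 1; vnet >= 0; --vnet)
            operateVnet(vnet);
    }

  private:
    // Per-vnet helper: in PerfectSwitch this is where round-robin input
    // selection, routing, resource checks, and enqueueing happen.
    void operateVnet(int vnet)
    {
        std::printf("servicing vnet %d\n", vnet);
    }

    int m_virtual_networks;
};

int main()
{
    ToySwitch sw(3);
    sw.wakeup();
    return 0;
}
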
Diffstat (limited to 'src/mem/ruby')
-rw-r--r--  src/mem/ruby/network/simple/PerfectSwitch.cc  | 315
-rw-r--r--  src/mem/ruby/network/simple/PerfectSwitch.hh  |   2
2 files changed, 159 insertions, 158 deletions
diff --git a/src/mem/ruby/network/simple/PerfectSwitch.cc b/src/mem/ruby/network/simple/PerfectSwitch.cc
index 4565711a2..fa0709496 100644
--- a/src/mem/ruby/network/simple/PerfectSwitch.cc
+++ b/src/mem/ruby/network/simple/PerfectSwitch.cc
@@ -104,192 +104,166 @@ PerfectSwitch::~PerfectSwitch()
}
void
-PerfectSwitch::wakeup()
+PerfectSwitch::operateVnet(int vnet)
{
MsgPtr msg_ptr;
-
- // Give the highest numbered link priority most of the time
- m_wakeups_wo_switch++;
- int highest_prio_vnet = m_virtual_networks-1;
- int lowest_prio_vnet = 0;
- int decrementer = 1;
NetworkMessage* net_msg_ptr = NULL;
- // invert priorities to avoid starvation seen in the component network
- if (m_wakeups_wo_switch > PRIORITY_SWITCH_LIMIT) {
- m_wakeups_wo_switch = 0;
- highest_prio_vnet = 0;
- lowest_prio_vnet = m_virtual_networks-1;
- decrementer = -1;
+ // This is for round-robin scheduling
+ int incoming = m_round_robin_start;
+ m_round_robin_start++;
+ if (m_round_robin_start >= m_in.size()) {
+ m_round_robin_start = 0;
}
- // For all components incoming queues
- for (int vnet = highest_prio_vnet;
- (vnet * decrementer) >= (decrementer * lowest_prio_vnet);
- vnet -= decrementer) {
+ if(m_pending_message_count[vnet] > 0) {
+ // for all input ports, use round robin scheduling
+ for (int counter = 0; counter < m_in.size(); counter++) {
+ // Round robin scheduling
+ incoming++;
+ if (incoming >= m_in.size()) {
+ incoming = 0;
+ }
- // This is for round-robin scheduling
- int incoming = m_round_robin_start;
- m_round_robin_start++;
- if (m_round_robin_start >= m_in.size()) {
- m_round_robin_start = 0;
- }
+ // temporary vectors to store the routing results
+ vector<LinkID> output_links;
+ vector<NetDest> output_link_destinations;
+
+ // Is there a message waiting?
+ auto it = m_in[incoming].find(vnet);
+ if (it == m_in[incoming].end())
+ continue;
+ MessageBuffer *buffer = (*it).second;
+
+ while (buffer->isReady()) {
+ DPRINTF(RubyNetwork, "incoming: %d\n", incoming);
+
+ // Peek at message
+ msg_ptr = buffer->peekMsgPtr();
+ net_msg_ptr = safe_cast<NetworkMessage*>(msg_ptr.get());
+ DPRINTF(RubyNetwork, "Message: %s\n", (*net_msg_ptr));
+
+ output_links.clear();
+ output_link_destinations.clear();
+ NetDest msg_dsts = net_msg_ptr->getInternalDestination();
+
+ // Unfortunately, the token-protocol sends some
+ // zero-destination messages, so this assert isn't valid
+ // assert(msg_dsts.count() > 0);
+
+ assert(m_link_order.size() == m_routing_table.size());
+ assert(m_link_order.size() == m_out.size());
+
+ if (m_network_ptr->getAdaptiveRouting()) {
+ if (m_network_ptr->isVNetOrdered(vnet)) {
+ // Don't adaptively route
+ for (int out = 0; out < m_out.size(); out++) {
+ m_link_order[out].m_link = out;
+ m_link_order[out].m_value = 0;
+ }
+ } else {
+ // Find how clogged each link is
+ for (int out = 0; out < m_out.size(); out++) {
+ int out_queue_length = 0;
+ for (int v = 0; v < m_virtual_networks; v++) {
+ out_queue_length += m_out[out][v]->getSize();
+ }
+ int value =
+ (out_queue_length << 8) | (random() & 0xff);
+ m_link_order[out].m_link = out;
+ m_link_order[out].m_value = value;
+ }
- if(m_pending_message_count[vnet] > 0) {
- // for all input ports, use round robin scheduling
- for (int counter = 0; counter < m_in.size(); counter++) {
- // Round robin scheduling
- incoming++;
- if (incoming >= m_in.size()) {
- incoming = 0;
+ // Look at the most empty link first
+ sort(m_link_order.begin(), m_link_order.end());
+ }
}
- // temporary vectors to store the routing results
- vector<LinkID> output_links;
- vector<NetDest> output_link_destinations;
-
- // Is there a message waiting?
- auto it = m_in[incoming].find(vnet);
- if (it == m_in[incoming].end())
- continue;
- MessageBuffer *buffer = (*it).second;
+ for (int i = 0; i < m_routing_table.size(); i++) {
+ // pick the next link to look at
+ int link = m_link_order[i].m_link;
+ NetDest dst = m_routing_table[link];
+ DPRINTF(RubyNetwork, "dst: %s\n", dst);
- while (buffer->isReady()) {
- DPRINTF(RubyNetwork, "incoming: %d\n", incoming);
+ if (!msg_dsts.intersectionIsNotEmpty(dst))
+ continue;
- // Peek at message
- msg_ptr = buffer->peekMsgPtr();
- net_msg_ptr = safe_cast<NetworkMessage*>(msg_ptr.get());
- DPRINTF(RubyNetwork, "Message: %s\n", (*net_msg_ptr));
+ // Remember what link we're using
+ output_links.push_back(link);
- output_links.clear();
- output_link_destinations.clear();
- NetDest msg_dsts =
- net_msg_ptr->getInternalDestination();
+ // Need to remember which destinations need this message in
+ // another vector. This Set is the intersection of the
+ // routing_table entry and the current destination set. The
+ // intersection must not be empty, since we are inside "if"
+ output_link_destinations.push_back(msg_dsts.AND(dst));
- // Unfortunately, the token-protocol sends some
- // zero-destination messages, so this assert isn't valid
- // assert(msg_dsts.count() > 0);
-
- assert(m_link_order.size() == m_routing_table.size());
- assert(m_link_order.size() == m_out.size());
+ // Next, we update the msg_destination not to include
+ // those nodes that were already handled by this link
+ msg_dsts.removeNetDest(dst);
+ }
- if (m_network_ptr->getAdaptiveRouting()) {
- if (m_network_ptr->isVNetOrdered(vnet)) {
- // Don't adaptively route
- for (int out = 0; out < m_out.size(); out++) {
- m_link_order[out].m_link = out;
- m_link_order[out].m_value = 0;
- }
- } else {
- // Find how clogged each link is
- for (int out = 0; out < m_out.size(); out++) {
- int out_queue_length = 0;
- for (int v = 0; v < m_virtual_networks; v++) {
- out_queue_length += m_out[out][v]->getSize();
- }
- int value =
- (out_queue_length << 8) | (random() & 0xff);
- m_link_order[out].m_link = out;
- m_link_order[out].m_value = value;
- }
+ assert(msg_dsts.count() == 0);
- // Look at the most empty link first
- sort(m_link_order.begin(), m_link_order.end());
- }
- }
+ // Check for resources - for all outgoing queues
+ bool enough = true;
+ for (int i = 0; i < output_links.size(); i++) {
+ int outgoing = output_links[i];
- for (int i = 0; i < m_routing_table.size(); i++) {
- // pick the next link to look at
- int link = m_link_order[i].m_link;
- NetDest dst = m_routing_table[link];
- DPRINTF(RubyNetwork, "dst: %s\n", dst);
-
- if (!msg_dsts.intersectionIsNotEmpty(dst))
- continue;
-
- // Remember what link we're using
- output_links.push_back(link);
-
- // Need to remember which destinations need this
- // message in another vector. This Set is the
- // intersection of the routing_table entry and the
- // current destination set. The intersection must
- // not be empty, since we are inside "if"
- output_link_destinations.push_back(msg_dsts.AND(dst));
-
- // Next, we update the msg_destination not to
- // include those nodes that were already handled
- // by this link
- msg_dsts.removeNetDest(dst);
- }
+ if (!m_out[outgoing][vnet]->areNSlotsAvailable(1))
+ enough = false;
- assert(msg_dsts.count() == 0);
- //assert(output_links.size() > 0);
-
- // Check for resources - for all outgoing queues
- bool enough = true;
- for (int i = 0; i < output_links.size(); i++) {
- int outgoing = output_links[i];
- if (!m_out[outgoing][vnet]->areNSlotsAvailable(1))
- enough = false;
- DPRINTF(RubyNetwork, "Checking if node is blocked ..."
- "outgoing: %d, vnet: %d, enough: %d\n",
- outgoing, vnet, enough);
- }
+ DPRINTF(RubyNetwork, "Checking if node is blocked ..."
+ "outgoing: %d, vnet: %d, enough: %d\n",
+ outgoing, vnet, enough);
+ }
- // There were not enough resources
- if (!enough) {
- scheduleEvent(Cycles(1));
- DPRINTF(RubyNetwork, "Can't deliver message since a node "
- "is blocked\n");
- DPRINTF(RubyNetwork, "Message: %s\n", (*net_msg_ptr));
- break; // go to next incoming port
- }
+ // There were not enough resources
+ if (!enough) {
+ scheduleEvent(Cycles(1));
+ DPRINTF(RubyNetwork, "Can't deliver message since a node "
+ "is blocked\n");
+ DPRINTF(RubyNetwork, "Message: %s\n", (*net_msg_ptr));
+ break; // go to next incoming port
+ }
- MsgPtr unmodified_msg_ptr;
+ MsgPtr unmodified_msg_ptr;
- if (output_links.size() > 1) {
- // If we are sending this message down more than
- // one link (size>1), we need to make a copy of
- // the message so each branch can have a different
- // internal destination we need to create an
- // unmodified MsgPtr because the MessageBuffer
- // enqueue func will modify the message
+ if (output_links.size() > 1) {
+ // If we are sending this message down more than one link
+ // (size>1), we need to make a copy of the message so each
+ // branch can have a different internal destination we need
+ // to create an unmodified MsgPtr because the MessageBuffer
+ // enqueue func will modify the message
- // This magic line creates a private copy of the
- // message
- unmodified_msg_ptr = msg_ptr->clone();
- }
+ // This magic line creates a private copy of the message
+ unmodified_msg_ptr = msg_ptr->clone();
+ }
- // Dequeue msg
- buffer->dequeue();
- m_pending_message_count[vnet]--;
+ // Dequeue msg
+ buffer->dequeue();
+ m_pending_message_count[vnet]--;
- // Enqueue it - for all outgoing queues
- for (int i=0; i<output_links.size(); i++) {
- int outgoing = output_links[i];
+ // Enqueue it - for all outgoing queues
+ for (int i=0; i<output_links.size(); i++) {
+ int outgoing = output_links[i];
- if (i > 0) {
- // create a private copy of the unmodified
- // message
- msg_ptr = unmodified_msg_ptr->clone();
- }
+ if (i > 0) {
+ // create a private copy of the unmodified message
+ msg_ptr = unmodified_msg_ptr->clone();
+ }
- // Change the internal destination set of the
- // message so it knows which destinations this
- // link is responsible for.
- net_msg_ptr = safe_cast<NetworkMessage*>(msg_ptr.get());
- net_msg_ptr->getInternalDestination() =
- output_link_destinations[i];
+ // Change the internal destination set of the message so it
+ // knows which destinations this link is responsible for.
+ net_msg_ptr = safe_cast<NetworkMessage*>(msg_ptr.get());
+ net_msg_ptr->getInternalDestination() =
+ output_link_destinations[i];
- // Enqeue msg
- DPRINTF(RubyNetwork, "Enqueuing net msg from "
- "inport[%d][%d] to outport [%d][%d].\n",
- incoming, vnet, outgoing, vnet);
+ // Enqeue msg
+ DPRINTF(RubyNetwork, "Enqueuing net msg from "
+ "inport[%d][%d] to outport [%d][%d].\n",
+ incoming, vnet, outgoing, vnet);
- m_out[outgoing][vnet]->enqueue(msg_ptr);
- }
+ m_out[outgoing][vnet]->enqueue(msg_ptr);
}
}
}
@@ -297,6 +271,31 @@ PerfectSwitch::wakeup()
}
void
+PerfectSwitch::wakeup()
+{
+ // Give the highest numbered link priority most of the time
+ m_wakeups_wo_switch++;
+ int highest_prio_vnet = m_virtual_networks-1;
+ int lowest_prio_vnet = 0;
+ int decrementer = 1;
+
+ // invert priorities to avoid starvation seen in the component network
+ if (m_wakeups_wo_switch > PRIORITY_SWITCH_LIMIT) {
+ m_wakeups_wo_switch = 0;
+ highest_prio_vnet = 0;
+ lowest_prio_vnet = m_virtual_networks-1;
+ decrementer = -1;
+ }
+
+ // For all components incoming queues
+ for (int vnet = highest_prio_vnet;
+ (vnet * decrementer) >= (decrementer * lowest_prio_vnet);
+ vnet -= decrementer) {
+ operateVnet(vnet);
+ }
+}
+
+void
PerfectSwitch::storeEventInfo(int info)
{
m_pending_message_count[info]++;
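
An aside on the code moved into operateVnet() above: when adaptive routing is enabled and the vnet is not ordered, each output link gets a sort key whose high bits are its total queue occupancy and whose low 8 bits are random, so sorting services the emptiest link first and breaks ties randomly. The following is a hedged, standalone sketch of that ordering heuristic with illustrative names (orderLinksByOccupancy is not a gem5 function).

#include <algorithm>
#include <cstdio>
#include <cstdlib>
#include <vector>

// Illustrative stand-in for the link-ordering records sorted in operateVnet().
struct LinkOrder
{
    int m_link;
    int m_value;
    bool operator<(const LinkOrder &other) const
    { return m_value < other.m_value; }
};

// Build the same kind of sort key the moved code uses:
// (queue occupancy << 8) | (random tie-breaker in the low 8 bits).
std::vector<LinkOrder>
orderLinksByOccupancy(const std::vector<int> &queue_lengths)
{
    std::vector<LinkOrder> order(queue_lengths.size());
    for (size_t out = 0; out < queue_lengths.size(); ++out) {
        order[out].m_link = static_cast<int>(out);
        order[out].m_value =
            (queue_lengths[out] << 8) | (std::rand() & 0xff);
    }
    // Look at the most empty link first.
    std::sort(order.begin(), order.end());
    return order;
}

int main()
{
    // Total queue length per output link, summed over all vnets.
    std::vector<int> queue_lengths = {4, 0, 7, 2};
    for (const LinkOrder &lo : orderLinksByOccupancy(queue_lengths))
        std::printf("link %d (key %d)\n", lo.m_link, lo.m_value);
    return 0;
}
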
diff --git a/src/mem/ruby/network/simple/PerfectSwitch.hh b/src/mem/ruby/network/simple/PerfectSwitch.hh
index 25e3e2754..161430bd1 100644
--- a/src/mem/ruby/network/simple/PerfectSwitch.hh
+++ b/src/mem/ruby/network/simple/PerfectSwitch.hh
@@ -84,6 +84,8 @@ class PerfectSwitch : public Consumer
PerfectSwitch(const PerfectSwitch& obj);
PerfectSwitch& operator=(const PerfectSwitch& obj);
+ void operateVnet(int vnet);
+
SwitchID m_switch_id;
// vector of queues from the components
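
One design point carried over unchanged by this move: when a message is routed out over more than one link, the switch clones an unmodified copy first, because enqueueing modifies the message's internal destination set and each branch needs its own subset. Below is a hedged, self-contained sketch of that cloning pattern using std::shared_ptr and made-up types (ToyMsg, dest_mask) in place of gem5's MsgPtr and NetDest.

#include <cstdio>
#include <memory>
#include <vector>

// Stand-in for a network message; illustrative only.
struct ToyMsg
{
    int dest_mask;  // plays the role of the internal destination set
    std::shared_ptr<ToyMsg> clone() const
    { return std::make_shared<ToyMsg>(*this); }
};

// Send one message out over several links, giving each branch its own
// destination subset, mirroring the cloning logic in operateVnet().
void multicast(std::shared_ptr<ToyMsg> msg,
               const std::vector<int> &link_dest_masks)
{
    std::shared_ptr<ToyMsg> unmodified;
    if (link_dest_masks.size() > 1)
        unmodified = msg->clone();  // keep a pristine copy to clone from

    for (size_t i = 0; i < link_dest_masks.size(); ++i) {
        if (i > 0)
            msg = unmodified->clone();  // private copy for each extra branch
        msg->dest_mask = link_dest_masks[i];
        std::printf("enqueue copy with dest mask 0x%x\n", msg->dest_mask);
    }
}

int main()
{
    multicast(std::make_shared<ToyMsg>(ToyMsg{0x7}), {0x1, 0x2, 0x4});
    return 0;
}
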