summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorNathan Binkert <nate@binkert.org>2010-03-31 16:56:45 -0700
committerNathan Binkert <nate@binkert.org>2010-03-31 16:56:45 -0700
commitbe10204729c107b41d5d7487323c732e9fa09df5 (patch)
tree5c8f4001c490c4d777e8756e536cd2f2340c9ebb
parent60ae1d2b10002bb73b420fce91c4b74397c55457 (diff)
downloadgem5-be10204729c107b41d5d7487323c732e9fa09df5.tar.xz
style: another ruby style pass
-rw-r--r--src/mem/ruby/SConsopts4
-rw-r--r--src/mem/ruby/common/Address.hh4
-rw-r--r--src/mem/ruby/eventqueue/RubyEventQueueNode.hh14
-rw-r--r--src/mem/ruby/libruby.hh6
-rw-r--r--src/mem/ruby/network/Network.cc74
-rw-r--r--src/mem/ruby/network/Network.hh158
-rw-r--r--src/mem/ruby/network/simple/HierarchicalSwitchTopology.cc66
-rw-r--r--src/mem/ruby/network/simple/HierarchicalSwitchTopology.hh17
-rw-r--r--src/mem/ruby/network/simple/PerfectSwitch.cc475
-rw-r--r--src/mem/ruby/network/simple/PerfectSwitch.hh127
-rw-r--r--src/mem/ruby/network/simple/PtToPtTopology.hh17
-rw-r--r--src/mem/ruby/network/simple/SimpleNetwork.cc349
-rw-r--r--src/mem/ruby/network/simple/SimpleNetwork.hh148
-rw-r--r--src/mem/ruby/network/simple/Switch.cc259
-rw-r--r--src/mem/ruby/network/simple/Switch.hh109
-rw-r--r--src/mem/ruby/network/simple/Throttle.cc343
-rw-r--r--src/mem/ruby/network/simple/Throttle.hh151
-rw-r--r--src/mem/ruby/network/simple/Topology.cc546
-rw-r--r--src/mem/ruby/network/simple/Topology.hh167
-rw-r--r--src/mem/ruby/network/simple/Torus2DTopology.cc84
-rw-r--r--src/mem/ruby/network/simple/Torus2DTopology.hh17
-rw-r--r--src/mem/ruby/network/topologies/Crossbar.py2
-rw-r--r--src/mem/ruby/network/topologies/Mesh.py19
-rw-r--r--src/mem/ruby/network/topologies/MeshDirCorners.py41
-rw-r--r--src/mem/ruby/network/topologies/SConscript2
-rw-r--r--src/mem/ruby/profiler/AddressProfiler.cc2
-rw-r--r--src/mem/ruby/profiler/Profiler.cc8
-rw-r--r--src/mem/ruby/recorder/CacheRecorder.cc56
-rw-r--r--src/mem/ruby/recorder/CacheRecorder.hh66
-rw-r--r--src/mem/ruby/recorder/TraceRecord.cc164
-rw-r--r--src/mem/ruby/recorder/TraceRecord.hh118
-rw-r--r--src/mem/ruby/recorder/Tracer.cc165
-rw-r--r--src/mem/ruby/recorder/Tracer.hh90
-rw-r--r--src/mem/ruby/storebuffer/hfa.hh103
-rw-r--r--src/mem/ruby/storebuffer/hfatypes.hh80
-rw-r--r--src/mem/ruby/storebuffer/interface.cc67
-rw-r--r--src/mem/ruby/storebuffer/interface.hh46
-rw-r--r--src/mem/ruby/storebuffer/stb_interface.cc72
-rw-r--r--src/mem/ruby/storebuffer/stb_interface.hh20
-rw-r--r--src/mem/ruby/storebuffer/storebuffer.cc672
-rw-r--r--src/mem/ruby/storebuffer/storebuffer.hh163
-rw-r--r--src/mem/ruby/system/DirectoryMemory.cc4
-rw-r--r--src/mem/ruby/system/PersistentTable.cc2
-rw-r--r--src/mem/ruby/system/PersistentTable.hh2
-rw-r--r--src/mem/ruby/system/SparseMemory.cc102
-rw-r--r--src/mem/ruby/system/SparseMemory.hh20
46 files changed, 2229 insertions, 2992 deletions
diff --git a/src/mem/ruby/SConsopts b/src/mem/ruby/SConsopts
index 2b325ff33..6c9343346 100644
--- a/src/mem/ruby/SConsopts
+++ b/src/mem/ruby/SConsopts
@@ -37,9 +37,7 @@ sticky_vars.AddVariables(
BoolVariable('NO_VECTOR_BOUNDS_CHECKS', "Don't do bounds checks", True),
BoolVariable('RUBY_DEBUG', "Add debugging stuff to Ruby", False),
('GEMS_ROOT', "Add debugging stuff to Ruby", Dir('..').srcnode().abspath),
- BoolVariable('RUBY_TSO_CHECKER', "Use the Ruby TSO Checker", False)
)
-export_vars += [ 'NO_VECTOR_BOUNDS_CHECKS', 'RUBY_DEBUG', 'GEMS_ROOT',
- 'RUBY_TSO_CHECKER' ]
+export_vars += [ 'NO_VECTOR_BOUNDS_CHECKS', 'RUBY_DEBUG', 'GEMS_ROOT' ]
diff --git a/src/mem/ruby/common/Address.hh b/src/mem/ruby/common/Address.hh
index 73327e617..c495f8d86 100644
--- a/src/mem/ruby/common/Address.hh
+++ b/src/mem/ruby/common/Address.hh
@@ -171,7 +171,7 @@ Address::bitRemove(int small, int big) const
{
physical_address_t mask;
assert((unsigned)big >= (unsigned)small);
-
+
if (small >= ADDRESS_WIDTH - 1) {
return m_address;
} else if (big >= ADDRESS_WIDTH - 1) {
@@ -228,7 +228,7 @@ inline integer_t
Address::memoryModuleIndex() const
{
integer_t index =
- bitSelect(RubySystem::getBlockSizeBits() +
+ bitSelect(RubySystem::getBlockSizeBits() +
RubySystem::getMemorySizeBits(), ADDRESS_WIDTH);
assert (index >= 0);
return index;
diff --git a/src/mem/ruby/eventqueue/RubyEventQueueNode.hh b/src/mem/ruby/eventqueue/RubyEventQueueNode.hh
index c759e7f57..0bdc48e61 100644
--- a/src/mem/ruby/eventqueue/RubyEventQueueNode.hh
+++ b/src/mem/ruby/eventqueue/RubyEventQueueNode.hh
@@ -38,18 +38,18 @@
class RubyEventQueueNode : public Event
{
public:
- RubyEventQueueNode(Consumer* _consumer, RubyEventQueue* _eventq)
+ RubyEventQueueNode(Consumer* _consumer, RubyEventQueue* _eventq)
: m_consumer_ptr(_consumer), m_eventq_ptr(_eventq)
- {
- setFlags(AutoDelete);
+ {
+ setFlags(AutoDelete);
}
void print(std::ostream& out) const;
virtual void
- process()
- {
- m_consumer_ptr->wakeup();
- m_consumer_ptr->removeScheduledWakeupTime(m_eventq_ptr->getTime());
+ process()
+ {
+ m_consumer_ptr->wakeup();
+ m_consumer_ptr->removeScheduledWakeupTime(m_eventq_ptr->getTime());
}
virtual const char *description() const { return "Ruby Event"; }
diff --git a/src/mem/ruby/libruby.hh b/src/mem/ruby/libruby.hh
index 6d83c3f08..5bb8fc2a4 100644
--- a/src/mem/ruby/libruby.hh
+++ b/src/mem/ruby/libruby.hh
@@ -156,17 +156,17 @@ void libruby_print_stats(std::ostream &out);
/**
* does not return until done
- */
+ */
void libruby_playback_trace(char *trace_filename);
/*
* enables the tracer and opens the trace file
- */
+ */
void libruby_start_tracing(char *record_filename);
/*
* closes the trace file
- */
+ */
void libruby_stop_tracing();
/**
diff --git a/src/mem/ruby/network/Network.cc b/src/mem/ruby/network/Network.cc
index 380f7412d..89b9168b6 100644
--- a/src/mem/ruby/network/Network.cc
+++ b/src/mem/ruby/network/Network.cc
@@ -41,55 +41,57 @@ Network::Network(const Params *p)
m_link_latency = p->link_latency;
m_control_msg_size = p->control_msg_size;
- //
// Total nodes/controllers in network
// Must make sure this is called after the State Machine constructors
- //
- m_nodes = MachineType_base_number(MachineType_NUM);
+ m_nodes = MachineType_base_number(MachineType_NUM);
assert(m_nodes != 0);
assert(m_virtual_networks != 0);
assert(m_topology_ptr != NULL);
- //
// Initialize the controller's network pointers
- //
m_topology_ptr->initNetworkPtr(this);
}
-void Network::init()
+void
+Network::init()
{
- m_data_msg_size = RubySystem::getBlockSizeBytes() + m_control_msg_size;
+ m_data_msg_size = RubySystem::getBlockSizeBytes() + m_control_msg_size;
}
-int Network::MessageSizeType_to_int(MessageSizeType size_type)
+int
+Network::MessageSizeType_to_int(MessageSizeType size_type)
{
- switch(size_type) {
- case MessageSizeType_Undefined:
- ERROR_MSG("Can't convert Undefined MessageSizeType to integer");
- break;
- case MessageSizeType_Control:
- case MessageSizeType_Request_Control:
- case MessageSizeType_Reissue_Control:
- case MessageSizeType_Response_Control:
- case MessageSizeType_Writeback_Control:
- case MessageSizeType_Forwarded_Control:
- case MessageSizeType_Invalidate_Control:
- case MessageSizeType_Unblock_Control:
- case MessageSizeType_Persistent_Control:
- case MessageSizeType_Completion_Control:
- return m_control_msg_size;
- break;
- case MessageSizeType_Data:
- case MessageSizeType_Response_Data:
- case MessageSizeType_ResponseLocal_Data:
- case MessageSizeType_ResponseL2hit_Data:
- case MessageSizeType_Writeback_Data:
- return m_data_msg_size;
- break;
- default:
- ERROR_MSG("Invalid range for type MessageSizeType");
- break;
- }
- return 0;
+ switch(size_type) {
+ case MessageSizeType_Undefined:
+ ERROR_MSG("Can't convert Undefined MessageSizeType to integer");
+ break;
+ case MessageSizeType_Control:
+ case MessageSizeType_Request_Control:
+ case MessageSizeType_Reissue_Control:
+ case MessageSizeType_Response_Control:
+ case MessageSizeType_Writeback_Control:
+ case MessageSizeType_Forwarded_Control:
+ case MessageSizeType_Invalidate_Control:
+ case MessageSizeType_Unblock_Control:
+ case MessageSizeType_Persistent_Control:
+ case MessageSizeType_Completion_Control:
+ return m_control_msg_size;
+ case MessageSizeType_Data:
+ case MessageSizeType_Response_Data:
+ case MessageSizeType_ResponseLocal_Data:
+ case MessageSizeType_ResponseL2hit_Data:
+ case MessageSizeType_Writeback_Data:
+ return m_data_msg_size;
+ default:
+ ERROR_MSG("Invalid range for type MessageSizeType");
+ break;
+ }
+ return 0;
+}
+
+const Vector<Throttle*>*
+Network::getThrottles(NodeID id) const
+{
+ return NULL;
}
diff --git a/src/mem/ruby/network/Network.hh b/src/mem/ruby/network/Network.hh
index be0ab72db..b79c6407e 100644
--- a/src/mem/ruby/network/Network.hh
+++ b/src/mem/ruby/network/Network.hh
@@ -27,106 +27,96 @@
*/
/*
- * Network.hh
- *
- * Description: The Network class is the base class for classes that
- * implement the interconnection network between components
- * (processor/cache components and memory/directory components). The
- * interconnection network as described here is not a physical
- * network, but a programming concept used to implement all
- * communication between components. Thus parts of this 'network'
- * will model the on-chip connections between cache controllers and
- * directory controllers as well as the links between chip and network
- * switches.
- *
- * $Id$
- * */
+ * The Network class is the base class for classes that implement the
+ * interconnection network between components (processor/cache
+ * components and memory/directory components). The interconnection
+ * network as described here is not a physical network, but a
+ * programming concept used to implement all communication between
+ * components. Thus parts of this 'network' will model the on-chip
+ * connections between cache controllers and directory controllers as
+ * well as the links between chip and network switches.
+ */
-#ifndef NETWORK_H
-#define NETWORK_H
+#ifndef __MEM_RUBY_NETWORK_NETWORK_HH__
+#define __MEM_RUBY_NETWORK_NETWORK_HH__
+#include "mem/protocol/MessageSizeType.hh"
#include "mem/ruby/common/Global.hh"
#include "mem/ruby/system/NodeID.hh"
-#include "mem/protocol/MessageSizeType.hh"
#include "mem/ruby/system/System.hh"
-#include "sim/sim_object.hh"
#include "params/RubyNetwork.hh"
+#include "sim/sim_object.hh"
class NetDest;
class MessageBuffer;
class Throttle;
class Topology;
-class Network : public SimObject {
-public:
- // Constructors
+class Network : public SimObject
+{
+ public:
typedef RubyNetworkParams Params;
Network(const Params *p);
- virtual void init();
-
- // Destructor
- virtual ~Network() {}
-
- // Public Methods
- int getBufferSize() { return m_buffer_size; }
- int getNumberOfVirtualNetworks() { return m_virtual_networks; }
- int getEndpointBandwidth() { return m_endpoint_bandwidth; }
- bool getAdaptiveRouting() {return m_adaptive_routing; }
- int getLinkLatency() { return m_link_latency; }
- int MessageSizeType_to_int(MessageSizeType size_type);
-
-
- // returns the queue requested for the given component
- virtual MessageBuffer* getToNetQueue(NodeID id, bool ordered, int netNumber) = 0;
- virtual MessageBuffer* getFromNetQueue(NodeID id, bool ordered, int netNumber) = 0;
- virtual const Vector<Throttle*>* getThrottles(NodeID id) const { return NULL; }
-
- virtual int getNumNodes() {return 1;}
-
- virtual void makeOutLink(SwitchID src, NodeID dest, const NetDest& routing_table_entry, int link_latency, int link_weight, int bw_multiplier, bool isReconfiguration) = 0;
- virtual void makeInLink(SwitchID src, NodeID dest, const NetDest& routing_table_entry, int link_latency, int bw_multiplier, bool isReconfiguration) = 0;
- virtual void makeInternalLink(SwitchID src, NodeID dest, const NetDest& routing_table_entry, int link_latency, int link_weight, int bw_multiplier, bool isReconfiguration) = 0;
-
- virtual void reset() = 0;
-
- virtual void printStats(ostream& out) const = 0;
- virtual void clearStats() = 0;
- virtual void printConfig(ostream& out) const = 0;
- virtual void print(ostream& out) const = 0;
-
-protected:
-
- // Private Methods
- // Private copy constructor and assignment operator
- Network(const Network& obj);
- Network& operator=(const Network& obj);
-
- // Data Members (m_ prefix)
-protected:
- const string m_name;
- int m_nodes;
- int m_virtual_networks;
- int m_buffer_size;
- int m_endpoint_bandwidth;
- Topology* m_topology_ptr;
- bool m_adaptive_routing;
- int m_link_latency;
- int m_control_msg_size;
- int m_data_msg_size;
+ virtual ~Network() {}
+
+ virtual void init();
+
+ int getBufferSize() { return m_buffer_size; }
+ int getNumberOfVirtualNetworks() { return m_virtual_networks; }
+ int getEndpointBandwidth() { return m_endpoint_bandwidth; }
+ bool getAdaptiveRouting() {return m_adaptive_routing; }
+ int getLinkLatency() { return m_link_latency; }
+ int MessageSizeType_to_int(MessageSizeType size_type);
+
+ // returns the queue requested for the given component
+ virtual MessageBuffer* getToNetQueue(NodeID id, bool ordered,
+ int netNumber) = 0;
+ virtual MessageBuffer* getFromNetQueue(NodeID id, bool ordered,
+ int netNumber) = 0;
+ virtual const Vector<Throttle*>* getThrottles(NodeID id) const;
+ virtual int getNumNodes() {return 1;}
+
+ virtual void makeOutLink(SwitchID src, NodeID dest,
+ const NetDest& routing_table_entry, int link_latency, int link_weight,
+ int bw_multiplier, bool isReconfiguration) = 0;
+ virtual void makeInLink(SwitchID src, NodeID dest,
+ const NetDest& routing_table_entry, int link_latency,
+ int bw_multiplier, bool isReconfiguration) = 0;
+ virtual void makeInternalLink(SwitchID src, NodeID dest,
+ const NetDest& routing_table_entry, int link_latency, int link_weight,
+ int bw_multiplier, bool isReconfiguration) = 0;
+
+ virtual void reset() = 0;
+
+ virtual void printStats(ostream& out) const = 0;
+ virtual void clearStats() = 0;
+ virtual void printConfig(ostream& out) const = 0;
+ virtual void print(ostream& out) const = 0;
+
+ protected:
+ // Private copy constructor and assignment operator
+ Network(const Network& obj);
+ Network& operator=(const Network& obj);
+
+ protected:
+ const string m_name;
+ int m_nodes;
+ int m_virtual_networks;
+ int m_buffer_size;
+ int m_endpoint_bandwidth;
+ Topology* m_topology_ptr;
+ bool m_adaptive_routing;
+ int m_link_latency;
+ int m_control_msg_size;
+ int m_data_msg_size;
};
-// Output operator declaration
-ostream& operator<<(ostream& out, const Network& obj);
-
-// ******************* Definitions *******************
-
-// Output operator definition
-extern inline
-ostream& operator<<(ostream& out, const Network& obj)
+inline ostream&
+operator<<(ostream& out, const Network& obj)
{
- obj.print(out);
- out << flush;
- return out;
+ obj.print(out);
+ out << flush;
+ return out;
}
-#endif //NETWORK_H
+#endif // __MEM_RUBY_NETWORK_NETWORK_HH__
diff --git a/src/mem/ruby/network/simple/HierarchicalSwitchTopology.cc b/src/mem/ruby/network/simple/HierarchicalSwitchTopology.cc
deleted file mode 100644
index c0190e789..000000000
--- a/src/mem/ruby/network/simple/HierarchicalSwitchTopology.cc
+++ /dev/null
@@ -1,66 +0,0 @@
-
-#include "mem/ruby/network/simple/HierarchicalSwitchTopology.hh"
-
-// hierarchical switch topology
-void Topology::construct(int fan_out_degree)
-{
- // Make a row of switches with only one input. This extra row makes
- // sure the links out of the nodes have latency and limited
- // bandwidth.
-
- // number of inter-chip switches, i.e. the last row of switches
- Vector<SwitchID> last_level;
- for (int i=0; i<m_nodes; i++) {
- SwitchID new_switch = newSwitchID(); // internal switch id #
- addLink(i, new_switch, m_network_ptr->getLinkLatency());
- last_level.insertAtBottom(new_switch);
- }
-
- // Create Hierarchical Switches
-
- // start from the bottom level and work up to root
- Vector<SwitchID> next_level;
- while(last_level.size() > 1) {
- for (int i=0; i<last_level.size(); i++) {
- if ((i % fan_out_degree) == 0) {
- next_level.insertAtBottom(newSwitchID());
- }
- // Add this link to the last switch we created
- addLink(last_level[i], next_level[next_level.size()-1], m_network_ptr->getLinkLatency());
- }
-
- // Make the current level the last level to get ready for next
- // iteration
- last_level = next_level;
- next_level.clear();
- }
-
- SwitchID root_switch = last_level[0];
-
- Vector<SwitchID> out_level;
- for (int i=0; i<m_nodes; i++) {
- out_level.insertAtBottom(m_nodes+i);
- }
-
- // Build the down network from the endpoints to the root
- while(out_level.size() != 1) {
-
- // A level of switches
- for (int i=0; i<out_level.size(); i++) {
- if ((i % fan_out_degree) == 0) {
- if (out_level.size() > fan_out_degree) {
- next_level.insertAtBottom(newSwitchID());
- } else {
- next_level.insertAtBottom(root_switch);
- }
- }
- // Add this link to the last switch we created
- addLink(next_level[next_level.size()-1], out_level[i], m_network_ptr->getLinkLatency());
- }
-
- // Make the current level the last level to get ready for next
- // iteration
- out_level = next_level;
- next_level.clear();
- }
-}
diff --git a/src/mem/ruby/network/simple/HierarchicalSwitchTopology.hh b/src/mem/ruby/network/simple/HierarchicalSwitchTopology.hh
deleted file mode 100644
index 1b5627206..000000000
--- a/src/mem/ruby/network/simple/HierarchicalSwitchTopology.hh
+++ /dev/null
@@ -1,17 +0,0 @@
-
-#ifndef HIERARCHICALSWITCHTOPOLOGY_H
-#define HIERARCHICALSWITCHTOPOLOGY_H
-
-#include "mem/ruby/network/simple/Topology.hh"
-
-class HierarchicalSwitchTopology : public Topology
-{
-public:
- HierarchicalSwitchTopology(const string & name);
- void init();
-
-protected:
- void construct();
-};
-
-#endif
diff --git a/src/mem/ruby/network/simple/PerfectSwitch.cc b/src/mem/ruby/network/simple/PerfectSwitch.cc
index d60c5332c..bddcb8412 100644
--- a/src/mem/ruby/network/simple/PerfectSwitch.cc
+++ b/src/mem/ruby/network/simple/PerfectSwitch.cc
@@ -1,4 +1,3 @@
-
/*
* Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
* All rights reserved.
@@ -27,289 +26,309 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-/*
- * PerfectSwitch.cc
- *
- * Description: See PerfectSwitch.hh
- *
- * $Id$
- *
- */
-
-
+#include "mem/gems_common/util.hh"
+#include "mem/protocol/Protocol.hh"
+#include "mem/ruby/buffers/MessageBuffer.hh"
#include "mem/ruby/network/simple/PerfectSwitch.hh"
-#include "mem/ruby/slicc_interface/NetworkMessage.hh"
+#include "mem/ruby/network/simple/SimpleNetwork.hh"
#include "mem/ruby/profiler/Profiler.hh"
+#include "mem/ruby/slicc_interface/NetworkMessage.hh"
#include "mem/ruby/system/System.hh"
-#include "mem/ruby/network/simple/SimpleNetwork.hh"
-#include "mem/gems_common/util.hh"
-#include "mem/ruby/buffers/MessageBuffer.hh"
-#include "mem/protocol/Protocol.hh"
const int PRIORITY_SWITCH_LIMIT = 128;
// Operator for helper class
-bool operator<(const LinkOrder& l1, const LinkOrder& l2) {
- return (l1.m_value < l2.m_value);
+bool
+operator<(const LinkOrder& l1, const LinkOrder& l2)
+{
+ return (l1.m_value < l2.m_value);
}
PerfectSwitch::PerfectSwitch(SwitchID sid, SimpleNetwork* network_ptr)
{
- m_virtual_networks = network_ptr->getNumberOfVirtualNetworks();
- m_switch_id = sid;
- m_round_robin_start = 0;
- m_network_ptr = network_ptr;
- m_wakeups_wo_switch = 0;
+ m_virtual_networks = network_ptr->getNumberOfVirtualNetworks();
+ m_switch_id = sid;
+ m_round_robin_start = 0;
+ m_network_ptr = network_ptr;
+ m_wakeups_wo_switch = 0;
}
-void PerfectSwitch::addInPort(const Vector<MessageBuffer*>& in)
+void
+PerfectSwitch::addInPort(const Vector<MessageBuffer*>& in)
{
- assert(in.size() == m_virtual_networks);
- NodeID port = m_in.size();
- m_in.insertAtBottom(in);
- for (int j = 0; j < m_virtual_networks; j++) {
- m_in[port][j]->setConsumer(this);
- string desc = "[Queue from port " + NodeIDToString(m_switch_id) + " " + NodeIDToString(port) + " " + NodeIDToString(j) + " to PerfectSwitch]";
- m_in[port][j]->setDescription(desc);
- }
+ assert(in.size() == m_virtual_networks);
+ NodeID port = m_in.size();
+ m_in.insertAtBottom(in);
+ for (int j = 0; j < m_virtual_networks; j++) {
+ m_in[port][j]->setConsumer(this);
+ string desc = csprintf("[Queue from port %s %s %s to PerfectSwitch]",
+ NodeIDToString(m_switch_id), NodeIDToString(port),
+ NodeIDToString(j));
+ m_in[port][j]->setDescription(desc);
+ }
}
-void PerfectSwitch::addOutPort(const Vector<MessageBuffer*>& out, const NetDest& routing_table_entry)
+void
+PerfectSwitch::addOutPort(const Vector<MessageBuffer*>& out,
+ const NetDest& routing_table_entry)
{
- assert(out.size() == m_virtual_networks);
+ assert(out.size() == m_virtual_networks);
- // Setup link order
- LinkOrder l;
- l.m_value = 0;
- l.m_link = m_out.size();
- m_link_order.insertAtBottom(l);
-
- // Add to routing table
- m_out.insertAtBottom(out);
- m_routing_table.insertAtBottom(routing_table_entry);
+ // Setup link order
+ LinkOrder l;
+ l.m_value = 0;
+ l.m_link = m_out.size();
+ m_link_order.insertAtBottom(l);
+ // Add to routing table
+ m_out.insertAtBottom(out);
+ m_routing_table.insertAtBottom(routing_table_entry);
}
-void PerfectSwitch::clearRoutingTables()
+void
+PerfectSwitch::clearRoutingTables()
{
- m_routing_table.clear();
+ m_routing_table.clear();
}
-void PerfectSwitch::clearBuffers()
+void
+PerfectSwitch::clearBuffers()
{
- for(int i=0; i<m_in.size(); i++){
- for(int vnet=0; vnet < m_virtual_networks; vnet++) {
- m_in[i][vnet]->clear();
+ for (int i = 0; i < m_in.size(); i++){
+ for(int vnet = 0; vnet < m_virtual_networks; vnet++) {
+ m_in[i][vnet]->clear();
+ }
}
- }
- for(int i=0; i<m_out.size(); i++){
- for(int vnet=0; vnet < m_virtual_networks; vnet++) {
- m_out[i][vnet]->clear();
+ for (int i = 0; i < m_out.size(); i++){
+ for(int vnet = 0; vnet < m_virtual_networks; vnet++) {
+ m_out[i][vnet]->clear();
+ }
}
- }
}
-void PerfectSwitch::reconfigureOutPort(const NetDest& routing_table_entry)
+void
+PerfectSwitch::reconfigureOutPort(const NetDest& routing_table_entry)
{
- m_routing_table.insertAtBottom(routing_table_entry);
+ m_routing_table.insertAtBottom(routing_table_entry);
}
PerfectSwitch::~PerfectSwitch()
{
}
-void PerfectSwitch::wakeup()
+void
+PerfectSwitch::wakeup()
{
-
- DEBUG_EXPR(NETWORK_COMP, MedPrio, m_switch_id);
-
- MsgPtr msg_ptr;
-
- // Give the highest numbered link priority most of the time
- m_wakeups_wo_switch++;
- int highest_prio_vnet = m_virtual_networks-1;
- int lowest_prio_vnet = 0;
- int decrementer = 1;
- NetworkMessage* net_msg_ptr = NULL;
-
- // invert priorities to avoid starvation seen in the component network
- if (m_wakeups_wo_switch > PRIORITY_SWITCH_LIMIT) {
- m_wakeups_wo_switch = 0;
- highest_prio_vnet = 0;
- lowest_prio_vnet = m_virtual_networks-1;
- decrementer = -1;
- }
-
- for (int vnet = highest_prio_vnet; (vnet*decrementer) >= (decrementer*lowest_prio_vnet); vnet -= decrementer) {
-
- // For all components incoming queues
- int incoming = m_round_robin_start; // This is for round-robin scheduling
- m_round_robin_start++;
- if (m_round_robin_start >= m_in.size()) {
- m_round_robin_start = 0;
+ DEBUG_EXPR(NETWORK_COMP, MedPrio, m_switch_id);
+
+ MsgPtr msg_ptr;
+
+ // Give the highest numbered link priority most of the time
+ m_wakeups_wo_switch++;
+ int highest_prio_vnet = m_virtual_networks-1;
+ int lowest_prio_vnet = 0;
+ int decrementer = 1;
+ NetworkMessage* net_msg_ptr = NULL;
+
+ // invert priorities to avoid starvation seen in the component network
+ if (m_wakeups_wo_switch > PRIORITY_SWITCH_LIMIT) {
+ m_wakeups_wo_switch = 0;
+ highest_prio_vnet = 0;
+ lowest_prio_vnet = m_virtual_networks-1;
+ decrementer = -1;
}
- // for all input ports, use round robin scheduling
- for (int counter = 0; counter < m_in.size(); counter++) {
-
- // Round robin scheduling
- incoming++;
- if (incoming >= m_in.size()) {
- incoming = 0;
- }
-
- // temporary vectors to store the routing results
- Vector<LinkID> output_links;
- Vector<NetDest> output_link_destinations;
-
- // Is there a message waiting?
- while (m_in[incoming][vnet]->isReady()) {
-
- DEBUG_EXPR(NETWORK_COMP, MedPrio, incoming);
-
- // Peek at message
- msg_ptr = m_in[incoming][vnet]->peekMsgPtr();
- net_msg_ptr = dynamic_cast<NetworkMessage*>(msg_ptr.ref());
- DEBUG_EXPR(NETWORK_COMP, MedPrio, *net_msg_ptr);
-
- output_links.clear();
- output_link_destinations.clear();
- NetDest msg_destinations = net_msg_ptr->getInternalDestination();
-
- // Unfortunately, the token-protocol sends some
- // zero-destination messages, so this assert isn't valid
- // assert(msg_destinations.count() > 0);
-
- assert(m_link_order.size() == m_routing_table.size());
- assert(m_link_order.size() == m_out.size());
-
- if (m_network_ptr->getAdaptiveRouting()) {
- if (m_network_ptr->isVNetOrdered(vnet)) {
- // Don't adaptively route
- for (int outlink=0; outlink<m_out.size(); outlink++) {
- m_link_order[outlink].m_link = outlink;
- m_link_order[outlink].m_value = 0;
- }
- } else {
- // Find how clogged each link is
- for (int outlink=0; outlink<m_out.size(); outlink++) {
- int out_queue_length = 0;
- for (int v=0; v<m_virtual_networks; v++) {
- out_queue_length += m_out[outlink][v]->getSize();
- }
- m_link_order[outlink].m_link = outlink;
- m_link_order[outlink].m_value = 0;
- m_link_order[outlink].m_value |= (out_queue_length << 8);
- m_link_order[outlink].m_value |= (random() & 0xff);
- }
- m_link_order.sortVector(); // Look at the most empty link first
- }
- }
-
- for (int i=0; i<m_routing_table.size(); i++) {
- // pick the next link to look at
- int link = m_link_order[i].m_link;
-
- DEBUG_EXPR(NETWORK_COMP, MedPrio, m_routing_table[link]);
-
- if (msg_destinations.intersectionIsNotEmpty(m_routing_table[link])) {
-
- // Remember what link we're using
- output_links.insertAtBottom(link);
-
- // Need to remember which destinations need this message
- // in another vector. This Set is the intersection of the
- // routing_table entry and the current destination set.
- // The intersection must not be empty, since we are inside "if"
- output_link_destinations.insertAtBottom(msg_destinations.AND(m_routing_table[link]));
-
- // Next, we update the msg_destination not to include
- // those nodes that were already handled by this link
- msg_destinations.removeNetDest(m_routing_table[link]);
- }
- }
-
- assert(msg_destinations.count() == 0);
- //assert(output_links.size() > 0);
-
- // Check for resources - for all outgoing queues
- bool enough = true;
- for (int i=0; i<output_links.size(); i++) {
- int outgoing = output_links[i];
- enough = enough && m_out[outgoing][vnet]->areNSlotsAvailable(1);
- DEBUG_MSG(NETWORK_COMP, HighPrio, "checking if node is blocked");
- DEBUG_EXPR(NETWORK_COMP, HighPrio, outgoing);
- DEBUG_EXPR(NETWORK_COMP, HighPrio, vnet);
- DEBUG_EXPR(NETWORK_COMP, HighPrio, enough);
- }
-
- // There were not enough resources
- if(!enough) {
- g_eventQueue_ptr->scheduleEvent(this, 1);
- DEBUG_MSG(NETWORK_COMP, HighPrio, "Can't deliver message to anyone since a node is blocked");
- DEBUG_EXPR(NETWORK_COMP, HighPrio, *net_msg_ptr);
- break; // go to next incoming port
+ // For all components incoming queues
+ for (int vnet = highest_prio_vnet;
+ (vnet * decrementer) >= (decrementer * lowest_prio_vnet);
+ vnet -= decrementer) {
+
+ // This is for round-robin scheduling
+ int incoming = m_round_robin_start;
+ m_round_robin_start++;
+ if (m_round_robin_start >= m_in.size()) {
+ m_round_robin_start = 0;
}
- MsgPtr unmodified_msg_ptr;
-
- if (output_links.size() > 1) {
- // If we are sending this message down more than one link
- // (size>1), we need to make a copy of the message so each
- // branch can have a different internal destination
- // we need to create an unmodified MsgPtr because the MessageBuffer enqueue func
- // will modify the message
- unmodified_msg_ptr = *(msg_ptr.ref()); // This magic line creates a private copy of the message
- }
+ // for all input ports, use round robin scheduling
+ for (int counter = 0; counter < m_in.size(); counter++) {
+ // Round robin scheduling
+ incoming++;
+ if (incoming >= m_in.size()) {
+ incoming = 0;
+ }
- // Enqueue it - for all outgoing queues
- for (int i=0; i<output_links.size(); i++) {
- int outgoing = output_links[i];
-
- if (i > 0) {
- msg_ptr = *(unmodified_msg_ptr.ref()); // create a private copy of the unmodified message
- }
-
- // Change the internal destination set of the message so it
- // knows which destinations this link is responsible for.
- net_msg_ptr = dynamic_cast<NetworkMessage*>(msg_ptr.ref());
- net_msg_ptr->getInternalDestination() = output_link_destinations[i];
-
- // Enqeue msg
- DEBUG_NEWLINE(NETWORK_COMP,HighPrio);
- DEBUG_MSG(NETWORK_COMP,HighPrio,"switch: " + int_to_string(m_switch_id)
- + " enqueuing net msg from inport[" + int_to_string(incoming) + "]["
- + int_to_string(vnet) +"] to outport [" + int_to_string(outgoing)
- + "][" + int_to_string(vnet) +"]"
- + " time: " + int_to_string(g_eventQueue_ptr->getTime()) + ".");
- DEBUG_NEWLINE(NETWORK_COMP,HighPrio);
-
- m_out[outgoing][vnet]->enqueue(msg_ptr);
+ // temporary vectors to store the routing results
+ Vector<LinkID> output_links;
+ Vector<NetDest> output_link_destinations;
+
+ // Is there a message waiting?
+ while (m_in[incoming][vnet]->isReady()) {
+ DEBUG_EXPR(NETWORK_COMP, MedPrio, incoming);
+
+ // Peek at message
+ msg_ptr = m_in[incoming][vnet]->peekMsgPtr();
+ net_msg_ptr = dynamic_cast<NetworkMessage*>(msg_ptr.ref());
+ DEBUG_EXPR(NETWORK_COMP, MedPrio, *net_msg_ptr);
+
+ output_links.clear();
+ output_link_destinations.clear();
+ NetDest msg_dsts =
+ net_msg_ptr->getInternalDestination();
+
+ // Unfortunately, the token-protocol sends some
+ // zero-destination messages, so this assert isn't valid
+ // assert(msg_dsts.count() > 0);
+
+ assert(m_link_order.size() == m_routing_table.size());
+ assert(m_link_order.size() == m_out.size());
+
+ if (m_network_ptr->getAdaptiveRouting()) {
+ if (m_network_ptr->isVNetOrdered(vnet)) {
+ // Don't adaptively route
+ for (int out = 0; out < m_out.size(); out++) {
+ m_link_order[out].m_link = out;
+ m_link_order[out].m_value = 0;
+ }
+ } else {
+ // Find how clogged each link is
+ for (int out = 0; out < m_out.size(); out++) {
+ int out_queue_length = 0;
+ for (int v = 0; v < m_virtual_networks; v++) {
+ out_queue_length += m_out[out][v]->getSize();
+ }
+ int value =
+ (out_queue_length << 8) | (random() & 0xff);
+ m_link_order[out].m_link = out;
+ m_link_order[out].m_value = value;
+ }
+
+ // Look at the most empty link first
+ m_link_order.sortVector();
+ }
+ }
+
+ for (int i = 0; i < m_routing_table.size(); i++) {
+ // pick the next link to look at
+ int link = m_link_order[i].m_link;
+ NetDest dst = m_routing_table[link];
+ DEBUG_EXPR(NETWORK_COMP, MedPrio, dst);
+
+ if (!msg_dsts.intersectionIsNotEmpty(dst))
+ continue;
+
+ // Remember what link we're using
+ output_links.insertAtBottom(link);
+
+ // Need to remember which destinations need this
+ // message in another vector. This Set is the
+ // intersection of the routing_table entry and the
+ // current destination set. The intersection must
+ // not be empty, since we are inside "if"
+ output_link_destinations.insertAtBottom(msg_dsts.AND(dst));
+
+ // Next, we update the msg_destination not to
+ // include those nodes that were already handled
+ // by this link
+ msg_dsts.removeNetDest(dst);
+ }
+
+ assert(msg_dsts.count() == 0);
+ //assert(output_links.size() > 0);
+
+ // Check for resources - for all outgoing queues
+ bool enough = true;
+ for (int i = 0; i < output_links.size(); i++) {
+ int outgoing = output_links[i];
+ if (!m_out[outgoing][vnet]->areNSlotsAvailable(1))
+ enough = false;
+ DEBUG_MSG(NETWORK_COMP, HighPrio,
+ "checking if node is blocked");
+ DEBUG_EXPR(NETWORK_COMP, HighPrio, outgoing);
+ DEBUG_EXPR(NETWORK_COMP, HighPrio, vnet);
+ DEBUG_EXPR(NETWORK_COMP, HighPrio, enough);
+ }
+
+ // There were not enough resources
+ if (!enough) {
+ g_eventQueue_ptr->scheduleEvent(this, 1);
+ DEBUG_MSG(NETWORK_COMP, HighPrio,
+ "Can't deliver message since a node is blocked");
+ DEBUG_EXPR(NETWORK_COMP, HighPrio, *net_msg_ptr);
+ break; // go to next incoming port
+ }
+
+ MsgPtr unmodified_msg_ptr;
+
+ if (output_links.size() > 1) {
+ // If we are sending this message down more than
+ // one link (size>1), we need to make a copy of
+ // the message so each branch can have a different
+ // internal destination we need to create an
+ // unmodified MsgPtr because the MessageBuffer
+ // enqueue func will modify the message
+
+ // This magic line creates a private copy of the
+ // message
+ unmodified_msg_ptr = *(msg_ptr.ref());
+ }
+
+ // Enqueue it - for all outgoing queues
+ for (int i=0; i<output_links.size(); i++) {
+ int outgoing = output_links[i];
+
+ if (i > 0) {
+ // create a private copy of the unmodified
+ // message
+ msg_ptr = *(unmodified_msg_ptr.ref());
+ }
+
+ // Change the internal destination set of the
+ // message so it knows which destinations this
+ // link is responsible for.
+ net_msg_ptr = safe_cast<NetworkMessage*>(msg_ptr.ref());
+ net_msg_ptr->getInternalDestination() =
+ output_link_destinations[i];
+
+ // Enqeue msg
+ DEBUG_NEWLINE(NETWORK_COMP,HighPrio);
+ DEBUG_MSG(NETWORK_COMP, HighPrio,
+ csprintf("switch: %d enqueuing net msg from "
+ "inport[%d][%d] to outport [%d][%d] time: %d.",
+ m_switch_id, incoming, vnet, outgoing, vnet,
+ g_eventQueue_ptr->getTime()));
+ DEBUG_NEWLINE(NETWORK_COMP,HighPrio);
+
+ m_out[outgoing][vnet]->enqueue(msg_ptr);
+ }
+
+ // Dequeue msg
+ m_in[incoming][vnet]->pop();
+ }
}
-
- // Dequeue msg
- m_in[incoming][vnet]->pop();
- }
}
- }
}
-void PerfectSwitch::printStats(std::ostream& out) const
+void
+PerfectSwitch::printStats(std::ostream& out) const
{
- out << "PerfectSwitch printStats" << endl;
+ out << "PerfectSwitch printStats" << endl;
}
-void PerfectSwitch::clearStats()
+void
+PerfectSwitch::clearStats()
{
}
-void PerfectSwitch::printConfig(std::ostream& out) const
+void
+PerfectSwitch::printConfig(std::ostream& out) const
{
}
-void PerfectSwitch::print(std::ostream& out) const
+void
+PerfectSwitch::print(std::ostream& out) const
{
- out << "[PerfectSwitch " << m_switch_id << "]";
+ out << "[PerfectSwitch " << m_switch_id << "]";
}
diff --git a/src/mem/ruby/network/simple/PerfectSwitch.hh b/src/mem/ruby/network/simple/PerfectSwitch.hh
index 2956e261a..68bf0df9c 100644
--- a/src/mem/ruby/network/simple/PerfectSwitch.hh
+++ b/src/mem/ruby/network/simple/PerfectSwitch.hh
@@ -1,4 +1,3 @@
-
/*
* Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
* All rights reserved.
@@ -28,93 +27,79 @@
*/
/*
- * $Id$
- *
- * Description: Perfect switch, of course it is perfect and no latency or what
- * so ever. Every cycle it is woke up and perform all the
- * necessary routings that must be done. Note, this switch also
- * has number of input ports/output ports and has a routing table
- * as well.
- *
+ * Perfect switch, of course it is perfect and no latency or what so
+ * ever. Every cycle it is woke up and perform all the necessary
+ * routings that must be done. Note, this switch also has number of
+ * input ports/output ports and has a routing table as well.
*/
-#ifndef PerfectSwitch_H
-#define PerfectSwitch_H
+#ifndef __MEM_RUBY_NETWORK_SIMPLE_PERFECTSWITCH_HH__
+#define __MEM_RUBY_NETWORK_SIMPLE_PERFECTSWITCH_HH__
#include <iostream>
-#include "mem/ruby/common/Global.hh"
#include "mem/gems_common/Vector.hh"
#include "mem/ruby/common/Consumer.hh"
+#include "mem/ruby/common/Global.hh"
#include "mem/ruby/system/NodeID.hh"
class MessageBuffer;
class NetDest;
class SimpleNetwork;
-class LinkOrder {
-public:
- int m_link;
- int m_value;
+struct LinkOrder
+{
+ int m_link;
+ int m_value;
};
-class PerfectSwitch : public Consumer {
-public:
- // Constructors
-
- // constructor specifying the number of ports
- PerfectSwitch(SwitchID sid, SimpleNetwork* network_ptr);
- void addInPort(const Vector<MessageBuffer*>& in);
- void addOutPort(const Vector<MessageBuffer*>& out, const NetDest& routing_table_entry);
- void clearRoutingTables();
- void clearBuffers();
- void reconfigureOutPort(const NetDest& routing_table_entry);
- int getInLinks() const { return m_in.size(); }
- int getOutLinks() const { return m_out.size(); }
-
- // Destructor
- ~PerfectSwitch();
-
- // Public Methods
- void wakeup();
-
- void printStats(std::ostream& out) const;
- void clearStats();
- void printConfig(std::ostream& out) const;
-
- void print(std::ostream& out) const;
-private:
-
- // Private copy constructor and assignment operator
- PerfectSwitch(const PerfectSwitch& obj);
- PerfectSwitch& operator=(const PerfectSwitch& obj);
-
- // Data Members (m_ prefix)
- SwitchID m_switch_id;
-
- // vector of queues from the components
- Vector<Vector<MessageBuffer*> > m_in;
- Vector<Vector<MessageBuffer*> > m_out;
- Vector<NetDest> m_routing_table;
- Vector<LinkOrder> m_link_order;
- int m_virtual_networks;
- int m_round_robin_start;
- int m_wakeups_wo_switch;
- SimpleNetwork* m_network_ptr;
+class PerfectSwitch : public Consumer
+{
+ public:
+ PerfectSwitch(SwitchID sid, SimpleNetwork* network_ptr);
+ ~PerfectSwitch();
+
+ void addInPort(const Vector<MessageBuffer*>& in);
+ void addOutPort(const Vector<MessageBuffer*>& out,
+ const NetDest& routing_table_entry);
+ void clearRoutingTables();
+ void clearBuffers();
+ void reconfigureOutPort(const NetDest& routing_table_entry);
+ int getInLinks() const { return m_in.size(); }
+ int getOutLinks() const { return m_out.size(); }
+
+ void wakeup();
+
+ void printStats(std::ostream& out) const;
+ void clearStats();
+ void printConfig(std::ostream& out) const;
+
+ void print(std::ostream& out) const;
+
+ private:
+ // Private copy constructor and assignment operator
+ PerfectSwitch(const PerfectSwitch& obj);
+ PerfectSwitch& operator=(const PerfectSwitch& obj);
+
+ SwitchID m_switch_id;
+
+ // vector of queues from the components
+ Vector<Vector<MessageBuffer*> > m_in;
+ Vector<Vector<MessageBuffer*> > m_out;
+ Vector<NetDest> m_routing_table;
+ Vector<LinkOrder> m_link_order;
+ int m_virtual_networks;
+ int m_round_robin_start;
+ int m_wakeups_wo_switch;
+ SimpleNetwork* m_network_ptr;
};
-// Output operator declaration
-std::ostream& operator<<(std::ostream& out, const PerfectSwitch& obj);
-
-// ******************* Definitions *******************
-
-// Output operator definition
-extern inline
-std::ostream& operator<<(std::ostream& out, const PerfectSwitch& obj)
+inline std::ostream&
+operator<<(std::ostream& out, const PerfectSwitch& obj)
{
- obj.print(out);
- out << std::flush;
- return out;
+ obj.print(out);
+ out << std::flush;
+ return out;
}
-#endif //PerfectSwitch_H
+#endif // __MEM_RUBY_NETWORK_SIMPLE_PERFECTSWITCH_HH__
diff --git a/src/mem/ruby/network/simple/PtToPtTopology.hh b/src/mem/ruby/network/simple/PtToPtTopology.hh
deleted file mode 100644
index f3c57d0a0..000000000
--- a/src/mem/ruby/network/simple/PtToPtTopology.hh
+++ /dev/null
@@ -1,17 +0,0 @@
-
-#ifndef PTTOPTTOPOLOGY_H
-#define PTTOPTTOPOLOGY_H
-
-#include "mem/ruby/network/simple/Topology.hh"
-
-class PtToPtTopology : public Topology
-{
-public:
- PtToPtTopology(const string & name);
- void init();
-
-protected:
- void construct();
-};
-
-#endif
diff --git a/src/mem/ruby/network/simple/SimpleNetwork.cc b/src/mem/ruby/network/simple/SimpleNetwork.cc
index 2de8d07e5..26924c2e7 100644
--- a/src/mem/ruby/network/simple/SimpleNetwork.cc
+++ b/src/mem/ruby/network/simple/SimpleNetwork.cc
@@ -1,4 +1,3 @@
-
/*
* Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
* All rights reserved.
@@ -27,248 +26,262 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-/*
- * SimpleNetwork.cc
- *
- * Description: See SimpleNetwork.hh
- *
- * $Id$
- *
- */
-
+#include "mem/gems_common/Map.hh"
+#include "mem/protocol/MachineType.hh"
+#include "mem/protocol/Protocol.hh"
+#include "mem/protocol/TopologyType.hh"
+#include "mem/ruby/buffers/MessageBuffer.hh"
+#include "mem/ruby/common/NetDest.hh"
#include "mem/ruby/network/simple/SimpleNetwork.hh"
-#include "mem/ruby/profiler/Profiler.hh"
-#include "mem/ruby/system/System.hh"
#include "mem/ruby/network/simple/Switch.hh"
-#include "mem/ruby/common/NetDest.hh"
#include "mem/ruby/network/simple/Topology.hh"
-#include "mem/protocol/TopologyType.hh"
-#include "mem/protocol/MachineType.hh"
-#include "mem/ruby/buffers/MessageBuffer.hh"
-#include "mem/protocol/Protocol.hh"
-#include "mem/gems_common/Map.hh"
+#include "mem/ruby/profiler/Profiler.hh"
+#include "mem/ruby/system/System.hh"
+#if 0
// ***BIG HACK*** - This is actually code that _should_ be in Network.cc
// Note: Moved to Princeton Network
// calls new to abstract away from the network
-/*
-Network* Network::createNetwork(int nodes)
+Network*
+Network::createNetwork(int nodes)
{
- return new SimpleNetwork(nodes);
+ return new SimpleNetwork(nodes);
}
-*/
+#endif
SimpleNetwork::SimpleNetwork(const Params *p)
: Network(p)
{
- //
- // Note: the parent Network Object constructor is called before the
- // SimpleNetwork child constructor. Therefore, the member variables
- // used below should already be initialized.
- //
-
- m_endpoint_switches.setSize(m_nodes);
-
- m_in_use.setSize(m_virtual_networks);
- m_ordered.setSize(m_virtual_networks);
- for (int i = 0; i < m_virtual_networks; i++) {
- m_in_use[i] = false;
- m_ordered[i] = false;
- }
-
- // Allocate to and from queues
- m_toNetQueues.setSize(m_nodes);
- m_fromNetQueues.setSize(m_nodes);
- for (int node = 0; node < m_nodes; node++) {
- m_toNetQueues[node].setSize(m_virtual_networks);
- m_fromNetQueues[node].setSize(m_virtual_networks);
- for (int j = 0; j < m_virtual_networks; j++) {
- m_toNetQueues[node][j] = new MessageBuffer(
- "toNet node "+int_to_string(node)+" j "+int_to_string(j));
- m_fromNetQueues[node][j] = new MessageBuffer(
- "fromNet node "+int_to_string(node)+" j "+int_to_string(j));
+ // Note: the parent Network Object constructor is called before the
+ // SimpleNetwork child constructor. Therefore, the member variables
+ // used below should already be initialized.
+
+ m_endpoint_switches.setSize(m_nodes);
+
+ m_in_use.setSize(m_virtual_networks);
+ m_ordered.setSize(m_virtual_networks);
+ for (int i = 0; i < m_virtual_networks; i++) {
+ m_in_use[i] = false;
+ m_ordered[i] = false;
+ }
+
+ // Allocate to and from queues
+ m_toNetQueues.setSize(m_nodes);
+ m_fromNetQueues.setSize(m_nodes);
+ for (int node = 0; node < m_nodes; node++) {
+ m_toNetQueues[node].setSize(m_virtual_networks);
+ m_fromNetQueues[node].setSize(m_virtual_networks);
+ for (int j = 0; j < m_virtual_networks; j++) {
+ m_toNetQueues[node][j] = new MessageBuffer(
+ "toNet node "+int_to_string(node)+" j "+int_to_string(j));
+ m_fromNetQueues[node][j] = new MessageBuffer(
+ "fromNet node "+int_to_string(node)+" j "+int_to_string(j));
+ }
}
- }
}
-void SimpleNetwork::init()
+void
+SimpleNetwork::init()
{
+ Network::init();
+
+ // The topology pointer should have already been initialized in
+ // the parent class network constructor.
+ assert(m_topology_ptr != NULL);
+ int number_of_switches = m_topology_ptr->numSwitches();
+ for (int i = 0; i < number_of_switches; i++) {
+ m_switch_ptr_vector.insertAtBottom(new Switch(i, this));
+ }
- Network::init();
-
- //
- // The topology pointer should have already been initialized in the parent
- // class network constructor.
- //
- assert(m_topology_ptr != NULL);
- int number_of_switches = m_topology_ptr->numSwitches();
- for (int i=0; i<number_of_switches; i++) {
- m_switch_ptr_vector.insertAtBottom(new Switch(i, this));
- }
- m_topology_ptr->createLinks(this, false); // false because this isn't a reconfiguration
+ // false because this isn't a reconfiguration
+ m_topology_ptr->createLinks(this, false);
}
-void SimpleNetwork::reset()
+void
+SimpleNetwork::reset()
{
- for (int node = 0; node < m_nodes; node++) {
- for (int j = 0; j < m_virtual_networks; j++) {
- m_toNetQueues[node][j]->clear();
- m_fromNetQueues[node][j]->clear();
+ for (int node = 0; node < m_nodes; node++) {
+ for (int j = 0; j < m_virtual_networks; j++) {
+ m_toNetQueues[node][j]->clear();
+ m_fromNetQueues[node][j]->clear();
+ }
}
- }
- for(int i=0; i<m_switch_ptr_vector.size(); i++){
- m_switch_ptr_vector[i]->clearBuffers();
- }
+ for(int i = 0; i < m_switch_ptr_vector.size(); i++){
+ m_switch_ptr_vector[i]->clearBuffers();
+ }
}
SimpleNetwork::~SimpleNetwork()
{
- for (int i = 0; i < m_nodes; i++) {
- m_toNetQueues[i].deletePointers();
- m_fromNetQueues[i].deletePointers();
- }
- m_switch_ptr_vector.deletePointers();
- m_buffers_to_free.deletePointers();
- // delete m_topology_ptr;
+ for (int i = 0; i < m_nodes; i++) {
+ m_toNetQueues[i].deletePointers();
+ m_fromNetQueues[i].deletePointers();
+ }
+ m_switch_ptr_vector.deletePointers();
+ m_buffers_to_free.deletePointers();
+ // delete m_topology_ptr;
}
// From a switch to an endpoint node
-void SimpleNetwork::makeOutLink(SwitchID src, NodeID dest, const NetDest& routing_table_entry, int link_latency, int link_weight, int bw_multiplier, bool isReconfiguration)
+void
+SimpleNetwork::makeOutLink(SwitchID src, NodeID dest,
+ const NetDest& routing_table_entry, int link_latency, int link_weight,
+ int bw_multiplier, bool isReconfiguration)
{
- assert(dest < m_nodes);
- assert(src < m_switch_ptr_vector.size());
- assert(m_switch_ptr_vector[src] != NULL);
- if(!isReconfiguration){
- m_switch_ptr_vector[src]->addOutPort(m_fromNetQueues[dest], routing_table_entry, link_latency, bw_multiplier);
+ assert(dest < m_nodes);
+ assert(src < m_switch_ptr_vector.size());
+ assert(m_switch_ptr_vector[src] != NULL);
+
+ if (isReconfiguration) {
+ m_switch_ptr_vector[src]->reconfigureOutPort(routing_table_entry);
+ return;
+ }
+
+ m_switch_ptr_vector[src]->addOutPort(m_fromNetQueues[dest],
+ routing_table_entry, link_latency, bw_multiplier);
m_endpoint_switches[dest] = m_switch_ptr_vector[src];
- } else {
- m_switch_ptr_vector[src]->reconfigureOutPort(routing_table_entry);
- }
}
// From an endpoint node to a switch
-void SimpleNetwork::makeInLink(NodeID src, SwitchID dest, const NetDest& routing_table_entry, int link_latency, int bw_multiplier, bool isReconfiguration)
+void
+SimpleNetwork::makeInLink(NodeID src, SwitchID dest,
+ const NetDest& routing_table_entry, int link_latency, int bw_multiplier,
+ bool isReconfiguration)
{
- assert(src < m_nodes);
- if(!isReconfiguration){
+ assert(src < m_nodes);
+ if (isReconfiguration) {
+ // do nothing
+ return;
+ }
+
m_switch_ptr_vector[dest]->addInPort(m_toNetQueues[src]);
- } else {
- // do nothing
- }
}
// From a switch to a switch
-void SimpleNetwork::makeInternalLink(SwitchID src, SwitchID dest, const NetDest& routing_table_entry, int link_latency, int link_weight, int bw_multiplier, bool isReconfiguration)
+void
+SimpleNetwork::makeInternalLink(SwitchID src, SwitchID dest,
+ const NetDest& routing_table_entry, int link_latency, int link_weight,
+ int bw_multiplier, bool isReconfiguration)
{
- if(!isReconfiguration){
+ if (isReconfiguration) {
+ m_switch_ptr_vector[src]->reconfigureOutPort(routing_table_entry);
+ return;
+ }
+
// Create a set of new MessageBuffers
Vector<MessageBuffer*> queues;
for (int i = 0; i < m_virtual_networks; i++) {
- // allocate a buffer
- MessageBuffer* buffer_ptr = new MessageBuffer;
- buffer_ptr->setOrdering(true);
- if (m_buffer_size > 0) {
- buffer_ptr->setSize(m_buffer_size);
- }
- queues.insertAtBottom(buffer_ptr);
- // remember to deallocate it
- m_buffers_to_free.insertAtBottom(buffer_ptr);
+ // allocate a buffer
+ MessageBuffer* buffer_ptr = new MessageBuffer;
+ buffer_ptr->setOrdering(true);
+ if (m_buffer_size > 0) {
+ buffer_ptr->setSize(m_buffer_size);
+ }
+ queues.insertAtBottom(buffer_ptr);
+ // remember to deallocate it
+ m_buffers_to_free.insertAtBottom(buffer_ptr);
}
-
// Connect it to the two switches
m_switch_ptr_vector[dest]->addInPort(queues);
- m_switch_ptr_vector[src]->addOutPort(queues, routing_table_entry, link_latency, bw_multiplier);
- } else {
- m_switch_ptr_vector[src]->reconfigureOutPort(routing_table_entry);
- }
+ m_switch_ptr_vector[src]->addOutPort(queues, routing_table_entry,
+ link_latency, bw_multiplier);
}
-void SimpleNetwork::checkNetworkAllocation(NodeID id, bool ordered, int network_num)
+void
+SimpleNetwork::checkNetworkAllocation(NodeID id, bool ordered, int network_num)
{
- ASSERT(id < m_nodes);
- ASSERT(network_num < m_virtual_networks);
+ ASSERT(id < m_nodes);
+ ASSERT(network_num < m_virtual_networks);
- if (ordered) {
- m_ordered[network_num] = true;
- }
- m_in_use[network_num] = true;
+ if (ordered) {
+ m_ordered[network_num] = true;
+ }
+ m_in_use[network_num] = true;
}
-MessageBuffer* SimpleNetwork::getToNetQueue(NodeID id, bool ordered, int network_num)
+MessageBuffer*
+SimpleNetwork::getToNetQueue(NodeID id, bool ordered, int network_num)
{
- checkNetworkAllocation(id, ordered, network_num);
- return m_toNetQueues[id][network_num];
+ checkNetworkAllocation(id, ordered, network_num);
+ return m_toNetQueues[id][network_num];
}
-MessageBuffer* SimpleNetwork::getFromNetQueue(NodeID id, bool ordered, int network_num)
+MessageBuffer*
+SimpleNetwork::getFromNetQueue(NodeID id, bool ordered, int network_num)
{
- checkNetworkAllocation(id, ordered, network_num);
- return m_fromNetQueues[id][network_num];
+ checkNetworkAllocation(id, ordered, network_num);
+ return m_fromNetQueues[id][network_num];
}
-const Vector<Throttle*>* SimpleNetwork::getThrottles(NodeID id) const
+const Vector<Throttle*>*
+SimpleNetwork::getThrottles(NodeID id) const
{
- assert(id >= 0);
- assert(id < m_nodes);
- assert(m_endpoint_switches[id] != NULL);
- return m_endpoint_switches[id]->getThrottles();
+ assert(id >= 0);
+ assert(id < m_nodes);
+ assert(m_endpoint_switches[id] != NULL);
+ return m_endpoint_switches[id]->getThrottles();
}
-void SimpleNetwork::printStats(ostream& out) const
+void
+SimpleNetwork::printStats(ostream& out) const
{
- out << endl;
- out << "Network Stats" << endl;
- out << "-------------" << endl;
- out << endl;
- for(int i=0; i<m_switch_ptr_vector.size(); i++) {
- m_switch_ptr_vector[i]->printStats(out);
- }
- m_topology_ptr->printStats(out);
+ out << endl;
+ out << "Network Stats" << endl;
+ out << "-------------" << endl;
+ out << endl;
+ for (int i = 0; i < m_switch_ptr_vector.size(); i++) {
+ m_switch_ptr_vector[i]->printStats(out);
+ }
+ m_topology_ptr->printStats(out);
}
-void SimpleNetwork::clearStats()
+void
+SimpleNetwork::clearStats()
{
- for(int i=0; i<m_switch_ptr_vector.size(); i++) {
- m_switch_ptr_vector[i]->clearStats();
- }
- m_topology_ptr->clearStats();
+ for (int i = 0; i < m_switch_ptr_vector.size(); i++) {
+ m_switch_ptr_vector[i]->clearStats();
+ }
+ m_topology_ptr->clearStats();
}
-void SimpleNetwork::printConfig(ostream& out) const
+void
+SimpleNetwork::printConfig(ostream& out) const
{
- out << endl;
- out << "Network Configuration" << endl;
- out << "---------------------" << endl;
- out << "network: SIMPLE_NETWORK" << endl;
- out << "topology: " << m_topology_ptr->getName() << endl;
- out << endl;
-
- for (int i = 0; i < m_virtual_networks; i++) {
- out << "virtual_net_" << i << ": ";
- if (m_in_use[i]) {
- out << "active, ";
- if (m_ordered[i]) {
- out << "ordered" << endl;
- } else {
- out << "unordered" << endl;
- }
- } else {
- out << "inactive" << endl;
+ out << endl;
+ out << "Network Configuration" << endl;
+ out << "---------------------" << endl;
+ out << "network: SIMPLE_NETWORK" << endl;
+ out << "topology: " << m_topology_ptr->getName() << endl;
+ out << endl;
+
+ for (int i = 0; i < m_virtual_networks; i++) {
+ out << "virtual_net_" << i << ": ";
+ if (m_in_use[i]) {
+ out << "active, ";
+ if (m_ordered[i]) {
+ out << "ordered" << endl;
+ } else {
+ out << "unordered" << endl;
+ }
+ } else {
+ out << "inactive" << endl;
+ }
+ }
+ out << endl;
+
+ for(int i = 0; i < m_switch_ptr_vector.size(); i++) {
+ m_switch_ptr_vector[i]->printConfig(out);
}
- }
- out << endl;
- for(int i=0; i<m_switch_ptr_vector.size(); i++) {
- m_switch_ptr_vector[i]->printConfig(out);
- }
- m_topology_ptr->printConfig(out);
+ m_topology_ptr->printConfig(out);
}
-void SimpleNetwork::print(ostream& out) const
+void
+SimpleNetwork::print(ostream& out) const
{
- out << "[SimpleNetwork]";
+ out << "[SimpleNetwork]";
}
diff --git a/src/mem/ruby/network/simple/SimpleNetwork.hh b/src/mem/ruby/network/simple/SimpleNetwork.hh
index 76070538f..d8ec89d49 100644
--- a/src/mem/ruby/network/simple/SimpleNetwork.hh
+++ b/src/mem/ruby/network/simple/SimpleNetwork.hh
@@ -1,4 +1,3 @@
-
/*
* Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
* All rights reserved.
@@ -28,9 +27,7 @@
*/
/*
- * SimpleNetwork.hh
- *
- * Description: The SimpleNetwork class implements the interconnection
+ * The SimpleNetwork class implements the interconnection
* SimpleNetwork between components (processor/cache components and
* memory/directory components). The interconnection network as
* described here is not a physical network, but a programming concept
@@ -61,20 +58,17 @@
* abstract Network class take a enumeration parameter, and based on
* that to initial proper network. Or even better, just make the ruby
* system initializer choose the proper network to initiate.
- *
- * $Id$
- *
*/
-#ifndef SIMPLENETWORK_H
-#define SIMPLENETWORK_H
+#ifndef __MEM_RUBY_NETWORK_SIMPLE_SIMPLENETWORK_HH__
+#define __MEM_RUBY_NETWORK_SIMPLE_SIMPLENETWORK_HH__
-#include "mem/ruby/common/Global.hh"
#include "mem/gems_common/Vector.hh"
+#include "mem/ruby/common/Global.hh"
#include "mem/ruby/network/Network.hh"
#include "mem/ruby/system/NodeID.hh"
-#include "sim/sim_object.hh"
#include "params/SimpleNetwork.hh"
+#include "sim/sim_object.hh"
class NetDest;
class MessageBuffer;
@@ -82,78 +76,74 @@ class Throttle;
class Switch;
class Topology;
-class SimpleNetwork : public Network {
-public:
- // Constructors
+class SimpleNetwork : public Network
+{
+ public:
typedef SimpleNetworkParams Params;
SimpleNetwork(const Params *p);
-
- // Destructor
- ~SimpleNetwork();
-
- void init();
-
- // Public Methods
- void printStats(ostream& out) const;
- void clearStats();
- void printConfig(ostream& out) const;
-
- void reset();
-
- // returns the queue requested for the given component
- MessageBuffer* getToNetQueue(NodeID id, bool ordered, int network_num);
- MessageBuffer* getFromNetQueue(NodeID id, bool ordered, int network_num);
- virtual const Vector<Throttle*>* getThrottles(NodeID id) const;
-
- bool isVNetOrdered(int vnet) { return m_ordered[vnet]; }
- bool validVirtualNetwork(int vnet) { return m_in_use[vnet]; }
-
- int getNumNodes() {return m_nodes; }
-
- // Methods used by Topology to setup the network
- void makeOutLink(SwitchID src, NodeID dest, const NetDest& routing_table_entry, int link_latency, int link_weight, int bw_multiplier, bool isReconfiguration);
- void makeInLink(SwitchID src, NodeID dest, const NetDest& routing_table_entry, int link_latency, int bw_multiplier, bool isReconfiguration);
- void makeInternalLink(SwitchID src, NodeID dest, const NetDest& routing_table_entry, int link_latency, int link_weight, int bw_multiplier, bool isReconfiguration);
-
- void print(ostream& out) const;
-private:
- void checkNetworkAllocation(NodeID id, bool ordered, int network_num);
- void addLink(SwitchID src, SwitchID dest, int link_latency);
- void makeLink(SwitchID src, SwitchID dest, const NetDest& routing_table_entry, int link_latency);
- SwitchID createSwitch();
- void makeTopology();
- void linkTopology();
-
-
- // Private copy constructor and assignment operator
- SimpleNetwork(const SimpleNetwork& obj);
- SimpleNetwork& operator=(const SimpleNetwork& obj);
-
- // Data Members (m_ prefix)
-
- // vector of queues from the components
- Vector<Vector<MessageBuffer*> > m_toNetQueues;
- Vector<Vector<MessageBuffer*> > m_fromNetQueues;
-
- Vector<bool> m_in_use;
- Vector<bool> m_ordered;
- Vector<Switch*> m_switch_ptr_vector;
- Vector<MessageBuffer*> m_buffers_to_free;
- Vector<Switch*> m_endpoint_switches;
+ ~SimpleNetwork();
+
+ void init();
+
+ void printStats(ostream& out) const;
+ void clearStats();
+ void printConfig(ostream& out) const;
+
+ void reset();
+
+ // returns the queue requested for the given component
+ MessageBuffer* getToNetQueue(NodeID id, bool ordered, int network_num);
+ MessageBuffer* getFromNetQueue(NodeID id, bool ordered, int network_num);
+ virtual const Vector<Throttle*>* getThrottles(NodeID id) const;
+
+ bool isVNetOrdered(int vnet) { return m_ordered[vnet]; }
+ bool validVirtualNetwork(int vnet) { return m_in_use[vnet]; }
+
+ int getNumNodes() {return m_nodes; }
+
+ // Methods used by Topology to setup the network
+ void makeOutLink(SwitchID src, NodeID dest,
+ const NetDest& routing_table_entry, int link_latency, int link_weight,
+ int bw_multiplier, bool isReconfiguration);
+ void makeInLink(SwitchID src, NodeID dest,
+ const NetDest& routing_table_entry, int link_latency,
+ int bw_multiplier, bool isReconfiguration);
+ void makeInternalLink(SwitchID src, NodeID dest,
+ const NetDest& routing_table_entry, int link_latency, int link_weight,
+ int bw_multiplier, bool isReconfiguration);
+
+ void print(ostream& out) const;
+
+ private:
+ void checkNetworkAllocation(NodeID id, bool ordered, int network_num);
+ void addLink(SwitchID src, SwitchID dest, int link_latency);
+ void makeLink(SwitchID src, SwitchID dest,
+ const NetDest& routing_table_entry, int link_latency);
+ SwitchID createSwitch();
+ void makeTopology();
+ void linkTopology();
+
+ // Private copy constructor and assignment operator
+ SimpleNetwork(const SimpleNetwork& obj);
+ SimpleNetwork& operator=(const SimpleNetwork& obj);
+
+ // vector of queues from the components
+ Vector<Vector<MessageBuffer*> > m_toNetQueues;
+ Vector<Vector<MessageBuffer*> > m_fromNetQueues;
+
+ Vector<bool> m_in_use;
+ Vector<bool> m_ordered;
+ Vector<Switch*> m_switch_ptr_vector;
+ Vector<MessageBuffer*> m_buffers_to_free;
+ Vector<Switch*> m_endpoint_switches;
};
-// Output operator declaration
-ostream& operator<<(ostream& out, const SimpleNetwork& obj);
-
-// ******************* Definitions *******************
-
-// Output operator definition
-extern inline
-ostream& operator<<(ostream& out, const SimpleNetwork& obj)
+inline ostream&
+operator<<(ostream& out, const SimpleNetwork& obj)
{
- obj.print(out);
- out << flush;
- return out;
+ obj.print(out);
+ out << flush;
+ return out;
}
-#endif //SIMPLENETWORK_H
+#endif // __MEM_RUBY_NETWORK_SIMPLE_SIMPLENETWORK_HH__
diff --git a/src/mem/ruby/network/simple/Switch.cc b/src/mem/ruby/network/simple/Switch.cc
index 88695250c..30403cd67 100644
--- a/src/mem/ruby/network/simple/Switch.cc
+++ b/src/mem/ruby/network/simple/Switch.cc
@@ -1,4 +1,3 @@
-
/*
* Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
* All rights reserved.
@@ -27,182 +26,194 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-/*
- * Switch.cc
- *
- * Description: See Switch.hh
- *
- * $Id$
- *
- */
-
-
-#include "mem/ruby/network/simple/Switch.hh"
-#include "mem/ruby/network/simple/PerfectSwitch.hh"
-#include "mem/ruby/buffers/MessageBuffer.hh"
-#include "mem/ruby/network/simple/Throttle.hh"
#include "mem/protocol/MessageSizeType.hh"
-#include "mem/ruby/network/Network.hh"
#include "mem/protocol/Protocol.hh"
+#include "mem/ruby/buffers/MessageBuffer.hh"
+#include "mem/ruby/network/Network.hh"
+#include "mem/ruby/network/simple/PerfectSwitch.hh"
+#include "mem/ruby/network/simple/Switch.hh"
+#include "mem/ruby/network/simple/Throttle.hh"
Switch::Switch(SwitchID sid, SimpleNetwork* network_ptr)
{
- m_perfect_switch_ptr = new PerfectSwitch(sid, network_ptr);
- m_switch_id = sid;
- m_throttles.setSize(0);
+ m_perfect_switch_ptr = new PerfectSwitch(sid, network_ptr);
+ m_switch_id = sid;
+ m_throttles.setSize(0);
}
Switch::~Switch()
{
- delete m_perfect_switch_ptr;
+ delete m_perfect_switch_ptr;
- // Delete throttles (one per output port)
- m_throttles.deletePointers();
+ // Delete throttles (one per output port)
+ m_throttles.deletePointers();
- // Delete MessageBuffers
- m_buffers_to_free.deletePointers();
+ // Delete MessageBuffers
+ m_buffers_to_free.deletePointers();
}
-void Switch::addInPort(const Vector<MessageBuffer*>& in)
+void
+Switch::addInPort(const Vector<MessageBuffer*>& in)
{
- m_perfect_switch_ptr->addInPort(in);
+ m_perfect_switch_ptr->addInPort(in);
}
-void Switch::addOutPort(const Vector<MessageBuffer*>& out, const NetDest& routing_table_entry, int link_latency, int bw_multiplier)
+void
+Switch::addOutPort(const Vector<MessageBuffer*>& out,
+ const NetDest& routing_table_entry, int link_latency, int bw_multiplier)
{
- Throttle* throttle_ptr = NULL;
-
- // Create a throttle
- throttle_ptr = new Throttle(m_switch_id, m_throttles.size(), link_latency, bw_multiplier);
- m_throttles.insertAtBottom(throttle_ptr);
-
- // Create one buffer per vnet (these are intermediaryQueues)
- Vector<MessageBuffer*> intermediateBuffers;
- for (int i=0; i<out.size(); i++) {
- MessageBuffer* buffer_ptr = new MessageBuffer;
- // Make these queues ordered
- buffer_ptr->setOrdering(true);
- Network* net_ptr = RubySystem::getNetwork();
- if(net_ptr->getBufferSize() > 0) {
- buffer_ptr->setSize(net_ptr->getBufferSize());
- }
- intermediateBuffers.insertAtBottom(buffer_ptr);
- m_buffers_to_free.insertAtBottom(buffer_ptr);
+ Throttle* throttle_ptr = NULL;
+
+ // Create a throttle
+ throttle_ptr = new Throttle(m_switch_id, m_throttles.size(), link_latency,
+ bw_multiplier);
+ m_throttles.insertAtBottom(throttle_ptr);
+
+ // Create one buffer per vnet (these are intermediaryQueues)
+ Vector<MessageBuffer*> intermediateBuffers;
+ for (int i = 0; i < out.size(); i++) {
+ MessageBuffer* buffer_ptr = new MessageBuffer;
+ // Make these queues ordered
+ buffer_ptr->setOrdering(true);
+ Network* net_ptr = RubySystem::getNetwork();
+ if (net_ptr->getBufferSize() > 0) {
+ buffer_ptr->setSize(net_ptr->getBufferSize());
+ }
+ intermediateBuffers.insertAtBottom(buffer_ptr);
+ m_buffers_to_free.insertAtBottom(buffer_ptr);
}
- // Hook the queues to the PerfectSwitch
- m_perfect_switch_ptr->addOutPort(intermediateBuffers, routing_table_entry);
-
- // Hook the queues to the Throttle
- throttle_ptr->addLinks(intermediateBuffers, out);
+ // Hook the queues to the PerfectSwitch
+ m_perfect_switch_ptr->addOutPort(intermediateBuffers, routing_table_entry);
+ // Hook the queues to the Throttle
+ throttle_ptr->addLinks(intermediateBuffers, out);
}
-void Switch::clearRoutingTables()
+void
+Switch::clearRoutingTables()
{
- m_perfect_switch_ptr->clearRoutingTables();
+ m_perfect_switch_ptr->clearRoutingTables();
}
-void Switch::clearBuffers()
+void
+Switch::clearBuffers()
{
- m_perfect_switch_ptr->clearBuffers();
- for (int i=0; i<m_throttles.size(); i++) {
- if (m_throttles[i] != NULL) {
- m_throttles[i]->clear();
+ m_perfect_switch_ptr->clearBuffers();
+ for (int i = 0; i < m_throttles.size(); i++) {
+ if (m_throttles[i] != NULL) {
+ m_throttles[i]->clear();
+ }
}
- }
}
-void Switch::reconfigureOutPort(const NetDest& routing_table_entry)
+void
+Switch::reconfigureOutPort(const NetDest& routing_table_entry)
{
- m_perfect_switch_ptr->reconfigureOutPort(routing_table_entry);
+ m_perfect_switch_ptr->reconfigureOutPort(routing_table_entry);
}
-const Throttle* Switch::getThrottle(LinkID link_number) const
+const Throttle*
+Switch::getThrottle(LinkID link_number) const
{
- assert(m_throttles[link_number] != NULL);
- return m_throttles[link_number];
+ assert(m_throttles[link_number] != NULL);
+ return m_throttles[link_number];
}
-const Vector<Throttle*>* Switch::getThrottles() const
+const Vector<Throttle*>*
+Switch::getThrottles() const
{
- return &m_throttles;
+ return &m_throttles;
}
-void Switch::printStats(std::ostream& out) const
+void
+Switch::printStats(std::ostream& out) const
{
- using namespace std;
-
- out << "switch_" << m_switch_id << "_inlinks: " << m_perfect_switch_ptr->getInLinks() << endl;
- out << "switch_" << m_switch_id << "_outlinks: " << m_perfect_switch_ptr->getOutLinks() << endl;
-
- // Average link utilizations
- double average_utilization = 0.0;
- int throttle_count = 0;
-
- for (int i=0; i<m_throttles.size(); i++) {
- Throttle* throttle_ptr = m_throttles[i];
- if (throttle_ptr != NULL) {
- average_utilization += throttle_ptr->getUtilization();
- throttle_count++;
+ using namespace std;
+
+ ccprintf(out, "switch_%d_inlinks: %d\n", m_switch_id,
+ m_perfect_switch_ptr->getInLinks());
+ ccprintf(out, "switch_%d_outlinks: %d\n", m_switch_id,
+ m_perfect_switch_ptr->getOutLinks());
+
+ // Average link utilizations
+ double average_utilization = 0.0;
+ int throttle_count = 0;
+
+ for (int i = 0; i < m_throttles.size(); i++) {
+ Throttle* throttle_ptr = m_throttles[i];
+ if (throttle_ptr) {
+ average_utilization += throttle_ptr->getUtilization();
+ throttle_count++;
+ }
}
- }
- average_utilization = (throttle_count == 0) ? 0 : average_utilization / float(throttle_count);
-
- // Individual link utilizations
- out << "links_utilized_percent_switch_" << m_switch_id << ": " << average_utilization << endl;
- for (int link=0; link<m_throttles.size(); link++) {
- Throttle* throttle_ptr = m_throttles[link];
- if (throttle_ptr != NULL) {
- out << " links_utilized_percent_switch_" << m_switch_id << "_link_" << link << ": "
- << throttle_ptr->getUtilization() << " bw: " << throttle_ptr->getLinkBandwidth()
- << " base_latency: " << throttle_ptr->getLatency() << endl;
+ average_utilization =
+ throttle_count == 0 ? 0 : average_utilization / throttle_count;
+
+ // Individual link utilizations
+ out << "links_utilized_percent_switch_" << m_switch_id << ": "
+ << average_utilization << endl;
+
+ for (int link = 0; link < m_throttles.size(); link++) {
+ Throttle* throttle_ptr = m_throttles[link];
+ if (throttle_ptr != NULL) {
+ out << " links_utilized_percent_switch_" << m_switch_id
+ << "_link_" << link << ": "
+ << throttle_ptr->getUtilization() << " bw: "
+ << throttle_ptr->getLinkBandwidth()
+ << " base_latency: " << throttle_ptr->getLatency() << endl;
+ }
}
- }
- out << endl;
-
- // Traffic breakdown
- for (int link=0; link<m_throttles.size(); link++) {
- Throttle* throttle_ptr = m_throttles[link];
- if (throttle_ptr != NULL) {
- const Vector<Vector<int> >& message_counts = throttle_ptr->getCounters();
- for (int int_type=0; int_type<MessageSizeType_NUM; int_type++) {
- MessageSizeType type = MessageSizeType(int_type);
- int sum = message_counts[type].sum();
- if (sum != 0) {
- out << " outgoing_messages_switch_" << m_switch_id << "_link_" << link << "_" << type
- << ": " << sum << " " << sum * (RubySystem::getNetwork()->MessageSizeType_to_int(type))
- << " " << message_counts[type] << " base_latency: " << throttle_ptr->getLatency() << endl;
+ out << endl;
+
+ // Traffic breakdown
+ for (int link = 0; link < m_throttles.size(); link++) {
+ Throttle* throttle_ptr = m_throttles[link];
+ if (!throttle_ptr)
+ continue;
+
+ const Vector<Vector<int> >& message_counts =
+ throttle_ptr->getCounters();
+ for (int int_type = 0; int_type < MessageSizeType_NUM; int_type++) {
+ MessageSizeType type = MessageSizeType(int_type);
+ int sum = message_counts[type].sum();
+ if (sum == 0)
+ continue;
+
+ out << " outgoing_messages_switch_" << m_switch_id
+ << "_link_" << link << "_" << type << ": " << sum << " "
+ << sum * RubySystem::getNetwork()->MessageSizeType_to_int(type)
+ << " " << message_counts[type] << " base_latency: "
+ << throttle_ptr->getLatency() << endl;
}
- }
}
- }
- out << endl;
+ out << endl;
}
-void Switch::clearStats()
+void
+Switch::clearStats()
{
- m_perfect_switch_ptr->clearStats();
- for (int i=0; i<m_throttles.size(); i++) {
- if (m_throttles[i] != NULL) {
- m_throttles[i]->clearStats();
+ m_perfect_switch_ptr->clearStats();
+ for (int i = 0; i < m_throttles.size(); i++) {
+ if (m_throttles[i] != NULL)
+ m_throttles[i]->clearStats();
}
- }
}
-void Switch::printConfig(std::ostream& out) const
+void
+Switch::printConfig(std::ostream& out) const
{
- m_perfect_switch_ptr->printConfig(out);
- for (int i=0; i<m_throttles.size(); i++) {
- if (m_throttles[i] != NULL) {
- m_throttles[i]->printConfig(out);
+ m_perfect_switch_ptr->printConfig(out);
+ for (int i = 0; i < m_throttles.size(); i++) {
+ if (m_throttles[i] != NULL)
+ m_throttles[i]->printConfig(out);
}
- }
}
-void Switch::print(std::ostream& out) const
+void
+Switch::print(std::ostream& out) const
{
- // FIXME printing
- out << "[Switch]";
+ // FIXME printing
+ out << "[Switch]";
}
diff --git a/src/mem/ruby/network/simple/Switch.hh b/src/mem/ruby/network/simple/Switch.hh
index aa719d555..598450df3 100644
--- a/src/mem/ruby/network/simple/Switch.hh
+++ b/src/mem/ruby/network/simple/Switch.hh
@@ -1,4 +1,3 @@
-
/*
* Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
* All rights reserved.
@@ -28,26 +27,22 @@
*/
/*
- * $Id$
- *
- * Description: The actual modelled switch. It use the perfect switch and a
- * Throttle object to control and bandwidth and timing *only for
- * the output port*. So here we have un-realistic modelling,
- * since the order of PerfectSwitch and Throttle objects get
- * woke up affect the message timing. A more accurate model would
- * be having two set of system states, one for this cycle, one for
- * next cycle. And on the cycle boundary swap the two set of
- * states.
- *
+ * The actual modelled switch. It uses the perfect switch and a
+ * Throttle object to control the bandwidth and timing *only for the
+ * output port*. So the modelling here is unrealistic, since the
+ * order in which the PerfectSwitch and Throttle objects wake up
+ * affects the message timing. A more accurate model would keep two
+ * sets of system states, one for this cycle and one for the next,
+ * and swap the two sets at the cycle boundary.
*/
-#ifndef Switch_H
-#define Switch_H
+#ifndef __MEM_RUBY_NETWORK_SIMPLE_SWITCH_HH__
+#define __MEM_RUBY_NETWORK_SIMPLE_SWITCH_HH__
#include <iostream>
-#include "mem/ruby/common/Global.hh"
#include "mem/gems_common/Vector.hh"
+#include "mem/ruby/common/Global.hh"
class MessageBuffer;
class PerfectSwitch;
@@ -56,54 +51,46 @@ class SimpleNetwork;
class Throttle;
class Network;
-class Switch {
-public:
- // Constructors
-
- // constructor specifying the number of ports
- Switch(SwitchID sid, SimpleNetwork* network_ptr);
- void addInPort(const Vector<MessageBuffer*>& in);
- void addOutPort(const Vector<MessageBuffer*>& out, const NetDest& routing_table_entry, int link_latency, int bw_multiplier);
- const Throttle* getThrottle(LinkID link_number) const;
- const Vector<Throttle*>* getThrottles() const;
- void clearRoutingTables();
- void clearBuffers();
- void reconfigureOutPort(const NetDest& routing_table_entry);
-
- void printStats(std::ostream& out) const;
- void clearStats();
- void printConfig(std::ostream& out) const;
-
- // Destructor
- ~Switch();
-
- void print(std::ostream& out) const;
-private:
-
- // Private copy constructor and assignment operator
- Switch(const Switch& obj);
- Switch& operator=(const Switch& obj);
-
- // Data Members (m_ prefix)
- PerfectSwitch* m_perfect_switch_ptr;
- Network* m_network_ptr;
- Vector<Throttle*> m_throttles;
- Vector<MessageBuffer*> m_buffers_to_free;
- SwitchID m_switch_id;
+class Switch
+{
+ public:
+ Switch(SwitchID sid, SimpleNetwork* network_ptr);
+ ~Switch();
+
+ void addInPort(const Vector<MessageBuffer*>& in);
+ void addOutPort(const Vector<MessageBuffer*>& out,
+ const NetDest& routing_table_entry, int link_latency,
+ int bw_multiplier);
+ const Throttle* getThrottle(LinkID link_number) const;
+ const Vector<Throttle*>* getThrottles() const;
+ void clearRoutingTables();
+ void clearBuffers();
+ void reconfigureOutPort(const NetDest& routing_table_entry);
+
+ void printStats(std::ostream& out) const;
+ void clearStats();
+ void printConfig(std::ostream& out) const;
+
+ void print(std::ostream& out) const;
+
+ private:
+ // Private copy constructor and assignment operator
+ Switch(const Switch& obj);
+ Switch& operator=(const Switch& obj);
+
+ PerfectSwitch* m_perfect_switch_ptr;
+ Network* m_network_ptr;
+ Vector<Throttle*> m_throttles;
+ Vector<MessageBuffer*> m_buffers_to_free;
+ SwitchID m_switch_id;
};
-// Output operator declaration
-std::ostream& operator<<(std::ostream& out, const Switch& obj);
-
-// ******************* Definitions *******************
-
-// Output operator definition
-extern inline
-std::ostream& operator<<(std::ostream& out, const Switch& obj)
+inline std::ostream&
+operator<<(std::ostream& out, const Switch& obj)
{
- obj.print(out);
- out << std::flush;
- return out;
+ obj.print(out);
+ out << std::flush;
+ return out;
}
-#endif //Switch_H
+#endif // __MEM_RUBY_NETWORK_SIMPLE_SWITCH_HH__
diff --git a/src/mem/ruby/network/simple/Throttle.cc b/src/mem/ruby/network/simple/Throttle.cc
index ceba47411..f749672e2 100644
--- a/src/mem/ruby/network/simple/Throttle.cc
+++ b/src/mem/ruby/network/simple/Throttle.cc
@@ -1,4 +1,3 @@
-
/*
* Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
* All rights reserved.
@@ -27,19 +26,13 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-/*
- * $Id$
- *
- * Description: see Throttle.hh
- *
- */
-
-#include "mem/ruby/network/simple/Throttle.hh"
+#include "base/cprintf.hh"
+#include "mem/protocol/Protocol.hh"
#include "mem/ruby/buffers/MessageBuffer.hh"
#include "mem/ruby/network/Network.hh"
-#include "mem/ruby/system/System.hh"
+#include "mem/ruby/network/simple/Throttle.hh"
#include "mem/ruby/slicc_interface/NetworkMessage.hh"
-#include "mem/protocol/Protocol.hh"
+#include "mem/ruby/system/System.hh"
const int HIGH_RANGE = 256;
const int ADJUST_INTERVAL = 50000;
@@ -50,200 +43,232 @@ const int PRIORITY_SWITCH_LIMIT = 128;
static int network_message_to_size(NetworkMessage* net_msg_ptr);
-extern std::ostream * debug_cout_ptr;
+extern std::ostream *debug_cout_ptr;
-Throttle::Throttle(int sID, NodeID node, int link_latency, int link_bandwidth_multiplier)
+Throttle::Throttle(int sID, NodeID node, int link_latency,
+ int link_bandwidth_multiplier)
{
- init(node, link_latency, link_bandwidth_multiplier);
- m_sID = sID;
+ init(node, link_latency, link_bandwidth_multiplier);
+ m_sID = sID;
}
-Throttle::Throttle(NodeID node, int link_latency, int link_bandwidth_multiplier)
+Throttle::Throttle(NodeID node, int link_latency,
+ int link_bandwidth_multiplier)
{
- init(node, link_latency, link_bandwidth_multiplier);
- m_sID = 0;
+ init(node, link_latency, link_bandwidth_multiplier);
+ m_sID = 0;
}
-void Throttle::init(NodeID node, int link_latency, int link_bandwidth_multiplier)
+void
+Throttle::init(NodeID node, int link_latency, int link_bandwidth_multiplier)
{
- m_node = node;
- m_vnets = 0;
+ m_node = node;
+ m_vnets = 0;
- ASSERT(link_bandwidth_multiplier > 0);
- m_link_bandwidth_multiplier = link_bandwidth_multiplier;
- m_link_latency = link_latency;
+ ASSERT(link_bandwidth_multiplier > 0);
+ m_link_bandwidth_multiplier = link_bandwidth_multiplier;
+ m_link_latency = link_latency;
- m_wakeups_wo_switch = 0;
- clearStats();
+ m_wakeups_wo_switch = 0;
+ clearStats();
}
-void Throttle::clear()
+void
+Throttle::clear()
{
- for (int counter = 0; counter < m_vnets; counter++) {
- m_in[counter]->clear();
- m_out[counter]->clear();
- }
+ for (int counter = 0; counter < m_vnets; counter++) {
+ m_in[counter]->clear();
+ m_out[counter]->clear();
+ }
}
-void Throttle::addLinks(const Vector<MessageBuffer*>& in_vec, const Vector<MessageBuffer*>& out_vec)
+void
+Throttle::addLinks(const Vector<MessageBuffer*>& in_vec,
+ const Vector<MessageBuffer*>& out_vec)
{
- assert(in_vec.size() == out_vec.size());
- for (int i=0; i<in_vec.size(); i++) {
- addVirtualNetwork(in_vec[i], out_vec[i]);
- }
-
- m_message_counters.setSize(MessageSizeType_NUM);
- for (int i=0; i<MessageSizeType_NUM; i++) {
- m_message_counters[i].setSize(in_vec.size());
- for (int j=0; j<m_message_counters[i].size(); j++) {
- m_message_counters[i][j] = 0;
+ assert(in_vec.size() == out_vec.size());
+ for (int i=0; i<in_vec.size(); i++) {
+ addVirtualNetwork(in_vec[i], out_vec[i]);
+ }
+
+ m_message_counters.setSize(MessageSizeType_NUM);
+ for (int i = 0; i < MessageSizeType_NUM; i++) {
+ m_message_counters[i].setSize(in_vec.size());
+ for (int j = 0; j<m_message_counters[i].size(); j++) {
+ m_message_counters[i][j] = 0;
+ }
}
- }
}
-void Throttle::addVirtualNetwork(MessageBuffer* in_ptr, MessageBuffer* out_ptr)
+void
+Throttle::addVirtualNetwork(MessageBuffer* in_ptr, MessageBuffer* out_ptr)
{
- m_units_remaining.insertAtBottom(0);
- m_in.insertAtBottom(in_ptr);
- m_out.insertAtBottom(out_ptr);
-
- // Set consumer and description
- m_in[m_vnets]->setConsumer(this);
- string desc = "[Queue to Throttle " + NodeIDToString(m_sID) + " " + NodeIDToString(m_node) + "]";
- m_in[m_vnets]->setDescription(desc);
- m_vnets++;
+ m_units_remaining.insertAtBottom(0);
+ m_in.insertAtBottom(in_ptr);
+ m_out.insertAtBottom(out_ptr);
+
+ // Set consumer and description
+ m_in[m_vnets]->setConsumer(this);
+ string desc = "[Queue to Throttle " + NodeIDToString(m_sID) + " " +
+ NodeIDToString(m_node) + "]";
+ m_in[m_vnets]->setDescription(desc);
+ m_vnets++;
}
-void Throttle::wakeup()
+void
+Throttle::wakeup()
{
- // Limits the number of message sent to a limited number of bytes/cycle.
- assert(getLinkBandwidth() > 0);
- int bw_remaining = getLinkBandwidth();
-
- // Give the highest numbered link priority most of the time
- m_wakeups_wo_switch++;
- int highest_prio_vnet = m_vnets-1;
- int lowest_prio_vnet = 0;
- int counter = 1;
- bool schedule_wakeup = false;
-
- // invert priorities to avoid starvation seen in the component network
- if (m_wakeups_wo_switch > PRIORITY_SWITCH_LIMIT) {
- m_wakeups_wo_switch = 0;
- highest_prio_vnet = 0;
- lowest_prio_vnet = m_vnets-1;
- counter = -1;
- }
-
- for (int vnet = highest_prio_vnet; (vnet*counter) >= (counter*lowest_prio_vnet); vnet -= counter) {
-
- assert(m_out[vnet] != NULL);
- assert(m_in[vnet] != NULL);
- assert(m_units_remaining[vnet] >= 0);
-
- while ((bw_remaining > 0) && ((m_in[vnet]->isReady()) || (m_units_remaining[vnet] > 0)) && m_out[vnet]->areNSlotsAvailable(1)) {
-
- // See if we are done transferring the previous message on this virtual network
- if (m_units_remaining[vnet] == 0 && m_in[vnet]->isReady()) {
-
- // Find the size of the message we are moving
- MsgPtr msg_ptr = m_in[vnet]->peekMsgPtr();
- NetworkMessage* net_msg_ptr = dynamic_cast<NetworkMessage*>(msg_ptr.ref());
- m_units_remaining[vnet] += network_message_to_size(net_msg_ptr);
-
- DEBUG_NEWLINE(NETWORK_COMP,HighPrio);
- DEBUG_MSG(NETWORK_COMP,HighPrio,"throttle: " + int_to_string(m_node)
- + " my bw " + int_to_string(getLinkBandwidth())
- + " bw spent enqueueing net msg " + int_to_string(m_units_remaining[vnet])
- + " time: " + int_to_string(g_eventQueue_ptr->getTime()) + ".");
-
- // Move the message
- m_out[vnet]->enqueue(m_in[vnet]->peekMsgPtr(), m_link_latency);
- m_in[vnet]->pop();
-
- // Count the message
- m_message_counters[net_msg_ptr->getMessageSize()][vnet]++;
-
- DEBUG_MSG(NETWORK_COMP,LowPrio,*m_out[vnet]);
- DEBUG_NEWLINE(NETWORK_COMP,HighPrio);
- }
+ // Limits the number of message sent to a limited number of bytes/cycle.
+ assert(getLinkBandwidth() > 0);
+ int bw_remaining = getLinkBandwidth();
+
+ // Give the highest numbered link priority most of the time
+ m_wakeups_wo_switch++;
+ int highest_prio_vnet = m_vnets-1;
+ int lowest_prio_vnet = 0;
+ int counter = 1;
+ bool schedule_wakeup = false;
+
+ // invert priorities to avoid starvation seen in the component network
+ if (m_wakeups_wo_switch > PRIORITY_SWITCH_LIMIT) {
+ m_wakeups_wo_switch = 0;
+ highest_prio_vnet = 0;
+ lowest_prio_vnet = m_vnets-1;
+ counter = -1;
+ }
- // Calculate the amount of bandwidth we spent on this message
- int diff = m_units_remaining[vnet] - bw_remaining;
- m_units_remaining[vnet] = max(0, diff);
- bw_remaining = max(0, -diff);
+ for (int vnet = highest_prio_vnet;
+ (vnet * counter) >= (counter * lowest_prio_vnet);
+ vnet -= counter) {
+
+ assert(m_out[vnet] != NULL);
+ assert(m_in[vnet] != NULL);
+ assert(m_units_remaining[vnet] >= 0);
+
+ while (bw_remaining > 0 &&
+ (m_in[vnet]->isReady() || m_units_remaining[vnet] > 0) &&
+ m_out[vnet]->areNSlotsAvailable(1)) {
+
+ // See if we are done transferring the previous message on
+ // this virtual network
+ if (m_units_remaining[vnet] == 0 && m_in[vnet]->isReady()) {
+ // Find the size of the message we are moving
+ MsgPtr msg_ptr = m_in[vnet]->peekMsgPtr();
+ NetworkMessage* net_msg_ptr =
+ safe_cast<NetworkMessage*>(msg_ptr.ref());
+ m_units_remaining[vnet] +=
+ network_message_to_size(net_msg_ptr);
+
+ DEBUG_NEWLINE(NETWORK_COMP,HighPrio);
+ DEBUG_MSG(NETWORK_COMP, HighPrio,
+ csprintf("throttle: %d my bw %d bw spent enqueueing "
+ "net msg %d time: %d.",
+ m_node, getLinkBandwidth(), m_units_remaining[vnet],
+ g_eventQueue_ptr->getTime()));
+
+ // Move the message
+ m_out[vnet]->enqueue(m_in[vnet]->peekMsgPtr(), m_link_latency);
+ m_in[vnet]->pop();
+
+ // Count the message
+ m_message_counters[net_msg_ptr->getMessageSize()][vnet]++;
+
+ DEBUG_MSG(NETWORK_COMP,LowPrio,*m_out[vnet]);
+ DEBUG_NEWLINE(NETWORK_COMP,HighPrio);
+ }
+
+ // Calculate the amount of bandwidth we spent on this message
+ int diff = m_units_remaining[vnet] - bw_remaining;
+ m_units_remaining[vnet] = max(0, diff);
+ bw_remaining = max(0, -diff);
+ }
+
+ if (bw_remaining > 0 &&
+ (m_in[vnet]->isReady() || m_units_remaining[vnet] > 0) &&
+ !m_out[vnet]->areNSlotsAvailable(1)) {
+ DEBUG_MSG(NETWORK_COMP,LowPrio,vnet);
+ // schedule me to wakeup again because I'm waiting for my
+ // output queue to become available
+ schedule_wakeup = true;
+ }
}
- if ((bw_remaining > 0) && ((m_in[vnet]->isReady()) || (m_units_remaining[vnet] > 0)) && !m_out[vnet]->areNSlotsAvailable(1)) {
- DEBUG_MSG(NETWORK_COMP,LowPrio,vnet);
- schedule_wakeup = true; // schedule me to wakeup again because I'm waiting for my output queue to become available
+ // We should only wake up when we use the bandwidth
+ // This is only mostly true
+ // assert(bw_remaining != getLinkBandwidth());
+
+ // Record that we used some or all of the link bandwidth this cycle
+ double ratio = 1.0 - (double(bw_remaining) / double(getLinkBandwidth()));
+
+ // If ratio = 0, we used no bandwidth, if ratio = 1, we used all
+ linkUtilized(ratio);
+
+ if (bw_remaining > 0 && !schedule_wakeup) {
+ // We have extra bandwidth and our output buffer was
+ // available, so we must not have anything else to do until
+ // another message arrives.
+ DEBUG_MSG(NETWORK_COMP, LowPrio, *this);
+ DEBUG_MSG(NETWORK_COMP, LowPrio, "not scheduled again");
+ } else {
+ DEBUG_MSG(NETWORK_COMP, LowPrio, *this);
+ DEBUG_MSG(NETWORK_COMP, LowPrio, "scheduled again");
+
+ // We are out of bandwidth for this cycle, so wakeup next
+ // cycle and continue
+ g_eventQueue_ptr->scheduleEvent(this, 1);
}
- }
-
- // We should only wake up when we use the bandwidth
- // assert(bw_remaining != getLinkBandwidth()); // This is only mostly true
-
- // Record that we used some or all of the link bandwidth this cycle
- double ratio = 1.0-(double(bw_remaining)/double(getLinkBandwidth()));
- // If ratio = 0, we used no bandwidth, if ratio = 1, we used all
- linkUtilized(ratio);
-
- if ((bw_remaining > 0) && !schedule_wakeup) {
- // We have extra bandwidth and our output buffer was available, so we must not have anything else to do until another message arrives.
- DEBUG_MSG(NETWORK_COMP,LowPrio,*this);
- DEBUG_MSG(NETWORK_COMP,LowPrio,"not scheduled again");
- } else {
- DEBUG_MSG(NETWORK_COMP,LowPrio,*this);
- DEBUG_MSG(NETWORK_COMP,LowPrio,"scheduled again");
- // We are out of bandwidth for this cycle, so wakeup next cycle and continue
- g_eventQueue_ptr->scheduleEvent(this, 1);
- }
}
-void Throttle::printStats(ostream& out) const
+void
+Throttle::printStats(ostream& out) const
{
- out << "utilized_percent: " << getUtilization() << endl;
+ out << "utilized_percent: " << getUtilization() << endl;
}
-void Throttle::clearStats()
+void
+Throttle::clearStats()
{
- m_ruby_start = g_eventQueue_ptr->getTime();
- m_links_utilized = 0.0;
+ m_ruby_start = g_eventQueue_ptr->getTime();
+ m_links_utilized = 0.0;
- for (int i=0; i<m_message_counters.size(); i++) {
- for (int j=0; j<m_message_counters[i].size(); j++) {
- m_message_counters[i][j] = 0;
+ for (int i = 0; i < m_message_counters.size(); i++) {
+ for (int j = 0; j < m_message_counters[i].size(); j++) {
+ m_message_counters[i][j] = 0;
+ }
}
- }
}
-void Throttle::printConfig(ostream& out) const
+void
+Throttle::printConfig(ostream& out) const
{
-
}
-double Throttle::getUtilization() const
+double
+Throttle::getUtilization() const
{
- return (100.0 * double(m_links_utilized)) / (double(g_eventQueue_ptr->getTime()-m_ruby_start));
+ return 100.0 * double(m_links_utilized) /
+ double(g_eventQueue_ptr->getTime()-m_ruby_start);
}
-void Throttle::print(ostream& out) const
+void
+Throttle::print(ostream& out) const
{
- out << "[Throttle: " << m_sID << " " << m_node << " bw: " << getLinkBandwidth() << "]";
+ out << "[Throttle: " << m_sID << " " << m_node
+ << " bw: " << getLinkBandwidth() << "]";
}
-// Helper function
-
-static
-int network_message_to_size(NetworkMessage* net_msg_ptr)
+int
+network_message_to_size(NetworkMessage* net_msg_ptr)
{
- assert(net_msg_ptr != NULL);
+ assert(net_msg_ptr != NULL);
- // Artificially increase the size of broadcast messages
- if (BROADCAST_SCALING > 1) {
- if (net_msg_ptr->getDestination().isBroadcast()) {
- return (RubySystem::getNetwork()->MessageSizeType_to_int(net_msg_ptr->getMessageSize()) * MESSAGE_SIZE_MULTIPLIER * BROADCAST_SCALING);
- }
- }
- return (RubySystem::getNetwork()->MessageSizeType_to_int(net_msg_ptr->getMessageSize()) * MESSAGE_SIZE_MULTIPLIER);
+ int size = RubySystem::getNetwork()->
+ MessageSizeType_to_int(net_msg_ptr->getMessageSize());
+ size *= MESSAGE_SIZE_MULTIPLIER;
+
+ // Artificially increase the size of broadcast messages
+ if (BROADCAST_SCALING > 1 && net_msg_ptr->getDestination().isBroadcast())
+ size *= BROADCAST_SCALING;
+
+ return size;
}
diff --git a/src/mem/ruby/network/simple/Throttle.hh b/src/mem/ruby/network/simple/Throttle.hh
index 20aeed820..608754190 100644
--- a/src/mem/ruby/network/simple/Throttle.hh
+++ b/src/mem/ruby/network/simple/Throttle.hh
@@ -1,4 +1,3 @@
-
/*
* Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
* All rights reserved.
@@ -28,94 +27,92 @@
*/
/*
- * $Id$
- *
- * Description: The class to implement bandwidth and latency throttle. An
- * instance of consumer class that can be woke up. It is only used
- * to control bandwidth at output port of a switch. And the
- * throttle is added *after* the output port, means the message is
- * put in the output port of the PerfectSwitch (a
- * intermediateBuffers) first, then go through the Throttle.
- *
+ * The class that implements the bandwidth and latency throttle. An
+ * instance of a consumer class that can be woken up. It is only used
+ * to control bandwidth at the output port of a switch. The throttle
+ * is added *after* the output port, meaning a message is put in the
+ * output port of the PerfectSwitch (an intermediateBuffers) first,
+ * and then goes through the Throttle.
*/
-#ifndef THROTTLE_H
-#define THROTTLE_H
+#ifndef __MEM_RUBY_NETWORK_SIMPLE_THROTTLE_HH__
+#define __MEM_RUBY_NETWORK_SIMPLE_THROTTLE_HH__
-#include "mem/ruby/common/Global.hh"
#include "mem/gems_common/Vector.hh"
#include "mem/ruby/common/Consumer.hh"
+#include "mem/ruby/common/Global.hh"
+#include "mem/ruby/network/Network.hh"
#include "mem/ruby/system/NodeID.hh"
#include "mem/ruby/system/System.hh"
-#include "mem/ruby/network/Network.hh"
class MessageBuffer;
-class Throttle : public Consumer {
-public:
- // Constructors
- Throttle(int sID, NodeID node, int link_latency, int link_bandwidth_multiplier);
- Throttle(NodeID node, int link_latency, int link_bandwidth_multiplier);
-
- // Destructor
- ~Throttle() {}
-
- // Public Methods
- void addLinks(const Vector<MessageBuffer*>& in_vec, const Vector<MessageBuffer*>& out_vec);
- void wakeup();
-
- void printStats(ostream& out) const;
- void clearStats();
- void printConfig(ostream& out) const;
- double getUtilization() const; // The average utilization (a percent) since last clearStats()
- int getLinkBandwidth() const { return RubySystem::getNetwork()->getEndpointBandwidth() * m_link_bandwidth_multiplier; }
- int getLatency() const { return m_link_latency; }
-
- const Vector<Vector<int> >& getCounters() const { return m_message_counters; }
-
- void clear();
-
- void print(ostream& out) const;
-
-private:
- // Private Methods
- void init(NodeID node, int link_latency, int link_bandwidth_multiplier);
- void addVirtualNetwork(MessageBuffer* in_ptr, MessageBuffer* out_ptr);
- void linkUtilized(double ratio) { m_links_utilized += ratio; }
-
- // Private copy constructor and assignment operator
- Throttle(const Throttle& obj);
- Throttle& operator=(const Throttle& obj);
-
- // Data Members (m_ prefix)
- Vector<MessageBuffer*> m_in;
- Vector<MessageBuffer*> m_out;
- Vector<Vector<int> > m_message_counters;
- int m_vnets;
- Vector<int> m_units_remaining;
- int m_sID;
- NodeID m_node;
- int m_link_bandwidth_multiplier;
- int m_link_latency;
- int m_wakeups_wo_switch;
-
- // For tracking utilization
- Time m_ruby_start;
- double m_links_utilized;
+class Throttle : public Consumer
+{
+ public:
+ Throttle(int sID, NodeID node, int link_latency,
+ int link_bandwidth_multiplier);
+ Throttle(NodeID node, int link_latency, int link_bandwidth_multiplier);
+ ~Throttle() {}
+
+ void addLinks(const Vector<MessageBuffer*>& in_vec,
+ const Vector<MessageBuffer*>& out_vec);
+ void wakeup();
+
+ void printStats(ostream& out) const;
+ void clearStats();
+ void printConfig(ostream& out) const;
+ // The average utilization (a percent) since last clearStats()
+ double getUtilization() const;
+ int
+ getLinkBandwidth() const
+ {
+ return RubySystem::getNetwork()->getEndpointBandwidth() *
+ m_link_bandwidth_multiplier;
+ }
+ int getLatency() const { return m_link_latency; }
+
+ const Vector<Vector<int> >&
+ getCounters() const
+ {
+ return m_message_counters;
+ }
+
+ void clear();
+
+ void print(ostream& out) const;
+
+ private:
+ void init(NodeID node, int link_latency, int link_bandwidth_multiplier);
+ void addVirtualNetwork(MessageBuffer* in_ptr, MessageBuffer* out_ptr);
+ void linkUtilized(double ratio) { m_links_utilized += ratio; }
+
+ // Private copy constructor and assignment operator
+ Throttle(const Throttle& obj);
+ Throttle& operator=(const Throttle& obj);
+
+ Vector<MessageBuffer*> m_in;
+ Vector<MessageBuffer*> m_out;
+ Vector<Vector<int> > m_message_counters;
+ int m_vnets;
+ Vector<int> m_units_remaining;
+ int m_sID;
+ NodeID m_node;
+ int m_link_bandwidth_multiplier;
+ int m_link_latency;
+ int m_wakeups_wo_switch;
+
+ // For tracking utilization
+ Time m_ruby_start;
+ double m_links_utilized;
};
-// Output operator declaration
-ostream& operator<<(ostream& out, const Throttle& obj);
-
-// ******************* Definitions *******************
-
-// Output operator definition
-extern inline
-ostream& operator<<(ostream& out, const Throttle& obj)
+inline ostream&
+operator<<(ostream& out, const Throttle& obj)
{
- obj.print(out);
- out << flush;
- return out;
+ obj.print(out);
+ out << flush;
+ return out;
}
-#endif //THROTTLE_H
+#endif // __MEM_RUBY_NETWORK_SIMPLE_THROTTLE_HH__
diff --git a/src/mem/ruby/network/simple/Topology.cc b/src/mem/ruby/network/simple/Topology.cc
index a8ce4db84..3d7aa35d0 100644
--- a/src/mem/ruby/network/simple/Topology.cc
+++ b/src/mem/ruby/network/simple/Topology.cc
@@ -1,4 +1,3 @@
-
/*
* Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
* All rights reserved.
@@ -27,27 +26,18 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-/*
- * Topology.cc
- *
- * Description: See Topology.hh
- *
- * $Id$
- *
- * */
-
-#include "mem/ruby/network/simple/Topology.hh"
-#include "mem/ruby/common/NetDest.hh"
-#include "mem/ruby/network/Network.hh"
-#include "mem/ruby/slicc_interface/AbstractController.hh"
-#include "mem/protocol/TopologyType.hh"
#include "mem/gems_common/util.hh"
#include "mem/protocol/MachineType.hh"
#include "mem/protocol/Protocol.hh"
+#include "mem/protocol/TopologyType.hh"
+#include "mem/ruby/common/NetDest.hh"
+#include "mem/ruby/network/Network.hh"
+#include "mem/ruby/network/simple/Topology.hh"
+#include "mem/ruby/slicc_interface/AbstractController.hh"
#include "mem/ruby/system/System.hh"
-static const int INFINITE_LATENCY = 10000; // Yes, this is a big hack
-static const int DEFAULT_BW_MULTIPLIER = 1; // Just to be consistent with above :)
+const int INFINITE_LATENCY = 10000; // Yes, this is a big hack
+const int DEFAULT_BW_MULTIPLIER = 1; // Just to be consistent with above :)
// Note: In this file, we use the first 2*m_nodes SwitchIDs to
// represent the input and output endpoint links. These really are
@@ -57,10 +47,14 @@ static const int DEFAULT_BW_MULTIPLIER = 1; // Just to be consistent with above
// of the network.
// Helper functions based on chapter 29 of Cormen et al.
-static void extend_shortest_path(Matrix& current_dist, Matrix& latencies, Matrix& inter_switches);
-static Matrix shortest_path(const Matrix& weights, Matrix& latencies, Matrix& inter_switches);
-static bool link_is_shortest_path_to_node(SwitchID src, SwitchID next, SwitchID final, const Matrix& weights, const Matrix& dist);
-static NetDest shortest_path_to_node(SwitchID src, SwitchID next, const Matrix& weights, const Matrix& dist);
+void extend_shortest_path(Matrix& current_dist, Matrix& latencies,
+ Matrix& inter_switches);
+Matrix shortest_path(const Matrix& weights, Matrix& latencies,
+ Matrix& inter_switches);
+bool link_is_shortest_path_to_node(SwitchID src, SwitchID next,
+ SwitchID final, const Matrix& weights, const Matrix& dist);
+NetDest shortest_path_to_node(SwitchID src, SwitchID next,
+ const Matrix& weights, const Matrix& dist);
Topology::Topology(const Params *p)
: SimObject(p)
@@ -71,306 +65,356 @@ Topology::Topology(const Params *p)
m_component_latencies.setSize(0);
m_component_inter_switches.setSize(0);
- //
// Total nodes/controllers in network
// Must make sure this is called after the State Machine constructors
- //
m_nodes = MachineType_base_number(MachineType_NUM);
assert(m_nodes > 1);
- if (m_nodes != params()->ext_links.size()) {
+    if (m_nodes != params()->ext_links.size()) {
fatal("m_nodes (%d) != ext_links vector length (%d)\n",
- m_nodes != params()->ext_links.size());
+            m_nodes, params()->ext_links.size());
}
- //
- // First create the links between the endpoints (i.e. controllers) and the
- // network.
- //
- for (vector<ExtLink*>::const_iterator i = params()->ext_links.begin();
- i != params()->ext_links.end(); ++i)
- {
- const ExtLinkParams *p = (*i)->params();
- AbstractController *c = p->ext_node;
-
- // Store the controller pointers for later
- m_controller_vector.insertAtBottom(c);
-
- int ext_idx1 =
- MachineType_base_number(c->getMachineType()) + c->getVersion();
- int ext_idx2 = ext_idx1 + m_nodes;
- int int_idx = p->int_node + 2*m_nodes;
-
- // create the links in both directions
- addLink(ext_idx1, int_idx, p->latency, p->bw_multiplier, p->weight);
- addLink(int_idx, ext_idx2, p->latency, p->bw_multiplier, p->weight);
- }
-
- for (vector<IntLink*>::const_iterator i = params()->int_links.begin();
- i != params()->int_links.end(); ++i)
- {
- const IntLinkParams *p = (*i)->params();
- int a = p->node_a + 2*m_nodes;
- int b = p->node_b + 2*m_nodes;
-
- // create the links in both directions
- addLink(a, b, p->latency, p->bw_multiplier, p->weight);
- addLink(b, a, p->latency, p->bw_multiplier, p->weight);
- }
+ // First create the links between the endpoints (i.e. controllers)
+ // and the network.
+ for (vector<ExtLink*>::const_iterator i = params()->ext_links.begin();
+ i != params()->ext_links.end(); ++i) {
+ const ExtLinkParams *p = (*i)->params();
+ AbstractController *c = p->ext_node;
+
+ // Store the controller pointers for later
+ m_controller_vector.insertAtBottom(c);
+
+ int ext_idx1 =
+ MachineType_base_number(c->getMachineType()) + c->getVersion();
+ int ext_idx2 = ext_idx1 + m_nodes;
+ int int_idx = p->int_node + 2*m_nodes;
+
+ // create the links in both directions
+ addLink(ext_idx1, int_idx, p->latency, p->bw_multiplier, p->weight);
+ addLink(int_idx, ext_idx2, p->latency, p->bw_multiplier, p->weight);
+ }
+
+ for (vector<IntLink*>::const_iterator i = params()->int_links.begin();
+ i != params()->int_links.end(); ++i) {
+ const IntLinkParams *p = (*i)->params();
+ int a = p->node_a + 2*m_nodes;
+ int b = p->node_b + 2*m_nodes;
+
+ // create the links in both directions
+ addLink(a, b, p->latency, p->bw_multiplier, p->weight);
+ addLink(b, a, p->latency, p->bw_multiplier, p->weight);
+ }
}
-void Topology::initNetworkPtr(Network* net_ptr)
+void
+Topology::initNetworkPtr(Network* net_ptr)
{
- for (int cntrl = 0; cntrl < m_controller_vector.size(); cntrl++)
- {
+ for (int cntrl = 0; cntrl < m_controller_vector.size(); cntrl++) {
m_controller_vector[cntrl]->initNetworkPtr(net_ptr);
}
}
-
-void Topology::createLinks(Network *net, bool isReconfiguration)
+void
+Topology::createLinks(Network *net, bool isReconfiguration)
{
- // Find maximum switchID
-
- SwitchID max_switch_id = 0;
- for (int i=0; i<m_links_src_vector.size(); i++) {
- max_switch_id = max(max_switch_id, m_links_src_vector[i]);
- max_switch_id = max(max_switch_id, m_links_dest_vector[i]);
- }
-
- // Initialize weight vector
- Matrix topology_weights;
- Matrix topology_latency;
- Matrix topology_bw_multis;
- int num_switches = max_switch_id+1;
- topology_weights.setSize(num_switches);
- topology_latency.setSize(num_switches);
- topology_bw_multis.setSize(num_switches);
- m_component_latencies.setSize(num_switches); // FIXME setting the size of a member variable here is a HACK!
- m_component_inter_switches.setSize(num_switches); // FIXME setting the size of a member variable here is a HACK!
- for(int i=0; i<topology_weights.size(); i++) {
- topology_weights[i].setSize(num_switches);
- topology_latency[i].setSize(num_switches);
- topology_bw_multis[i].setSize(num_switches);
- m_component_latencies[i].setSize(num_switches);
- m_component_inter_switches[i].setSize(num_switches); // FIXME setting the size of a member variable here is a HACK!
- for(int j=0; j<topology_weights[i].size(); j++) {
- topology_weights[i][j] = INFINITE_LATENCY;
- topology_latency[i][j] = -1; // initialize to an invalid value
- topology_bw_multis[i][j] = -1; // initialize to an invalid value
- m_component_latencies[i][j] = -1; // initialize to an invalid value
- m_component_inter_switches[i][j] = 0; // initially assume direct connections / no intermediate switches between components
+ // Find maximum switchID
+ SwitchID max_switch_id = 0;
+ for (int i = 0; i < m_links_src_vector.size(); i++) {
+ max_switch_id = max(max_switch_id, m_links_src_vector[i]);
+ max_switch_id = max(max_switch_id, m_links_dest_vector[i]);
}
- }
-
- // Set identity weights to zero
- for(int i=0; i<topology_weights.size(); i++) {
- topology_weights[i][i] = 0;
- }
-
- // Fill in the topology weights and bandwidth multipliers
- for (int i=0; i<m_links_src_vector.size(); i++) {
- topology_weights[m_links_src_vector[i]][m_links_dest_vector[i]] = m_links_weight_vector[i];
- topology_latency[m_links_src_vector[i]][m_links_dest_vector[i]] = m_links_latency_vector[i];
- m_component_latencies[m_links_src_vector[i]][m_links_dest_vector[i]] = m_links_latency_vector[i]; // initialize to latency vector
- topology_bw_multis[m_links_src_vector[i]][m_links_dest_vector[i]] = m_bw_multiplier_vector[i];
- }
-
- // Walk topology and hookup the links
- Matrix dist = shortest_path(topology_weights, m_component_latencies, m_component_inter_switches);
- for(int i=0; i<topology_weights.size(); i++) {
- for(int j=0; j<topology_weights[i].size(); j++) {
- int weight = topology_weights[i][j];
- int bw_multiplier = topology_bw_multis[i][j];
- int latency = topology_latency[i][j];
- if (weight > 0 && weight != INFINITE_LATENCY) {
- NetDest destination_set = shortest_path_to_node(i, j, topology_weights, dist);
- assert(latency != -1);
- makeLink(net, i, j, destination_set, latency, weight, bw_multiplier, isReconfiguration);
- }
+
+ // Initialize weight vector
+ Matrix topology_weights;
+ Matrix topology_latency;
+ Matrix topology_bw_multis;
+ int num_switches = max_switch_id+1;
+ topology_weights.setSize(num_switches);
+ topology_latency.setSize(num_switches);
+ topology_bw_multis.setSize(num_switches);
+
+ // FIXME setting the size of a member variable here is a HACK!
+ m_component_latencies.setSize(num_switches);
+
+ // FIXME setting the size of a member variable here is a HACK!
+ m_component_inter_switches.setSize(num_switches);
+
+ for (int i = 0; i < topology_weights.size(); i++) {
+ topology_weights[i].setSize(num_switches);
+ topology_latency[i].setSize(num_switches);
+ topology_bw_multis[i].setSize(num_switches);
+ m_component_latencies[i].setSize(num_switches);
+
+ // FIXME setting the size of a member variable here is a HACK!
+ m_component_inter_switches[i].setSize(num_switches);
+
+ for (int j = 0; j < topology_weights[i].size(); j++) {
+ topology_weights[i][j] = INFINITE_LATENCY;
+
+ // initialize to invalid values
+ topology_latency[i][j] = -1;
+ topology_bw_multis[i][j] = -1;
+ m_component_latencies[i][j] = -1;
+
+ // initially assume direct connections / no intermediate
+ // switches between components
+ m_component_inter_switches[i][j] = 0;
+ }
+ }
+
+ // Set identity weights to zero
+ for (int i = 0; i < topology_weights.size(); i++) {
+ topology_weights[i][i] = 0;
+ }
+
+ // Fill in the topology weights and bandwidth multipliers
+ for (int i = 0; i < m_links_src_vector.size(); i++) {
+ int src = m_links_src_vector[i];
+ int dst = m_links_dest_vector[i];
+ topology_weights[src][dst] = m_links_weight_vector[i];
+ topology_latency[src][dst] = m_links_latency_vector[i];
+ m_component_latencies[src][dst] = m_links_latency_vector[i];
+ topology_bw_multis[src][dst] = m_bw_multiplier_vector[i];
+ }
+
+ // Walk topology and hookup the links
+ Matrix dist = shortest_path(topology_weights, m_component_latencies,
+ m_component_inter_switches);
+ for (int i = 0; i < topology_weights.size(); i++) {
+ for (int j = 0; j < topology_weights[i].size(); j++) {
+ int weight = topology_weights[i][j];
+ int bw_multiplier = topology_bw_multis[i][j];
+ int latency = topology_latency[i][j];
+ if (weight > 0 && weight != INFINITE_LATENCY) {
+ NetDest destination_set = shortest_path_to_node(i, j,
+ topology_weights, dist);
+ assert(latency != -1);
+ makeLink(net, i, j, destination_set, latency, weight,
+ bw_multiplier, isReconfiguration);
+ }
+ }
}
- }
}
-SwitchID Topology::newSwitchID()
+SwitchID
+Topology::newSwitchID()
{
- m_number_of_switches++;
- return m_number_of_switches-1+m_nodes+m_nodes;
+ m_number_of_switches++;
+ return m_number_of_switches-1+m_nodes+m_nodes;
}
-void Topology::addLink(SwitchID src, SwitchID dest, int link_latency)
+void
+Topology::addLink(SwitchID src, SwitchID dest, int link_latency)
{
- addLink(src, dest, link_latency, DEFAULT_BW_MULTIPLIER, link_latency);
+ addLink(src, dest, link_latency, DEFAULT_BW_MULTIPLIER, link_latency);
}
-void Topology::addLink(SwitchID src, SwitchID dest, int link_latency, int bw_multiplier)
+void
+Topology::addLink(SwitchID src, SwitchID dest, int link_latency,
+ int bw_multiplier)
{
- addLink(src, dest, link_latency, bw_multiplier, link_latency);
+ addLink(src, dest, link_latency, bw_multiplier, link_latency);
}
-void Topology::addLink(SwitchID src, SwitchID dest, int link_latency, int bw_multiplier, int link_weight)
+void
+Topology::addLink(SwitchID src, SwitchID dest, int link_latency,
+ int bw_multiplier, int link_weight)
{
- ASSERT(src <= m_number_of_switches+m_nodes+m_nodes);
- ASSERT(dest <= m_number_of_switches+m_nodes+m_nodes);
- m_links_src_vector.insertAtBottom(src);
- m_links_dest_vector.insertAtBottom(dest);
- m_links_latency_vector.insertAtBottom(link_latency);
- m_links_weight_vector.insertAtBottom(link_weight);
- m_bw_multiplier_vector.insertAtBottom(bw_multiplier);
+ ASSERT(src <= m_number_of_switches+m_nodes+m_nodes);
+ ASSERT(dest <= m_number_of_switches+m_nodes+m_nodes);
+ m_links_src_vector.insertAtBottom(src);
+ m_links_dest_vector.insertAtBottom(dest);
+ m_links_latency_vector.insertAtBottom(link_latency);
+ m_links_weight_vector.insertAtBottom(link_weight);
+ m_bw_multiplier_vector.insertAtBottom(bw_multiplier);
}
-void Topology::makeLink(Network *net, SwitchID src, SwitchID dest, const NetDest& routing_table_entry, int link_latency, int link_weight, int bw_multiplier, bool isReconfiguration)
+void
+Topology::makeLink(Network *net, SwitchID src, SwitchID dest,
+ const NetDest& routing_table_entry, int link_latency, int link_weight,
+ int bw_multiplier, bool isReconfiguration)
{
- // Make sure we're not trying to connect two end-point nodes directly together
- assert((src >= 2*m_nodes) || (dest >= 2*m_nodes));
-
- if (src < m_nodes) {
- net->makeInLink(src, dest-(2*m_nodes), routing_table_entry, link_latency, bw_multiplier, isReconfiguration);
- } else if (dest < 2*m_nodes) {
- assert(dest >= m_nodes);
- NodeID node = dest-m_nodes;
- net->makeOutLink(src-(2*m_nodes), node, routing_table_entry, link_latency, link_weight, bw_multiplier, isReconfiguration);
- } else {
- assert((src >= 2*m_nodes) && (dest >= 2*m_nodes));
- net->makeInternalLink(src-(2*m_nodes), dest-(2*m_nodes), routing_table_entry, link_latency, link_weight, bw_multiplier, isReconfiguration);
- }
+ // Make sure we're not trying to connect two end-point nodes
+ // directly together
+ assert(src >= 2 * m_nodes || dest >= 2 * m_nodes);
+
+ if (src < m_nodes) {
+ net->makeInLink(src, dest-(2*m_nodes), routing_table_entry,
+ link_latency, bw_multiplier, isReconfiguration);
+ } else if (dest < 2*m_nodes) {
+ assert(dest >= m_nodes);
+ NodeID node = dest-m_nodes;
+ net->makeOutLink(src-(2*m_nodes), node, routing_table_entry,
+ link_latency, link_weight, bw_multiplier, isReconfiguration);
+ } else {
+ assert((src >= 2*m_nodes) && (dest >= 2*m_nodes));
+ net->makeInternalLink(src-(2*m_nodes), dest-(2*m_nodes),
+ routing_table_entry, link_latency, link_weight, bw_multiplier,
+ isReconfiguration);
+ }
}
-void Topology::printStats(std::ostream& out) const
+void
+Topology::printStats(std::ostream& out) const
{
for (int cntrl = 0; cntrl < m_controller_vector.size(); cntrl++) {
- m_controller_vector[cntrl]->printStats(out);
+ m_controller_vector[cntrl]->printStats(out);
}
}
-void Topology::clearStats()
+void
+Topology::clearStats()
{
for (int cntrl = 0; cntrl < m_controller_vector.size(); cntrl++) {
m_controller_vector[cntrl]->clearStats();
}
}
-void Topology::printConfig(std::ostream& out) const
+void
+Topology::printConfig(std::ostream& out) const
{
- using namespace std;
-
- if (m_print_config == false) return;
-
- assert(m_component_latencies.size() > 0);
-
- out << "--- Begin Topology Print ---" << endl;
- out << endl;
- out << "Topology print ONLY indicates the _NETWORK_ latency between two machines" << endl;
- out << "It does NOT include the latency within the machines" << endl;
- out << endl;
- for (int m=0; m<MachineType_NUM; m++) {
- for (int i=0; i<MachineType_base_count((MachineType)m); i++) {
- MachineID cur_mach = {(MachineType)m, i};
- out << cur_mach << " Network Latencies" << endl;
- for (int n=0; n<MachineType_NUM; n++) {
- for (int j=0; j<MachineType_base_count((MachineType)n); j++) {
- MachineID dest_mach = {(MachineType)n, j};
- if (cur_mach != dest_mach) {
- int link_latency = m_component_latencies[MachineType_base_number((MachineType)m)+i][MachineType_base_number(MachineType_NUM)+MachineType_base_number((MachineType)n)+j];
- int intermediate_switches = m_component_inter_switches[MachineType_base_number((MachineType)m)+i][MachineType_base_number(MachineType_NUM)+MachineType_base_number((MachineType)n)+j];
- out << " " << cur_mach << " -> " << dest_mach << " net_lat: "
- << link_latency+intermediate_switches << endl; // NOTE switches are assumed to have single cycle latency
- }
+ using namespace std;
+
+ if (m_print_config == false)
+ return;
+
+ assert(m_component_latencies.size() > 0);
+
+ out << "--- Begin Topology Print ---" << endl
+ << endl
+ << "Topology print ONLY indicates the _NETWORK_ latency between two "
+ << "machines" << endl
+ << "It does NOT include the latency within the machines" << endl
+ << endl;
+
+ for (int m = 0; m < MachineType_NUM; m++) {
+ int i_end = MachineType_base_count((MachineType)m);
+ for (int i = 0; i < i_end; i++) {
+ MachineID cur_mach = {(MachineType)m, i};
+ out << cur_mach << " Network Latencies" << endl;
+ for (int n = 0; n < MachineType_NUM; n++) {
+ int j_end = MachineType_base_count((MachineType)n);
+ for (int j = 0; j < j_end; j++) {
+ MachineID dest_mach = {(MachineType)n, j};
+ if (cur_mach == dest_mach)
+ continue;
+
+ int src = MachineType_base_number((MachineType)m) + i;
+ int dst = MachineType_base_number(MachineType_NUM) +
+ MachineType_base_number((MachineType)n) + j;
+ int link_latency = m_component_latencies[src][dst];
+ int intermediate_switches =
+ m_component_inter_switches[src][dst];
+
+ // NOTE switches are assumed to have single
+ // cycle latency
+ out << " " << cur_mach << " -> " << dest_mach
+ << " net_lat: "
+ << link_latency + intermediate_switches << endl;
+ }
+ }
+ out << endl;
}
- }
- out << endl;
}
- }
- out << "--- End Topology Print ---" << endl;
+ out << "--- End Topology Print ---" << endl;
}
-/**************************************************************************/
-
// The following all-pairs shortest path algorithm is based on the
// discussion from Cormen et al., Chapter 26.1.
-
-static void extend_shortest_path(Matrix& current_dist, Matrix& latencies, Matrix& inter_switches)
+void
+extend_shortest_path(Matrix& current_dist, Matrix& latencies,
+ Matrix& inter_switches)
{
- bool change = true;
- int nodes = current_dist.size();
-
- while (change) {
- change = false;
- for (int i=0; i<nodes; i++) {
- for (int j=0; j<nodes; j++) {
- int minimum = current_dist[i][j];
- int previous_minimum = minimum;
- int intermediate_switch = -1;
- for (int k=0; k<nodes; k++) {
- minimum = min(minimum, current_dist[i][k] + current_dist[k][j]);
- if (previous_minimum != minimum) {
- intermediate_switch = k;
- inter_switches[i][j] = inter_switches[i][k] + inter_switches[k][j] + 1;
- }
- previous_minimum = minimum;
+ bool change = true;
+ int nodes = current_dist.size();
+
+ while (change) {
+ change = false;
+ for (int i = 0; i < nodes; i++) {
+ for (int j = 0; j < nodes; j++) {
+ int minimum = current_dist[i][j];
+ int previous_minimum = minimum;
+ int intermediate_switch = -1;
+ for (int k = 0; k < nodes; k++) {
+ minimum = min(minimum,
+ current_dist[i][k] + current_dist[k][j]);
+ if (previous_minimum != minimum) {
+ intermediate_switch = k;
+ inter_switches[i][j] =
+ inter_switches[i][k] +
+ inter_switches[k][j] + 1;
+ }
+ previous_minimum = minimum;
+ }
+ if (current_dist[i][j] != minimum) {
+ change = true;
+ current_dist[i][j] = minimum;
+ assert(intermediate_switch >= 0);
+ assert(intermediate_switch < latencies[i].size());
+ latencies[i][j] = latencies[i][intermediate_switch] +
+ latencies[intermediate_switch][j];
+ }
+ }
}
- if (current_dist[i][j] != minimum) {
- change = true;
- current_dist[i][j] = minimum;
- assert(intermediate_switch >= 0);
- assert(intermediate_switch < latencies[i].size());
- latencies[i][j] = latencies[i][intermediate_switch] + latencies[intermediate_switch][j];
- }
- }
}
- }
}
-static Matrix shortest_path(const Matrix& weights, Matrix& latencies, Matrix& inter_switches)
+Matrix
+shortest_path(const Matrix& weights, Matrix& latencies, Matrix& inter_switches)
{
- Matrix dist = weights;
- extend_shortest_path(dist, latencies, inter_switches);
- return dist;
+ Matrix dist = weights;
+ extend_shortest_path(dist, latencies, inter_switches);
+ return dist;
}
-static bool link_is_shortest_path_to_node(SwitchID src, SwitchID next, SwitchID final,
- const Matrix& weights, const Matrix& dist)
+bool
+link_is_shortest_path_to_node(SwitchID src, SwitchID next, SwitchID final,
+ const Matrix& weights, const Matrix& dist)
{
- return (weights[src][next] + dist[next][final] == dist[src][final]);
+ return weights[src][next] + dist[next][final] == dist[src][final];
}
-static NetDest shortest_path_to_node(SwitchID src, SwitchID next,
- const Matrix& weights, const Matrix& dist)
+NetDest
+shortest_path_to_node(SwitchID src, SwitchID next, const Matrix& weights,
+ const Matrix& dist)
{
- NetDest result;
- int d = 0;
- int machines;
- int max_machines;
-
- machines = MachineType_NUM;
- max_machines = MachineType_base_number(MachineType_NUM);
-
- for (int m=0; m<machines; m++) {
- for (int i=0; i<MachineType_base_count((MachineType)m); i++) {
- // we use "d+max_machines" below since the "destination" switches for the machines are numbered
- // [MachineType_base_number(MachineType_NUM)...2*MachineType_base_number(MachineType_NUM)-1]
- // for the component network
- if (link_is_shortest_path_to_node(src, next,
- d+max_machines,
- weights, dist)) {
- MachineID mach = {(MachineType)m, i};
- result.add(mach);
- }
- d++;
+ NetDest result;
+ int d = 0;
+ int machines;
+ int max_machines;
+
+ machines = MachineType_NUM;
+ max_machines = MachineType_base_number(MachineType_NUM);
+
+ for (int m = 0; m < machines; m++) {
+ for (int i = 0; i < MachineType_base_count((MachineType)m); i++) {
+ // we use "d+max_machines" below since the "destination"
+ // switches for the machines are numbered
+ // [MachineType_base_number(MachineType_NUM)...
+ // 2*MachineType_base_number(MachineType_NUM)-1] for the
+ // component network
+ if (link_is_shortest_path_to_node(src, next, d + max_machines,
+ weights, dist)) {
+ MachineID mach = {(MachineType)m, i};
+ result.add(mach);
+ }
+ d++;
+ }
}
- }
- DEBUG_MSG(NETWORK_COMP, MedPrio, "returning shortest path");
- DEBUG_EXPR(NETWORK_COMP, MedPrio, (src-(2*max_machines)));
- DEBUG_EXPR(NETWORK_COMP, MedPrio, (next-(2*max_machines)));
- DEBUG_EXPR(NETWORK_COMP, MedPrio, src);
- DEBUG_EXPR(NETWORK_COMP, MedPrio, next);
- DEBUG_EXPR(NETWORK_COMP, MedPrio, result);
- DEBUG_NEWLINE(NETWORK_COMP, MedPrio);
+ DEBUG_MSG(NETWORK_COMP, MedPrio, "returning shortest path");
+ DEBUG_EXPR(NETWORK_COMP, MedPrio, (src-(2*max_machines)));
+ DEBUG_EXPR(NETWORK_COMP, MedPrio, (next-(2*max_machines)));
+ DEBUG_EXPR(NETWORK_COMP, MedPrio, src);
+ DEBUG_EXPR(NETWORK_COMP, MedPrio, next);
+ DEBUG_EXPR(NETWORK_COMP, MedPrio, result);
+ DEBUG_NEWLINE(NETWORK_COMP, MedPrio);
- return result;
+ return result;
}
Topology *
diff --git a/src/mem/ruby/network/simple/Topology.hh b/src/mem/ruby/network/simple/Topology.hh
index 7202c4446..9bcc66c81 100644
--- a/src/mem/ruby/network/simple/Topology.hh
+++ b/src/mem/ruby/network/simple/Topology.hh
@@ -1,4 +1,3 @@
-
/*
* Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
* All rights reserved.
@@ -28,133 +27,121 @@
*/
/*
- * Topology.hh
- *
- * Description: The topology here is configurable; it can be a hierachical
- * (default one) or a 2D torus or a 2D torus with half switches
- * killed. I think all input port has a
- * one-input-one-output switch connected just to control and
- * bandwidth, since we don't control bandwidth on input ports.
- * Basically, the class has a vector of nodes and edges. First
- * 2*m_nodes elements in the node vector are input and output
- * ports. Edges are represented in two vectors of src and dest
- * nodes. All edges have latency.
- *
- * $Id$
- *
- * */
+ * The topology here is configurable; it can be a hierachical (default
+ * one) or a 2D torus or a 2D torus with half switches killed. I think
+ * all input port has a one-input-one-output switch connected just to
+ * control and bandwidth, since we don't control bandwidth on input
+ * ports. Basically, the class has a vector of nodes and edges. First
+ * 2*m_nodes elements in the node vector are input and output
+ * ports. Edges are represented in two vectors of src and dest
+ * nodes. All edges have latency.
+ */
-#ifndef TOPOLOGY_H
-#define TOPOLOGY_H
+#ifndef __MEM_RUBY_NETWORK_SIMPLE_TOPOLOGY_HH__
+#define __MEM_RUBY_NETWORK_SIMPLE_TOPOLOGY_HH__
#include <iostream>
#include <string>
-#include "mem/ruby/common/Global.hh"
#include "mem/gems_common/Vector.hh"
+#include "mem/ruby/common/Global.hh"
#include "mem/ruby/system/NodeID.hh"
-#include "sim/sim_object.hh"
-#include "params/Topology.hh"
-#include "params/Link.hh"
#include "params/ExtLink.hh"
#include "params/IntLink.hh"
+#include "params/Link.hh"
+#include "params/Topology.hh"
+#include "sim/sim_object.hh"
class Network;
class NetDest;
-typedef Vector < Vector <int> > Matrix;
+typedef Vector<Vector<int> > Matrix;
-class Link : public SimObject {
+class Link : public SimObject
+{
public:
typedef LinkParams Params;
Link(const Params *p) : SimObject(p) {}
const Params *params() const { return (const Params *)_params; }
};
-
-class ExtLink : public Link {
+class ExtLink : public Link
+{
public:
typedef ExtLinkParams Params;
ExtLink(const Params *p) : Link(p) {}
const Params *params() const { return (const Params *)_params; }
};
-
-class IntLink : public Link {
+class IntLink : public Link
+{
public:
typedef IntLinkParams Params;
IntLink(const Params *p) : Link(p) {}
const Params *params() const { return (const Params *)_params; }
};
-
-class Topology : public SimObject {
-public:
- // Constructors
+class Topology : public SimObject
+{
+ public:
typedef TopologyParams Params;
Topology(const Params *p);
+ virtual ~Topology() {}
const Params *params() const { return (const Params *)_params; }
- // Destructor
- virtual ~Topology() {}
-
- // Public Methods
- int numSwitches() const { return m_number_of_switches; }
- void createLinks(Network *net, bool isReconfiguration);
-
- void initNetworkPtr(Network* net_ptr);
-
- const std::string getName() { return m_name; }
- void printStats(std::ostream& out) const;
- void clearStats();
- void printConfig(std::ostream& out) const;
- void print(std::ostream& out) const { out << "[Topology]"; }
-
-protected:
- // Private Methods
- SwitchID newSwitchID();
- void addLink(SwitchID src, SwitchID dest, int link_latency);
- void addLink(SwitchID src, SwitchID dest, int link_latency, int bw_multiplier);
- void addLink(SwitchID src, SwitchID dest, int link_latency, int bw_multiplier, int link_weight);
- void makeLink(Network *net, SwitchID src, SwitchID dest, const NetDest& routing_table_entry, int link_latency, int weight, int bw_multiplier, bool isReconfiguration);
-
- // void makeSwitchesPerChip(Vector< Vector < SwitchID > > &nodePairs, Vector<int> &latencies, Vector<int> &bw_multis, int numberOfChips);
-
- std::string getDesignStr();
- // Private copy constructor and assignment operator
- Topology(const Topology& obj);
- Topology& operator=(const Topology& obj);
-
- // Data Members (m_ prefix)
- std::string m_name;
- bool m_print_config;
- NodeID m_nodes;
- int m_number_of_switches;
-
- Vector<AbstractController*> m_controller_vector;
-
- Vector<SwitchID> m_links_src_vector;
- Vector<SwitchID> m_links_dest_vector;
- Vector<int> m_links_latency_vector;
- Vector<int> m_links_weight_vector;
- Vector<int> m_bw_multiplier_vector;
-
- Matrix m_component_latencies;
- Matrix m_component_inter_switches;
+ int numSwitches() const { return m_number_of_switches; }
+ void createLinks(Network *net, bool isReconfiguration);
+
+ void initNetworkPtr(Network* net_ptr);
+
+ const std::string getName() { return m_name; }
+ void printStats(std::ostream& out) const;
+ void clearStats();
+ void printConfig(std::ostream& out) const;
+ void print(std::ostream& out) const { out << "[Topology]"; }
+
+ protected:
+ SwitchID newSwitchID();
+ void addLink(SwitchID src, SwitchID dest, int link_latency);
+ void addLink(SwitchID src, SwitchID dest, int link_latency,
+ int bw_multiplier);
+ void addLink(SwitchID src, SwitchID dest, int link_latency,
+ int bw_multiplier, int link_weight);
+ void makeLink(Network *net, SwitchID src, SwitchID dest,
+ const NetDest& routing_table_entry, int link_latency, int weight,
+ int bw_multiplier, bool isReconfiguration);
+
+ //void makeSwitchesPerChip(Vector<Vector< SwitchID> > &nodePairs,
+ // Vector<int> &latencies, Vector<int> &bw_multis, int numberOfChips);
+
+ std::string getDesignStr();
+ // Private copy constructor and assignment operator
+ Topology(const Topology& obj);
+ Topology& operator=(const Topology& obj);
+
+ std::string m_name;
+ bool m_print_config;
+ NodeID m_nodes;
+ int m_number_of_switches;
+
+ Vector<AbstractController*> m_controller_vector;
+
+ Vector<SwitchID> m_links_src_vector;
+ Vector<SwitchID> m_links_dest_vector;
+ Vector<int> m_links_latency_vector;
+ Vector<int> m_links_weight_vector;
+ Vector<int> m_bw_multiplier_vector;
+
+ Matrix m_component_latencies;
+ Matrix m_component_inter_switches;
};
-// Output operator declaration
-std::ostream& operator<<(std::ostream& out, const Topology& obj);
-
-// ******************* Definitions *******************
-
-// Output operator definition
-extern inline
-std::ostream& operator<<(std::ostream& out, const Topology& obj)
+inline std::ostream&
+operator<<(std::ostream& out, const Topology& obj)
{
- obj.print(out);
- out << std::flush;
- return out;
+ obj.print(out);
+ out << std::flush;
+ return out;
}
-#endif
+#endif // __MEM_RUBY_NETWORK_SIMPLE_TOPOLOGY_HH__
diff --git a/src/mem/ruby/network/simple/Torus2DTopology.cc b/src/mem/ruby/network/simple/Torus2DTopology.cc
deleted file mode 100644
index e66c6dc0b..000000000
--- a/src/mem/ruby/network/simple/Torus2DTopology.cc
+++ /dev/null
@@ -1,84 +0,0 @@
-
-// 2D torus topology
-
-void Torus2DTopology::construct()
-{
- Vector< Vector < SwitchID > > nodePairs; // node pairs extracted from the file
- Vector<int> latencies; // link latencies for each link extracted
- Vector<int> bw_multis; // bw multipliers for each link extracted
-
- Vector < SwitchID > nodes; // temporary buffer
- nodes.setSize(2);
-
- // number of inter-chip switches
- int numberOfTorusSwitches = m_nodes/MachineType_base_level(MachineType_NUM);
- // one switch per machine node grouping
- Vector<SwitchID> torusSwitches;
- for(int i=0; i<numberOfTorusSwitches; i++){
- SwitchID new_switch = newSwitchID();
- torusSwitches.insertAtBottom(new_switch);
- }
-
- makeSwitchesPerChip(nodePairs, latencies, bw_multis, numberOfTorusSwitches);
-
- int lengthOfSide = (int)sqrt((double)numberOfTorusSwitches);
-
- // Now connect the inter-chip torus links
-
- int latency = m_network_ptr->getLinkLatency(); // external link latency
- int bw_multiplier = 1; // external link bw multiplier of the global bandwidth
-
- for(int i=0; i<numberOfTorusSwitches; i++){
- nodes[0] = torusSwitches[i]; // current switch
-
- // left
- if(nodes[0]%lengthOfSide == 0){ // determine left neighbor
- nodes[1] = nodes[0] - 1 + lengthOfSide;
- } else {
- nodes[1] = nodes[0] - 1;
- }
- nodePairs.insertAtBottom(nodes);
- latencies.insertAtBottom(latency);
- bw_multis.insertAtBottom(bw_multiplier);
-
- // right
- if((nodes[0] + 1)%lengthOfSide == 0){ // determine right neighbor
- nodes[1] = nodes[0] + 1 - lengthOfSide;
- } else {
- nodes[1] = nodes[0] + 1;
- }
- nodePairs.insertAtBottom(nodes);
- latencies.insertAtBottom(latency);
- bw_multis.insertAtBottom(bw_multiplier);
-
- // top
- if(nodes[0] - lengthOfSide < 2*m_nodes){ // determine if node is on the top
- nodes[1] = nodes[0] - lengthOfSide + (lengthOfSide*lengthOfSide);
- } else {
- nodes[1] = nodes[0] - lengthOfSide;
- }
- nodePairs.insertAtBottom(nodes);
- latencies.insertAtBottom(latency);
- bw_multis.insertAtBottom(bw_multiplier);
-
- // bottom
- if(nodes[0] + lengthOfSide >= 2*m_nodes+numberOfTorusSwitches){ // determine if node is on the bottom
- // sorin: bad bug if this is a > instead of a >=
- nodes[1] = nodes[0] + lengthOfSide - (lengthOfSide*lengthOfSide);
- } else {
- nodes[1] = nodes[0] + lengthOfSide;
- }
- nodePairs.insertAtBottom(nodes);
- latencies.insertAtBottom(latency);
- bw_multis.insertAtBottom(bw_multiplier);
-
- }
-
- // add links
- ASSERT(nodePairs.size() == latencies.size() && latencies.size() == bw_multis.size())
- for (int k = 0; k < nodePairs.size(); k++) {
- ASSERT(nodePairs[k].size() == 2);
- addLink(nodePairs[k][0], nodePairs[k][1], latencies[k], bw_multis[k]);
- }
-
-}
diff --git a/src/mem/ruby/network/simple/Torus2DTopology.hh b/src/mem/ruby/network/simple/Torus2DTopology.hh
deleted file mode 100644
index bc50f161a..000000000
--- a/src/mem/ruby/network/simple/Torus2DTopology.hh
+++ /dev/null
@@ -1,17 +0,0 @@
-
-#ifndef TORUS2DTOPOLOGY_H
-#define TORUS2DTOPOLOGY_H
-
-#include "mem/ruby/network/simple/Topology.hh"
-
-class Torus2DTopology : public Topology
-{
-public:
- Torus2DTopology(const string & name);
- void init();
-
-protected:
- void construct();
-};
-
-#endif
diff --git a/src/mem/ruby/network/topologies/Crossbar.py b/src/mem/ruby/network/topologies/Crossbar.py
index 18c8be251..86c53cdef 100644
--- a/src/mem/ruby/network/topologies/Crossbar.py
+++ b/src/mem/ruby/network/topologies/Crossbar.py
@@ -33,7 +33,7 @@ def makeTopology(nodes, options):
ext_links = [ExtLink(ext_node=n, int_node=i)
for (i, n) in enumerate(nodes)]
xbar = len(nodes) # node ID for crossbar switch
- int_links = [IntLink(node_a=i, node_b=xbar) for i in range(len(nodes))]
+ int_links = [IntLink(node_a=i, node_b=xbar) for i in range(len(nodes))]
return Topology(ext_links=ext_links, int_links=int_links,
num_int_nodes=len(nodes)+1)
diff --git a/src/mem/ruby/network/topologies/Mesh.py b/src/mem/ruby/network/topologies/Mesh.py
index 6871bec1f..57913e778 100644
--- a/src/mem/ruby/network/topologies/Mesh.py
+++ b/src/mem/ruby/network/topologies/Mesh.py
@@ -29,28 +29,22 @@
from m5.params import *
from m5.objects import *
-#
# Makes a generic mesh assuming an equal number of cache and directory cntrls
-#
def makeTopology(nodes, options):
num_routers = options.num_cpus
num_rows = options.mesh_rows
-
- #
+
# There must be an evenly divisible number of cntrls to routers
# Also, obviously the number or rows must be <= the number of routers
- #
cntrls_per_router, remainder = divmod(len(nodes), num_routers)
assert(num_rows <= num_routers)
num_columns = int(num_routers / num_rows)
assert(num_columns * num_rows == num_routers)
- #
# Add all but the remainder nodes to the list of nodes to be uniformly
# distributed across the network.
- #
network_nodes = []
remainder_nodes = []
for node_index in xrange(len(nodes)):
@@ -59,27 +53,22 @@ def makeTopology(nodes, options):
else:
remainder_nodes.append(nodes[node_index])
- #
# Connect each node to the appropriate router
- #
ext_links = []
for (i, n) in enumerate(network_nodes):
cntrl_level, router_id = divmod(i, num_routers)
assert(cntrl_level < cntrls_per_router)
ext_links.append(ExtLink(ext_node=n, int_node=router_id))
- #
- # Connect the remainding nodes to router 0. These should only be DMA nodes.
- #
+ # Connect the remainding nodes to router 0. These should only be
+ # DMA nodes.
for (i, node) in enumerate(remainder_nodes):
assert(node.type == 'DMA_Controller')
assert(i < remainder)
ext_links.append(ExtLink(ext_node=node, int_node=0))
-
- #
+
# Create the mesh links. First row (east-west) links then column
# (north-south) links
- #
int_links = []
for row in xrange(num_rows):
for col in xrange(num_columns):
diff --git a/src/mem/ruby/network/topologies/MeshDirCorners.py b/src/mem/ruby/network/topologies/MeshDirCorners.py
index 8b08241ae..b87b749f6 100644
--- a/src/mem/ruby/network/topologies/MeshDirCorners.py
+++ b/src/mem/ruby/network/topologies/MeshDirCorners.py
@@ -29,20 +29,16 @@
from m5.params import *
from m5.objects import *
-#
-# This file contains a special network creation function. This networks is not
-# general and will only work with specific system configurations. The network
-# specified is similar to GEMS old file specified network.
-#
+# This file contains a special network creation function. This
+# networks is not general and will only work with specific system
+# configurations. The network specified is similar to GEMS old file
+# specified network.
def makeTopology(nodes, options):
-
num_routers = options.num_cpus
num_rows = options.mesh_rows
-
- #
+
# First determine which nodes are cache cntrls vs. dirs vs. dma
- #
cache_nodes = []
dir_nodes = []
dma_nodes = []
@@ -54,12 +50,11 @@ def makeTopology(nodes, options):
dir_nodes.append(node)
elif node.type == 'DMA_Controller':
dma_nodes.append(node)
-
- #
- # Obviously the number or rows must be <= the number of routers and evenly
- # divisible. Also the number of caches must be a multiple of the number of
- # routers and the number of directories must be four.
- #
+
+ # Obviously the number or rows must be <= the number of routers
+ # and evenly divisible. Also the number of caches must be a
+ # multiple of the number of routers and the number of directories
+ # must be four.
assert(num_rows <= num_routers)
num_columns = int(num_routers / num_rows)
assert(num_columns * num_rows == num_routers)
@@ -67,37 +62,31 @@ def makeTopology(nodes, options):
assert(remainder == 0)
assert(len(dir_nodes) == 4)
- #
# Connect each cache controller to the appropriate router
- #
ext_links = []
for (i, n) in enumerate(cache_nodes):
cntrl_level, router_id = divmod(i, num_routers)
assert(cntrl_level < caches_per_router)
ext_links.append(ExtLink(ext_node=n, int_node=router_id))
- #
# Connect the dir nodes to the corners.
- #
ext_links.append(ExtLink(ext_node=dir_nodes[0], int_node=0))
- ext_links.append(ExtLink(ext_node=dir_nodes[1], int_node=(num_columns - 1)))
+ ext_links.append(ExtLink(ext_node=dir_nodes[1],
+ int_node=(num_columns - 1)))
ext_links.append(ExtLink(ext_node=dir_nodes[2],
int_node=(num_routers - num_columns)))
- ext_links.append(ExtLink(ext_node=dir_nodes[3], int_node=(num_routers - 1)))
+ ext_links.append(ExtLink(ext_node=dir_nodes[3],
+ int_node=(num_routers - 1)))
- #
# Connect the dma nodes to router 0. These should only be DMA nodes.
- #
for (i, node) in enumerate(dma_nodes):
assert(node.type == 'DMA_Controller')
ext_links.append(ExtLink(ext_node=node, int_node=0))
-
- #
+
# Create the mesh links. First row (east-west) links then column
# (north-south) links
- #
int_links = []
for row in xrange(num_rows):
for col in xrange(num_columns):
diff --git a/src/mem/ruby/network/topologies/SConscript b/src/mem/ruby/network/topologies/SConscript
index 71ee7809c..649769ed0 100644
--- a/src/mem/ruby/network/topologies/SConscript
+++ b/src/mem/ruby/network/topologies/SConscript
@@ -1,3 +1,5 @@
+# -*- mode:python -*-
+
# Copyright (c) 2010 Advanced Micro Devices, Inc.
# All rights reserved.
#
diff --git a/src/mem/ruby/profiler/AddressProfiler.cc b/src/mem/ruby/profiler/AddressProfiler.cc
index 2d7d655c0..cebf22fd8 100644
--- a/src/mem/ruby/profiler/AddressProfiler.cc
+++ b/src/mem/ruby/profiler/AddressProfiler.cc
@@ -47,7 +47,7 @@ lookupTraceForAddress(const Address& addr, AddressMap* record_map)
}
void
-printSorted(ostream& out, int num_of_sequencers, const AddressMap* record_map,
+printSorted(ostream& out, int num_of_sequencers, const AddressMap* record_map,
string description)
{
const int records_printed = 100;
diff --git a/src/mem/ruby/profiler/Profiler.cc b/src/mem/ruby/profiler/Profiler.cc
index 2cc3eddfc..559c16900 100644
--- a/src/mem/ruby/profiler/Profiler.cc
+++ b/src/mem/ruby/profiler/Profiler.cc
@@ -125,7 +125,7 @@ Profiler::wakeup()
<< "mbytes_total: " << process_memory_total() << endl;
if (process_memory_total() > 0) {
- out << "resident_ratio: "
+ out << "resident_ratio: "
<< process_memory_resident() / process_memory_total() << endl;
}
@@ -158,7 +158,7 @@ Profiler::setPeriodicStatsFile(const string& filename)
void
Profiler::setPeriodicStatsInterval(integer_t period)
{
- cout << "Recording periodic statistics every " << m_stats_period
+ cout << "Recording periodic statistics every " << m_stats_period
<< " Ruby cycles" << endl;
m_stats_period = period;
@@ -227,7 +227,7 @@ Profiler::printStats(ostream& out, bool short_stats)
out << "mbytes_resident: " << process_memory_resident() << endl;
out << "mbytes_total: " << process_memory_total() << endl;
if (process_memory_total() > 0) {
- out << "resident_ratio: "
+ out << "resident_ratio: "
<< process_memory_resident()/process_memory_total() << endl;
}
out << endl;
@@ -633,7 +633,7 @@ static double
process_memory_total()
{
// 4kB page size, 1024*1024 bytes per MB,
- const double MULTIPLIER = 4096.0 / (1024.0 * 1024.0);
+ const double MULTIPLIER = 4096.0 / (1024.0 * 1024.0);
ifstream proc_file;
proc_file.open("/proc/self/statm");
int total_size_in_pages = 0;
diff --git a/src/mem/ruby/recorder/CacheRecorder.cc b/src/mem/ruby/recorder/CacheRecorder.cc
index 09c4e0597..495a38fc8 100644
--- a/src/mem/ruby/recorder/CacheRecorder.cc
+++ b/src/mem/ruby/recorder/CacheRecorder.cc
@@ -1,4 +1,3 @@
-
/*
* Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
* All rights reserved.
@@ -27,53 +26,50 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-/*
- * $Id$
- *
- */
+#include "gzstream.hh"
+#include "mem/gems_common/PrioHeap.hh"
+#include "mem/ruby/eventqueue/RubyEventQueue.hh"
#include "mem/ruby/recorder/CacheRecorder.hh"
#include "mem/ruby/recorder/TraceRecord.hh"
-#include "mem/ruby/eventqueue/RubyEventQueue.hh"
-#include "mem/gems_common/PrioHeap.hh"
-#include "gzstream.hh"
CacheRecorder::CacheRecorder()
{
- m_records_ptr = new PrioHeap<TraceRecord>;
+ m_records_ptr = new PrioHeap<TraceRecord>;
}
CacheRecorder::~CacheRecorder()
{
- delete m_records_ptr;
+ delete m_records_ptr;
}
-void CacheRecorder::addRecord(Sequencer* sequencer,
- const Address& data_addr,
- const Address& pc_addr,
- RubyRequestType type,
- Time time)
+void
+CacheRecorder::addRecord(Sequencer* sequencer, const Address& data_addr,
+ const Address& pc_addr, RubyRequestType type, Time time)
{
- m_records_ptr->insert(TraceRecord(sequencer, data_addr, pc_addr, type, time));
+ m_records_ptr->
+ insert(TraceRecord(sequencer, data_addr, pc_addr, type, time));
}
-int CacheRecorder::dumpRecords(string filename)
+int
+CacheRecorder::dumpRecords(string filename)
{
- ogzstream out(filename.c_str());
- if (out.fail()) {
- cout << "Error: error opening file '" << filename << "'" << endl;
- return 0;
- }
+ ogzstream out(filename.c_str());
+ if (out.fail()) {
+ cout << "Error: error opening file '" << filename << "'" << endl;
+ return 0;
+ }
- int counter = 0;
- while (m_records_ptr->size() != 0) {
- TraceRecord record = m_records_ptr->extractMin();
- record.output(out);
- counter++;
- }
- return counter;
+ int counter = 0;
+ while (m_records_ptr->size() != 0) {
+ TraceRecord record = m_records_ptr->extractMin();
+ record.output(out);
+ counter++;
+ }
+ return counter;
}
-void CacheRecorder::print(ostream& out) const
+void
+CacheRecorder::print(ostream& out) const
{
}
diff --git a/src/mem/ruby/recorder/CacheRecorder.hh b/src/mem/ruby/recorder/CacheRecorder.hh
index 2616f73ae..18c246c9f 100644
--- a/src/mem/ruby/recorder/CacheRecorder.hh
+++ b/src/mem/ruby/recorder/CacheRecorder.hh
@@ -1,4 +1,3 @@
-
/*
* Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
* All rights reserved.
@@ -28,15 +27,12 @@
*/
/*
- * $Id$
- *
- * Description: Recording cache requests made to a ruby cache at certain
- * ruby time. Also dump the requests to a gziped file.
- *
+ * Recording cache requests made to a ruby cache at certain ruby
+ * time. Also dump the requests to a gziped file.
*/
-#ifndef CACHERECORDER_H
-#define CACHERECORDER_H
+#ifndef __MEM_RUBY_RECORDER_CACHERECORDER_HH__
+#define __MEM_RUBY_RECORDER_CACHERECORDER_HH__
#include <iostream>
#include <string>
@@ -51,46 +47,32 @@ class Address;
class TraceRecord;
class Sequencer;
-class CacheRecorder {
-public:
- // Constructors
- CacheRecorder();
-
- // Destructor
- ~CacheRecorder();
+class CacheRecorder
+{
+ public:
+ CacheRecorder();
+ ~CacheRecorder();
- // Public Methods
- void addRecord(Sequencer* sequencer,
- const Address& data_addr,
- const Address& pc_addr,
- RubyRequestType type,
- Time time);
- int dumpRecords(std::string filename);
+ void addRecord(Sequencer* sequencer, const Address& data_addr,
+ const Address& pc_addr, RubyRequestType type, Time time);
+ int dumpRecords(std::string filename);
- void print(std::ostream& out) const;
-private:
- // Private Methods
+ void print(std::ostream& out) const;
- // Private copy constructor and assignment operator
- CacheRecorder(const CacheRecorder& obj);
- CacheRecorder& operator=(const CacheRecorder& obj);
+ private:
+ // Private copy constructor and assignment operator
+ CacheRecorder(const CacheRecorder& obj);
+ CacheRecorder& operator=(const CacheRecorder& obj);
- // Data Members (m_ prefix)
- PrioHeap<TraceRecord>* m_records_ptr;
+ PrioHeap<TraceRecord>* m_records_ptr;
};
-// Output operator declaration
-std::ostream& operator<<(std::ostream& out, const CacheRecorder& obj);
-
-// ******************* Definitions *******************
-
-// Output operator definition
-extern inline
-std::ostream& operator<<(std::ostream& out, const CacheRecorder& obj)
+inline std::ostream&
+operator<<(std::ostream& out, const CacheRecorder& obj)
{
- obj.print(out);
- out << std::flush;
- return out;
+ obj.print(out);
+ out << std::flush;
+ return out;
}
-#endif //CACHERECORDER_H
+#endif // __MEM_RUBY_RECORDER_CACHERECORDER_HH__
diff --git a/src/mem/ruby/recorder/TraceRecord.cc b/src/mem/ruby/recorder/TraceRecord.cc
index 81d1e17d8..8c8daa051 100644
--- a/src/mem/ruby/recorder/TraceRecord.cc
+++ b/src/mem/ruby/recorder/TraceRecord.cc
@@ -1,4 +1,3 @@
-
/*
* Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
* All rights reserved.
@@ -27,125 +26,114 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-/*
- * $Id$
- *
- */
-
+#include "mem/protocol/CacheMsg.hh"
#include "mem/ruby/recorder/TraceRecord.hh"
#include "mem/ruby/system/Sequencer.hh"
#include "mem/ruby/system/System.hh"
-#include "mem/protocol/CacheMsg.hh"
#include "sim/sim_object.hh"
-TraceRecord::TraceRecord(Sequencer* _sequencer,
- const Address& data_addr,
- const Address& pc_addr,
- RubyRequestType type,
- Time time)
+TraceRecord::TraceRecord(Sequencer* _sequencer, const Address& data_addr,
+ const Address& pc_addr, RubyRequestType type, Time time)
{
- m_sequencer_ptr = _sequencer;
- m_data_address = data_addr;
- m_pc_address = pc_addr;
- m_time = time;
- m_type = type;
-
- // Don't differentiate between store misses and atomic requests in
- // the trace
- if (m_type == RubyRequestType_Locked_Read) {
- m_type = RubyRequestType_ST;
- }
- else if (m_type == RubyRequestType_Locked_Write) {
- m_type = RubyRequestType_ST;
- }
+ m_sequencer_ptr = _sequencer;
+ m_data_address = data_addr;
+ m_pc_address = pc_addr;
+ m_time = time;
+ m_type = type;
+
+ // Don't differentiate between store misses and atomic requests in
+ // the trace
+ if (m_type == RubyRequestType_Locked_Read) {
+ m_type = RubyRequestType_ST;
+ } else if (m_type == RubyRequestType_Locked_Write) {
+ m_type = RubyRequestType_ST;
+ }
}
-// Public copy constructor and assignment operator
TraceRecord::TraceRecord(const TraceRecord& obj)
{
- *this = obj; // Call assignment operator
+ // Call assignment operator
+ *this = obj;
}
-TraceRecord& TraceRecord::operator=(const TraceRecord& obj)
+TraceRecord&
+TraceRecord::operator=(const TraceRecord& obj)
{
- m_sequencer_ptr = obj.m_sequencer_ptr;
- m_time = obj.m_time;
- m_data_address = obj.m_data_address;
- m_pc_address = obj.m_pc_address;
- m_type = obj.m_type;
- return *this;
+ m_sequencer_ptr = obj.m_sequencer_ptr;
+ m_time = obj.m_time;
+ m_data_address = obj.m_data_address;
+ m_pc_address = obj.m_pc_address;
+ m_type = obj.m_type;
+ return *this;
}
-void TraceRecord::issueRequest() const
+void
+TraceRecord::issueRequest() const
{
- assert(m_sequencer_ptr != NULL);
-
- RubyRequest request(m_data_address.getAddress(),
- NULL,
- RubySystem::getBlockSizeBytes(),
- m_pc_address.getAddress(),
- m_type,
- RubyAccessMode_User,
- NULL);
-
- // Clear out the sequencer
- while (!m_sequencer_ptr->empty()) {
- g_eventQueue_ptr->triggerEvents(g_eventQueue_ptr->getTime() + 100);
- }
-
- m_sequencer_ptr->makeRequest(request);
-
- // Clear out the sequencer
- while (!m_sequencer_ptr->empty()) {
- g_eventQueue_ptr->triggerEvents(g_eventQueue_ptr->getTime() + 100);
- }
+ assert(m_sequencer_ptr != NULL);
+
+ RubyRequest request(m_data_address.getAddress(), NULL,
+ RubySystem::getBlockSizeBytes(), m_pc_address.getAddress(),
+ m_type, RubyAccessMode_User, NULL);
+
+ // Clear out the sequencer
+ while (!m_sequencer_ptr->empty()) {
+ g_eventQueue_ptr->triggerEvents(g_eventQueue_ptr->getTime() + 100);
+ }
+
+ m_sequencer_ptr->makeRequest(request);
+
+ // Clear out the sequencer
+ while (!m_sequencer_ptr->empty()) {
+ g_eventQueue_ptr->triggerEvents(g_eventQueue_ptr->getTime() + 100);
+ }
}
-void TraceRecord::print(ostream& out) const
+void
+TraceRecord::print(ostream& out) const
{
- out << "[TraceRecord: Node, " << m_sequencer_ptr->name() << ", "
- << m_data_address << ", " << m_pc_address << ", "
- << m_type << ", Time: " << m_time << "]";
+ out << "[TraceRecord: Node, " << m_sequencer_ptr->name() << ", "
+ << m_data_address << ", " << m_pc_address << ", "
+ << m_type << ", Time: " << m_time << "]";
}
-void TraceRecord::output(ostream& out) const
+void
+TraceRecord::output(ostream& out) const
{
- out << m_sequencer_ptr->name() << " ";
- m_data_address.output(out);
- out << " ";
- m_pc_address.output(out);
- out << " ";
- out << m_type;
- out << endl;
+ out << m_sequencer_ptr->name() << " ";
+ m_data_address.output(out);
+ out << " ";
+ m_pc_address.output(out);
+ out << " ";
+ out << m_type;
+ out << endl;
}
-bool TraceRecord::input(istream& in)
+bool
+TraceRecord::input(istream& in)
{
- string sequencer_name;
- in >> sequencer_name;
-
- //
- // The SimObject find function is slow and iterates through the
- // simObjectList to find the sequencer pointer. Therefore, expect trace
- // playback to be slow.
- //
- m_sequencer_ptr = (Sequencer*)SimObject::find(sequencer_name.c_str());
-
- m_data_address.input(in);
- m_pc_address.input(in);
- string type;
- if (!in.eof()) {
+ string sequencer_name;
+ in >> sequencer_name;
+
+ // The SimObject find function is slow and iterates through the
+ // simObjectList to find the sequencer pointer. Therefore, expect
+ // trace playback to be slow.
+ m_sequencer_ptr = (Sequencer*)SimObject::find(sequencer_name.c_str());
+
+ m_data_address.input(in);
+ m_pc_address.input(in);
+ if (in.eof())
+ return false;
+
+ string type;
in >> type;
m_type = string_to_RubyRequestType(type);
// Ignore the rest of the line
char c = '\0';
while ((!in.eof()) && (c != '\n')) {
- in.get(c);
+ in.get(c);
}
return true;
- } else {
- return false;
- }
}
diff --git a/src/mem/ruby/recorder/TraceRecord.hh b/src/mem/ruby/recorder/TraceRecord.hh
index a187f2643..c8dae1986 100644
--- a/src/mem/ruby/recorder/TraceRecord.hh
+++ b/src/mem/ruby/recorder/TraceRecord.hh
@@ -1,4 +1,3 @@
-
/*
* Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
* All rights reserved.
@@ -28,84 +27,69 @@
*/
/*
- * $Id$
- *
- * Description: A entry in the cache request record. It is aware of
- * the ruby time and can issue the request back to the
- * cache.
- *
+ * A entry in the cache request record. It is aware of the ruby time
+ * and can issue the request back to the cache.
*/
-#ifndef TRACERECORD_H
-#define TRACERECORD_H
+#ifndef __MEM_RUBY_RECORDER_TRACERECORD_HH__
+#define __MEM_RUBY_RECORDER_TRACERECORD_HH__
-#include "mem/ruby/libruby_internal.hh"
-#include "mem/ruby/system/Sequencer.hh"
-#include "mem/ruby/common/Global.hh"
#include "mem/ruby/common/Address.hh"
+#include "mem/ruby/common/Global.hh"
+#include "mem/ruby/libruby_internal.hh"
#include "mem/ruby/system/NodeID.hh"
-class CacheMsg;
-
-class TraceRecord {
-public:
- // Constructors
- TraceRecord(Sequencer* _sequencer,
- const Address& data_addr,
- const Address& pc_addr,
- RubyRequestType type,
- Time time);
-
- TraceRecord() {
- m_sequencer_ptr = NULL;
- m_time = 0;
- m_type = RubyRequestType_NULL;
- }
-
- // Destructor
- // ~TraceRecord();
-
- // Public copy constructor and assignment operator
- TraceRecord(const TraceRecord& obj);
- TraceRecord& operator=(const TraceRecord& obj);
-
- // Public Methods
- bool node_less_then_eq(const TraceRecord& rec) const { return (this->m_time <= rec.m_time); }
- void issueRequest() const;
+#include "mem/ruby/system/Sequencer.hh"
- void print(ostream& out) const;
- void output(ostream& out) const;
- bool input(istream& in);
-private:
- // Private Methods
+class CacheMsg;
- // Data Members (m_ prefix)
- Sequencer* m_sequencer_ptr;
- Time m_time;
- Address m_data_address;
- Address m_pc_address;
- RubyRequestType m_type;
+class TraceRecord
+{
+ public:
+ TraceRecord(Sequencer* _sequencer, const Address& data_addr,
+ const Address& pc_addr, RubyRequestType type, Time time);
+
+ TraceRecord()
+ {
+ m_sequencer_ptr = NULL;
+ m_time = 0;
+ m_type = RubyRequestType_NULL;
+ }
+
+ TraceRecord(const TraceRecord& obj);
+ TraceRecord& operator=(const TraceRecord& obj);
+
+ bool
+ node_less_then_eq(const TraceRecord& rec) const
+ {
+ return this->m_time <= rec.m_time;
+ }
+
+ void issueRequest() const;
+
+ void print(ostream& out) const;
+ void output(ostream& out) const;
+ bool input(istream& in);
+
+ private:
+ Sequencer* m_sequencer_ptr;
+ Time m_time;
+ Address m_data_address;
+ Address m_pc_address;
+ RubyRequestType m_type;
};
-inline extern bool node_less_then_eq(const TraceRecord& n1, const TraceRecord& n2);
-
-// Output operator declaration
-ostream& operator<<(ostream& out, const TraceRecord& obj);
-
-// ******************* Definitions *******************
-
-inline extern
-bool node_less_then_eq(const TraceRecord& n1, const TraceRecord& n2)
+inline bool
+node_less_then_eq(const TraceRecord& n1, const TraceRecord& n2)
{
- return n1.node_less_then_eq(n2);
+ return n1.node_less_then_eq(n2);
}
-// Output operator definition
-extern inline
-ostream& operator<<(ostream& out, const TraceRecord& obj)
+inline std::ostream&
+operator<<(ostream& out, const TraceRecord& obj)
{
- obj.print(out);
- out << flush;
- return out;
+ obj.print(out);
+ out << std::flush;
+ return out;
}
-#endif //TRACERECORD_H
+#endif // __MEM_RUBY_RECORDER_TRACERECORD_HH__
diff --git a/src/mem/ruby/recorder/Tracer.cc b/src/mem/ruby/recorder/Tracer.cc
index 5a20c2b02..e2d216ba3 100644
--- a/src/mem/ruby/recorder/Tracer.cc
+++ b/src/mem/ruby/recorder/Tracer.cc
@@ -1,4 +1,3 @@
-
/*
* Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
* All rights reserved.
@@ -27,131 +26,107 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-/*
- * $Id$
- *
- */
-
-#include "mem/ruby/recorder/Tracer.hh"
-#include "mem/ruby/recorder/TraceRecord.hh"
-#include "mem/ruby/eventqueue/RubyEventQueue.hh"
+#include "base/cprintf.hh"
#include "mem/gems_common/PrioHeap.hh"
+#include "mem/ruby/eventqueue/RubyEventQueue.hh"
+#include "mem/ruby/recorder/TraceRecord.hh"
+#include "mem/ruby/recorder/Tracer.hh"
#include "mem/ruby/system/System.hh"
-//added by SS
Tracer::Tracer(const Params *p)
: SimObject(p)
{
- m_enabled = false;
- m_warmup_length = p->warmup_length;
- assert(m_warmup_length > 0);
- RubySystem::m_tracer_ptr = this;
-}
-
-//commented by SS
-//Tracer::Tracer()
-//{
-// m_enabled = false;
-//}
-
-Tracer::~Tracer()
-{
+ m_enabled = false;
+ m_warmup_length = p->warmup_length;
+ assert(m_warmup_length > 0);
+ RubySystem::m_tracer_ptr = this;
}
-void Tracer::init()
+void
+Tracer::startTrace(std::string filename)
{
+ if (m_enabled)
+ stopTrace();
+
+ if (filename != "") {
+ m_trace_file.open(filename.c_str());
+ if (m_trace_file.fail()) {
+ cprintf("Error: error opening file '%s'\n", filename);
+ cprintf("Trace not enabled.\n");
+ return;
+ }
+ cprintf("Request trace enabled to output file '%s'\n", filename);
+ m_enabled = true;
+ }
}
-void Tracer::startTrace(std::string filename)
+void
+Tracer::stopTrace()
{
- if (m_enabled) {
- stopTrace();
- }
-
- if (filename != "") {
- m_trace_file.open(filename.c_str());
- if (m_trace_file.fail()) {
- cout << "Error: error opening file '" << filename << "'" << endl;
- cout << "Trace not enabled." << endl;
- return;
+ if (m_enabled) {
+ m_trace_file.close();
+ cout << "Request trace file closed." << endl;
+ m_enabled = false;
}
- cout << "Request trace enabled to output file '" << filename << "'" << endl;
- m_enabled = true;
- }
}
-void Tracer::stopTrace()
+void
+Tracer::traceRequest(Sequencer* sequencer, const Address& data_addr,
+ const Address& pc_addr, RubyRequestType type, Time time)
{
- if (m_enabled == true) {
- m_trace_file.close();
- cout << "Request trace file closed." << endl;
- m_enabled = false;
- }
+ assert(m_enabled);
+ TraceRecord tr(sequencer, data_addr, pc_addr, type, time);
+ tr.output(m_trace_file);
}
-void Tracer::traceRequest(Sequencer* sequencer,
- const Address& data_addr,
- const Address& pc_addr,
- RubyRequestType type,
- Time time)
+int
+Tracer::playbackTrace(std::string filename)
{
- assert(m_enabled == true);
- TraceRecord tr(sequencer, data_addr, pc_addr, type, time);
- tr.output(m_trace_file);
-}
+ igzstream in(filename.c_str());
+ if (in.fail()) {
+ cprintf("Error: error opening file '%s'\n", filename);
+ return 0;
+ }
-// Class method
-int Tracer::playbackTrace(std::string filename)
-{
- igzstream in(filename.c_str());
- if (in.fail()) {
- cout << "Error: error opening file '" << filename << "'" << endl;
- return 0;
- }
-
- time_t start_time = time(NULL);
-
- TraceRecord record;
- int counter = 0;
- // Read in the next TraceRecord
- bool ok = record.input(in);
- while (ok) {
- // Put it in the right cache
- record.issueRequest();
- counter++;
+ time_t start_time = time(NULL);
+ TraceRecord record;
+ int counter = 0;
// Read in the next TraceRecord
- ok = record.input(in);
-
- // Clear the statistics after warmup
-/* if (counter == m_warmup_length) {
- cout << "Clearing stats after warmup of length " << m_warmup_length << endl;
- g_system_ptr->clearStats();
+ bool ok = record.input(in);
+ while (ok) {
+ // Put it in the right cache
+ record.issueRequest();
+ counter++;
+
+ // Read in the next TraceRecord
+ ok = record.input(in);
+
+ // Clear the statistics after warmup
+ if (counter == m_warmup_length) {
+ cprintf("Clearing stats after warmup of length %s\n",
+ m_warmup_length);
+ g_system_ptr->clearStats();
+ }
}
-*/
- if (counter == m_warmup_length) {
- cout << "Clearing stats after warmup of length " << m_warmup_length << endl;
- g_system_ptr->clearStats();
- }
-
- }
- // Flush the prefetches through the system
- g_eventQueue_ptr->triggerEvents(g_eventQueue_ptr->getTime() + 1000); // FIXME - should be smarter
+ // Flush the prefetches through the system
+ // FIXME - should be smarter
+ g_eventQueue_ptr->triggerEvents(g_eventQueue_ptr->getTime() + 1000);
- time_t stop_time = time(NULL);
- double seconds = difftime(stop_time, start_time);
- double minutes = seconds / 60.0;
- cout << "playbackTrace: " << minutes << " minutes" << endl;
+ time_t stop_time = time(NULL);
+ double seconds = difftime(stop_time, start_time);
+ double minutes = seconds / 60.0;
+ cout << "playbackTrace: " << minutes << " minutes" << endl;
- return counter;
+ return counter;
}
-void Tracer::print(std::ostream& out) const
+void
+Tracer::print(std::ostream& out) const
{
}
-
Tracer *
RubyTracerParams::create()
{
diff --git a/src/mem/ruby/recorder/Tracer.hh b/src/mem/ruby/recorder/Tracer.hh
index a068c32eb..d468b4920 100644
--- a/src/mem/ruby/recorder/Tracer.hh
+++ b/src/mem/ruby/recorder/Tracer.hh
@@ -1,4 +1,3 @@
-
/*
* Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
* All rights reserved.
@@ -28,87 +27,64 @@
*/
/*
- * $Id$
- *
- * Description: Controller class of the tracer. Can stop/start/playback
- * the ruby cache requests trace.
- *
+ * Controller class of the tracer. Can stop/start/playback the ruby
+ * cache requests trace.
*/
-#ifndef TRACER_H
-#define TRACER_H
+#ifndef __MEM_RUBY_RECORDER_TRACER_HH__
+#define __MEM_RUBY_RECORDER_TRACER_HH__
#include <iostream>
#include <string>
-#include "mem/ruby/libruby_internal.hh"
+#include "gzstream.hh"
+#include "mem/protocol/CacheRequestType.hh"
#include "mem/ruby/common/Global.hh"
+#include "mem/ruby/libruby_internal.hh"
#include "mem/ruby/system/NodeID.hh"
-#include "mem/protocol/CacheRequestType.hh"
-#include "sim/sim_object.hh"
-
#include "params/RubyTracer.hh"
-
-#include "gzstream.hh"
+#include "sim/sim_object.hh"
template <class TYPE> class PrioHeap;
class Address;
class TraceRecord;
class Sequencer;
-class Tracer : public SimObject {
-public:
- // Constructors
-// Tracer();
+class Tracer : public SimObject
+{
+ public:
typedef RubyTracerParams Params;
- Tracer(const Params *p);
-
- // Destructor
- ~Tracer();
+ Tracer(const Params *p);
- // Public Methods
- void startTrace(std::string filename);
- void stopTrace();
- bool traceEnabled() { return m_enabled; }
- void traceRequest(Sequencer* sequencer,
- const Address& data_addr,
- const Address& pc_addr,
- RubyRequestType type,
- Time time);
+ void startTrace(std::string filename);
+ void stopTrace();
+ bool traceEnabled() { return m_enabled; }
+ void traceRequest(Sequencer* sequencer, const Address& data_addr,
+ const Address& pc_addr, RubyRequestType type, Time time);
- void print(std::ostream& out) const;
+ void print(std::ostream& out) const;
- // Public Class Methods
- int playbackTrace(std::string filename);
- void init();
-private:
- // Private Methods
+ int playbackTrace(std::string filename);
- // Private copy constructor and assignment operator
- Tracer(const Tracer& obj);
- Tracer& operator=(const Tracer& obj);
+ private:
+ // Private copy constructor and assignment operator
+ Tracer(const Tracer& obj);
+ Tracer& operator=(const Tracer& obj);
- // Data Members (m_ prefix)
- ogzstream m_trace_file;
- bool m_enabled;
+ ogzstream m_trace_file;
+ bool m_enabled;
- //added by SS
- int m_warmup_length;
+ //added by SS
+ int m_warmup_length;
};
-// Output operator declaration
-std::ostream& operator<<(std::ostream& out, const Tracer& obj);
-
-// ******************* Definitions *******************
-
-// Output operator definition
-extern inline
-std::ostream& operator<<(std::ostream& out, const Tracer& obj)
+inline std::ostream&
+operator<<(std::ostream& out, const Tracer& obj)
{
- obj.print(out);
- out << std::flush;
- return out;
+ obj.print(out);
+ out << std::flush;
+ return out;
}
-#endif //TRACER_H
+#endif // __MEM_RUBY_RECORDER_TRACER_HH__
diff --git a/src/mem/ruby/storebuffer/hfa.hh b/src/mem/ruby/storebuffer/hfa.hh
deleted file mode 100644
index abcd96495..000000000
--- a/src/mem/ruby/storebuffer/hfa.hh
+++ /dev/null
@@ -1,103 +0,0 @@
-/*
- * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met: redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer;
- * redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution;
- * neither the name of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-// this code was modified to fit into Rochs
-
-#ifndef _HFA_H_
-#define _HFA_H_
-
-using namespace std;
-
-/*
- * Global include file for entire project.
- * Should be included first in all ".cc" project files
- */
-
-/*------------------------------------------------------------------------*/
-/* Includes */
-/*------------------------------------------------------------------------*/
-#include "mem/ruby/common/Global.hh"
-#include <string>
-#include <map>
-#include <set>
-#include <list>
-#include <fstream>
-#include <iostream>
-
-#include <math.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <stdarg.h> // va_start(), va_end()
-#include <strings.h> // declaration of bzero()
-
-#include <sys/time.h> // gettimeofday() includes
-#include <errno.h>
-#include <unistd.h>
-
-/*------------------------------------------------------------------------*/
-/* Type Includes */
-/*------------------------------------------------------------------------*/
-
-#include "mem/ruby/storebuffer/hfatypes.hh"
-
-/*------------------------------------------------------------------------*/
-/* Forward class declaration(s) */
-/*------------------------------------------------------------------------*/
-
-class wait_list_t;
-class waiter_t;
-class free_list_t;
-class pipestate_t;
-class pipepool_t;
-
-
-/** Maximum size of a load or store that may occur to/from the memory system.
- * (in 64-bit quantities). Currently this is set to 8 * 64-bits = 64-bytes.
- */
-const uint32 MEMOP_MAX_SIZE = 8;
-
-/** 64-bit int memory masks */
-#define MEM_BYTE_MASK 0x00000000000000ffULL
-#define MEM_HALF_MASK 0x000000000000ffffULL
-#define MEM_WORD_MASK 0x00000000ffffffffULL
-#define MEM_EXTD_MASK 0xffffffffffffffffULL
-#define MEM_QUAD_MASK 0xffffffffffffffffULL
-
-#define ISEQ_MASK 0x0000ffffffffffffULL
-
-/*------------------------------------------------------------------------*/
-/* Configuration Parameters */
-/*------------------------------------------------------------------------*/
-
-#define SIM_HALT assert(0);
-
-#include <assert.h>
-
-#endif /* _HFA_H_ */
-
-
diff --git a/src/mem/ruby/storebuffer/hfatypes.hh b/src/mem/ruby/storebuffer/hfatypes.hh
deleted file mode 100644
index c4d0de2e6..000000000
--- a/src/mem/ruby/storebuffer/hfatypes.hh
+++ /dev/null
@@ -1,80 +0,0 @@
-/*
- * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met: redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer;
- * redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution;
- * neither the name of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef _HFATYPES_H_
-#define _HFATYPES_H_
-
-/*
- * Global include file for entire project.
- * Should be included first in all ".cc" project files
- */
-
-/*------------------------------------------------------------------------*/
-/* Includes */
-/*------------------------------------------------------------------------*/
-/*------------------------------------------------------------------------*/
-/* SimIcs Includes */
-/*------------------------------------------------------------------------*/
-
-/* import C functions */
-
-
-/*------------------------------------------------------------------------*/
-/* Forward class declaration(s) */
-/*------------------------------------------------------------------------*/
-
-/*------------------------------------------------------------------------*/
-/* Macro declarations */
-/*------------------------------------------------------------------------*/
-
-// definitions of MAX / MIN (if needed)
-#ifndef MAX
-#define MAX(a, b) ((a) > (b) ? (a) : (b))
-#endif
-
-#ifndef MIN
-#define MIN(a, b) ((a) < (b) ? (a) : (b))
-#endif
-
-/* Statistics tracking definition */
-#define STAT_INC(A) (A)++
-
-/*------------------------------------------------------------------------*/
-/* Enumerations */
-/*------------------------------------------------------------------------*/
-
-/*------------------------------------------------------------------------*/
-/* Project Includes */
-/*------------------------------------------------------------------------*/
-
-typedef unsigned char byte_t; /* byte - 8 bits */
-typedef unsigned short half_t; /* half - 16 bits */
-typedef unsigned int word_t; /* word - 32 bits */
-typedef uint64 tick_t; /* time - 64 bit */
-
-#endif /* _HFATYPES_H_ */
diff --git a/src/mem/ruby/storebuffer/interface.cc b/src/mem/ruby/storebuffer/interface.cc
deleted file mode 100644
index 1ee6ee3a0..000000000
--- a/src/mem/ruby/storebuffer/interface.cc
+++ /dev/null
@@ -1,67 +0,0 @@
-/*
- * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met: redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer;
- * redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution;
- * neither the name of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "mem/ruby/libruby.hh"
-#include "writebuffer.hh"
-#include <iostream>
-
-writebuffer_status_t handleStore (writebuffer_t * writebuffer, const RubyRequest & request) {
- assert(request.type == RubyRequestType_ST);
- if (writebuffer->writeBufferFull()){
- return WB_FULL;
- }
- else if (writebuffer->writeBufferFlushing()) {
- return WB_FLUSHING;
- }
- else {
- writebuffer->addToWriteBuffer(request);
- return WB_OK;
- }
-}
-
-uint64_t handleLoad(writebuffer_t * writebuffer, const RubyRequest & request) {
- assert(request.type == RubyRequestType_LD);
- return writebuffer->handleLoad(request);
-}
-
-uint64_t handleAtomic(writebuffer_t * writebuffer, const RubyRequest & request) {
- // flush the store buffer
- writebuffer->flushWriteBuffer();
- // let writebuffer issue atomic
- //return writebuffer->issueAtomic(request);
-}
-
-void flushSTB(writebuffer_t * writebuffer) {
- // in in-order can't get a request to flushSTB if already flushing
- // on out of order, have to check if already flushing
- writebuffer->flushWriteBuffer();
-}
-
-void registerHitCallback(writebuffer_t * writebuffer, void (*hit_callback)(int64_t access_id)) {
- writebuffer->registerHitCallback(hit_callback);
-}
diff --git a/src/mem/ruby/storebuffer/interface.hh b/src/mem/ruby/storebuffer/interface.hh
deleted file mode 100644
index cbf010275..000000000
--- a/src/mem/ruby/storebuffer/interface.hh
+++ /dev/null
@@ -1,46 +0,0 @@
-/*
- * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met: redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer;
- * redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution;
- * neither the name of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef STB_H
-#define STB_H
-
-#include "mem/ruby/libruby.hh"
-#include "writebuffer.hh"
-#include <iostream>
-
-writebuffer_status_t handleStore (writebuffer_t * writebuffer, const RubyRequest & request);
-
-uint64_t handleLoad(writebuffer_t * writebuffer, const RubyRequest & request);
-
-uint64_t handleAtomic(writebuffer_t * writebuffer, const RubyRequest & request);
-
-void flushSTB(writebuffer_t * writebuffer);
-
-void registerHitCallback(writebuffer_t * writebuffer, void (*hit_callback)(int64_t access_id));
-
-#endif
diff --git a/src/mem/ruby/storebuffer/stb_interface.cc b/src/mem/ruby/storebuffer/stb_interface.cc
index cc93d3b51..e3d6f29ed 100644
--- a/src/mem/ruby/storebuffer/stb_interface.cc
+++ b/src/mem/ruby/storebuffer/stb_interface.cc
@@ -27,49 +27,59 @@
*/
#include <iostream>
+
#include "mem/ruby/storebuffer/stb_interface.hh"
-StoreBuffer * createNewSTB(uint32 id, uint32 block_bits, int storebuffer_size) {
- StoreBuffer * stb = new StoreBuffer(id, block_bits, storebuffer_size);
- return stb;
+StoreBuffer *
+createNewSTB(uint32 id, uint32 block_bits, int storebuffer_size)
+{
+ StoreBuffer *stb = new StoreBuffer(id, block_bits, storebuffer_size);
+ return stb;
}
-storebuffer_status_t handleStore (StoreBuffer * storebuffer, const RubyRequest & request) {
- assert(request.type == RubyRequestType_ST);
- if (storebuffer->storeBufferFull()){
- return WB_FULL;
- }
- else if (storebuffer->storeBufferFlushing()) {
- return WB_FLUSHING;
- }
- else {
- storebuffer->addToStoreBuffer(request);
- return WB_OK;
- }
+storebuffer_status_t
+handleStore(StoreBuffer *storebuffer, const RubyRequest &request)
+{
+ assert(request.type == RubyRequestType_ST);
+ if (storebuffer->storeBufferFull()){
+ return WB_FULL;
+ } else if (storebuffer->storeBufferFlushing()) {
+ return WB_FLUSHING;
+ } else {
+ storebuffer->addToStoreBuffer(request);
+ return WB_OK;
+ }
}
-uint64_t handleLoad(StoreBuffer * storebuffer, const RubyRequest & request) {
- assert(request.type == RubyRequestType_LD);
- return storebuffer->handleLoad(request);
+uint64_t
+handleLoad(StoreBuffer *storebuffer, const RubyRequest &request)
+{
+ assert(request.type == RubyRequestType_LD);
+ return storebuffer->handleLoad(request);
}
#if 0
-uint64_t handleAtomic(StoreBuffer * storebuffer, const RubyRequest & request) {
- // flush the store buffer
- storebuffer->flushStoreBuffer();
- // let storebuffer issue atomic
- //return storebuffer->issueAtomic(request);
+uint64_t
+handleAtomic(StoreBuffer *storebuffer, const RubyRequest &request)
+{
+ // flush the store buffer
+ storebuffer->flushStoreBuffer();
+ // let storebuffer issue atomic
+ // return storebuffer->issueAtomic(request);
}
#endif
-void flushSTB(StoreBuffer * storebuffer) {
- // in in-order can't get a request to flushSTB if already flushing
- // on out of order, have to check if already flushing
- storebuffer->flushStoreBuffer();
+void
+flushSTB(StoreBuffer *storebuffer)
+{
+ // in in-order can't get a request to flushSTB if already flushing
+ // on out of order, have to check if already flushing
+ storebuffer->flushStoreBuffer();
}
-void registerHitCallback(StoreBuffer * storebuffer, void (*hit_callback)(int64_t access_id)) {
- storebuffer->registerHitCallback(hit_callback);
+void
+registerHitCallback(StoreBuffer *storebuffer,
+ void (*hit_callback)(int64_t access_id))
+{
+ storebuffer->registerHitCallback(hit_callback);
}
-
-
diff --git a/src/mem/ruby/storebuffer/stb_interface.hh b/src/mem/ruby/storebuffer/stb_interface.hh
index e1a026abc..b7f1b152d 100644
--- a/src/mem/ruby/storebuffer/stb_interface.hh
+++ b/src/mem/ruby/storebuffer/stb_interface.hh
@@ -27,16 +27,12 @@
*/
#include "mem/ruby/storebuffer/storebuffer.hh"
-#include <iostream>
-StoreBuffer * createNewSTB (uint32 id, uint32 block_bits, int storebuffer_size);
-
-storebuffer_status_t handleStore (StoreBuffer * storebuffer, const RubyRequest & request);
-
-uint64_t handleLoad(StoreBuffer * storebuffer, const RubyRequest & request);
-
-uint64_t handleAtomic(StoreBuffer * storebuffer, const RubyRequest & request);
-
-void flushSTB(StoreBuffer * storebuffer);
-
-void registerHitCallback(StoreBuffer * storebuffer, void (*hit_callback)(int64_t access_id));
+StoreBuffer *createNewSTB(uint32 id, uint32 block_bits, int storebuffer_size);
+storebuffer_status_t handleStore(StoreBuffer *storebuffer,
+ const RubyRequest &request);
+uint64_t handleLoad(StoreBuffer *storebuffer, const RubyRequest &request);
+uint64_t handleAtomic(StoreBuffer *storebuffer, const RubyRequest &request);
+void flushSTB(StoreBuffer *storebuffer);
+void registerHitCallback(StoreBuffer *storebuffer,
+ void (*hit_callback)(int64_t access_id));
diff --git a/src/mem/ruby/storebuffer/storebuffer.cc b/src/mem/ruby/storebuffer/storebuffer.cc
index ed97b216c..d6ec0959e 100644
--- a/src/mem/ruby/storebuffer/storebuffer.cc
+++ b/src/mem/ruby/storebuffer/storebuffer.cc
@@ -26,240 +26,216 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-/*------------------------------------------------------------------------*/
-/* Includes */
-/*------------------------------------------------------------------------*/
-
#include <map>
-#include "mem/ruby/storebuffer/hfa.hh"
-#include "mem/ruby/storebuffer/storebuffer.hh"
#include "mem/ruby/common/Global.hh"
+#include "mem/ruby/storebuffer/storebuffer.hh"
-#if RUBY_TSO_CHECKER
-#include "TsoChecker.hh"
-#endif
+using namespace std;
#define SYSTEM_EXIT ASSERT(0)
-
// global map of request id_s to map them back to storebuffer pointers
-map <uint64_t, StoreBuffer *> request_map;
-
-#if RUBY_TSO_CHECKER
-Tso::TsoChecker * g_tsoChecker;
-#endif
+map<uint64_t, StoreBuffer *> request_map;
-void hit(int64_t id) {
- if (request_map.find(id) == request_map.end()) {
- ERROR_OUT("Request ID not found in the map");
- DEBUG_EXPR(STOREBUFFER_COMP, MedPrio, id);
- ASSERT(0);
- }
- else {
- request_map[id]->complete(id);
- request_map.erase(id);
- }
+void
+hit(int64_t id)
+{
+ if (request_map.find(id) == request_map.end()) {
+ ERROR_OUT("Request ID not found in the map");
+ DEBUG_EXPR(STOREBUFFER_COMP, MedPrio, id);
+ ASSERT(0);
+ } else {
+ request_map[id]->complete(id);
+ request_map.erase(id);
+ }
}
+StoreBuffer::StoreBuffer(uint32 id, uint32 block_bits, int storebuffer_size)
+{
+ iseq = 0;
+ tso_iseq = 0;
+ char name [] = "Sequencer_";
+ char port_name [13];
+ sprintf(port_name, "%s%d", name, id);
+ m_port = libruby_get_port(port_name, hit);
+ m_hit_callback = NULL;
+ ASSERT(storebuffer_size >= 0);
+ m_storebuffer_size = storebuffer_size;
+ m_id = id;
+ m_block_size = 1 << block_bits;
+ m_block_mask = ~(m_block_size - 1);
+ m_buffer_size = 0;
+ m_use_storebuffer = false;
+ m_storebuffer_full = false;
+ m_storebuffer_flushing = false;
+ m_stalled_issue = true;
+ if (m_storebuffer_size > 0){
+ m_use_storebuffer = true;
+ }
-//*****************************************************************************************
-StoreBuffer::StoreBuffer(uint32 id, uint32 block_bits, int storebuffer_size) {
-#if RUBY_TSO_CHECKER
- if (id == 0) {
- g_tsoChecker = new Tso::TsoChecker();
- g_tsoChecker->init(64);
- }
+#ifdef DEBUG_WRITE_BUFFER
+ DEBUG_OUT("*******storebuffer_t::Using Write Buffer? %d\n",
+ m_use_storebuffer);
#endif
- iseq = 0;
- tso_iseq = 0;
- char name [] = "Sequencer_";
- char port_name [13];
- sprintf(port_name, "%s%d", name, id);
- m_port = libruby_get_port(port_name, hit);
- m_hit_callback = NULL;
- ASSERT(storebuffer_size >= 0);
- m_storebuffer_size = storebuffer_size;
- m_id = id;
- m_block_size = 1 << block_bits;
- m_block_mask = ~(m_block_size - 1);
- m_buffer_size = 0;
- m_use_storebuffer = false;
- m_storebuffer_full = false;
- m_storebuffer_flushing = false;
- m_stalled_issue = true;
- if(m_storebuffer_size > 0){
- m_use_storebuffer = true;
- }
-
- #ifdef DEBUG_WRITE_BUFFER
- DEBUG_OUT("*******storebuffer_t::Using Write Buffer? %d\n",m_use_storebuffer);
- #endif
}
-//******************************************************************************************
-StoreBuffer::~StoreBuffer(){
-#if RUBY_TSO_CHECKER
- if (m_id == 0) {
- delete g_tsoChecker;
- }
-#endif
+StoreBuffer::~StoreBuffer()
+{
}
-//*****************************************************************************************************
-void StoreBuffer::registerHitCallback(void (*hit_callback)(int64_t request_id)) {
- assert(m_hit_callback == NULL); // can't assign hit_callback twice
- m_hit_callback = hit_callback;
+void
+StoreBuffer::registerHitCallback(void (*hit_callback)(int64_t request_id))
+{
+ assert(m_hit_callback == NULL); // can't assign hit_callback twice
+ m_hit_callback = hit_callback;
}
-//*****************************************************************************************************
-void StoreBuffer::addToStoreBuffer(struct RubyRequest request){
- if(m_use_storebuffer){
- #ifdef DEBUG_WRITE_BUFFER
- DEBUG_OUT("\n***StoreBuffer: addToStoreBuffer BEGIN, contents:\n");
- DEBUG_OUT("\n");
- #endif
+void
+StoreBuffer::addToStoreBuffer(RubyRequest request)
+{
+ if (!m_use_storebuffer) {
+ // make request to libruby
+ uint64_t id = libruby_issue_request(m_port, request);
+ if (request_map.find(id) != request_map.end()) {
+ ERROR_OUT("Request ID is already in the map");
+ DEBUG_EXPR(STOREBUFFER_COMP, MedPrio, id);
+ ASSERT(0);
+ } else {
+ request_map.insert(make_pair(id, this));
+ outstanding_requests.insert(make_pair(id, request));
+ }
+ return;
+ }
- #ifdef DEBUG_WRITE_BUFFER
- DEBUG_OUT("\t INSERTING new request\n");
- #endif
+#ifdef DEBUG_WRITE_BUFFER
+ DEBUG_OUT("\n***StoreBuffer: addToStoreBuffer BEGIN, contents:\n");
+ DEBUG_OUT("\n");
+ DEBUG_OUT("\t INSERTING new request\n");
+#endif
buffer.push_front(SBEntry(request, NULL));
m_buffer_size++;
if (m_buffer_size >= m_storebuffer_size) {
- m_storebuffer_full = true;
- }
- else if (m_stalled_issue) {
- m_stalled_issue = false;
- issueNextStore();
+ m_storebuffer_full = true;
+ } else if (m_stalled_issue) {
+ m_stalled_issue = false;
+ issueNextStore();
}
iseq++;
- #ifdef DEBUG_WRITE_BUFFER
- DEBUG_OUT("***StoreBuffer: addToStoreBuffer END, contents:\n");
- DEBUG_OUT("\n");
- #endif
- } //end if(m_use_storebuffer)
- else {
- // make request to libruby
- uint64_t id = libruby_issue_request(m_port, request);
- if (request_map.find(id) != request_map.end()) {
- ERROR_OUT("Request ID is already in the map");
- DEBUG_EXPR(STOREBUFFER_COMP, MedPrio, id);
- ASSERT(0);
- }
- else {
- request_map.insert(make_pair(id, this));
- outstanding_requests.insert(make_pair(id, request));
- }
- }
+#ifdef DEBUG_WRITE_BUFFER
+ DEBUG_OUT("***StoreBuffer: addToStoreBuffer END, contents:\n");
+ DEBUG_OUT("\n");
+#endif
}
-//*****************************************************************************************************
-// Return value of -2 indicates that the load request was satisfied by the store buffer
-// Return value of -3 indicates a partial match, so the load has to retry until NO_MATCH
-// Alternatively we could satisfy the partial match, but tso gets complicated and more races
-//*****************************************************************************************************
-int64_t StoreBuffer::handleLoad(struct RubyRequest request) {
- if (m_use_storebuffer) {
+// Return value of -2 indicates that the load request was satisfied by
+// the store buffer
+// Return value of -3 indicates a partial match, so the load has to
+// retry until NO_MATCH
+// Alternatively we could satisfy the partial match, but tso gets
+// complicated and more races
+int64_t
+StoreBuffer::handleLoad(RubyRequest request)
+{
+ if (!m_use_storebuffer) {
+ // make a request to ruby
+ return libruby_issue_request(m_port, request);
+ }
+
load_match match = checkForLoadHit(request);
if (match == FULL_MATCH) {
- // fill data
- returnMatchedData(request);
- iseq++;
- return -2;
- }
- else if (match == NO_MATCH) {
- // make request to libruby and return the id
- uint64_t id = libruby_issue_request(m_port, request);
- if (request_map.find(id) != request_map.end()) {
- ERROR_OUT("Request ID is already in the map");
- DEBUG_EXPR(STOREBUFFER_COMP, MedPrio, id);
- ASSERT(0);
- }
- else {
- request_map.insert(make_pair(id, this));
- outstanding_requests.insert(make_pair(id, request));
- }
- iseq++;
- return id;
- }
- else { // partial match
- return -3;
+ // fill data
+ returnMatchedData(request);
+ iseq++;
+ return -2;
+ } else if (match == NO_MATCH) {
+ // make request to libruby and return the id
+ uint64_t id = libruby_issue_request(m_port, request);
+ if (request_map.find(id) != request_map.end()) {
+ ERROR_OUT("Request ID is already in the map");
+ DEBUG_EXPR(STOREBUFFER_COMP, MedPrio, id);
+ ASSERT(0);
+ } else {
+ request_map.insert(make_pair(id, this));
+ outstanding_requests.insert(make_pair(id, request));
+ }
+ iseq++;
+ return id;
+ } else { // partial match
+ return -3;
}
- }
- else {
- // make a request to ruby
- return libruby_issue_request(m_port, request);
- }
}
-
-//*****************************************************************************************************
// This function will fill the data array if any match is found
-//*****************************************************************************************************
-load_match StoreBuffer::checkForLoadHit(struct RubyRequest request) {
- if (m_use_storebuffer) {
+load_match
+StoreBuffer::checkForLoadHit(RubyRequest request)
+{
+ if (!m_use_storebuffer) {
+ // this function should never be called if we are not using a
+ // store buffer
+ ERROR_OUT("checkForLoadHit called while write buffer is not in use");
+ ASSERT(0);
+ }
+
physical_address_t physical_address = request.paddr;
int len = request.len;
uint8_t * data = new uint8_t[64];
memset(data, 0, 64);
- for (int i = physical_address%64; i < len; i++) {
- data[i] = 1;
- }
+ for (int i = physical_address % 64; i < len; i++)
+ data[i] = 1;
bool found = false;
physical_address_t lineaddr = physical_address & m_block_mask;
// iterate over the buffer looking for hits
- for (deque<struct SBEntry>::iterator it = buffer.begin(); it != buffer.end(); it++) {
- if ((it->m_request.paddr & m_block_mask) == lineaddr) {
+ deque<SBEntry>::iterator it = buffer.begin();
+ for (; it != buffer.end(); it++) {
+ RubyRequest &req = it->m_request;
+ if ((req.paddr & m_block_mask) != lineaddr)
+ continue;
+
found = true;
- for (int i = it->m_request.paddr%64; i < it->m_request.len; i++) {
- data[i] = 0;
- }
- }
+ for (int i = req.paddr % 64; i < req.len; i++)
+ data[i] = 0;
}
- // if any matching entry is found, determine if all the requested bytes have been matched
+ // if any matching entry is found, determine if all the
+ // requested bytes have been matched
if (found) {
- ASSERT(m_buffer_size > 0);
- int unmatched_bytes = 0;
- for (int i = physical_address%64; i < len; i++) {
- unmatched_bytes = unmatched_bytes + data[i];
- }
- if (unmatched_bytes == 0) {
- delete data;
- return FULL_MATCH;
- }
- else {
+ ASSERT(m_buffer_size > 0);
+ int unmatched_bytes = 0;
+ for (int i = physical_address%64; i < len; i++) {
+ unmatched_bytes = unmatched_bytes + data[i];
+ }
+ if (unmatched_bytes == 0) {
+ delete data;
+ return FULL_MATCH;
+ } else {
+ delete data;
+ return PARTIAL_MATCH;
+ }
+ } else {
delete data;
- return PARTIAL_MATCH;
- }
- }
- else {
- delete data;
- return NO_MATCH;
+ return NO_MATCH;
}
- } // end of if (m_use_storebuffer)
- else {
- // this function should never be called if we are not using a store buffer
- ERROR_OUT("checkForLoadHit called while write buffer is not in use");
- ASSERT(0);
- }
}
-
-//***************************************************************************************************
-void StoreBuffer::returnMatchedData(struct RubyRequest request) {
- if (m_use_storebuffer) {
+void
+StoreBuffer::returnMatchedData(RubyRequest request)
+{
+ if (!m_use_storebuffer) {
+ ERROR_OUT("returnMatchedData called while write buffer is not in use");
+ ASSERT(0);
+ }
uint8_t * data = new uint8_t[64];
memset(data, 0, 64);
@@ -272,114 +248,75 @@ void StoreBuffer::returnMatchedData(struct RubyRequest request) {
ASSERT(checkForLoadHit(request) != NO_MATCH);
physical_address_t lineaddr = physical_address & m_block_mask;
bool found = false;
-#if RUBY_TSO_CHECKER
- Tso::TsoCheckerCmd * cmd;
-#endif
- deque<struct SBEntry>::iterator satisfying_store;
- for (deque<struct SBEntry>::iterator it = buffer.begin(); it != buffer.end(); it++) {
- if ((it->m_request.paddr & m_block_mask) == lineaddr) {
- if (!found) {
- found = true;
-#if RUBY_TSO_CHECKER
- satisfying_store = it;
- cmd = new Tso::TsoCheckerCmd(m_id, // this thread id
- iseq, // instruction sequence
- ITYPE_LOAD, // is a store
- MEM_LOAD_DATA, // commit
- request.paddr, // the address
- NULL, // and data
- request.len, // and len
- DSRC_STB, // shouldn't matter
- libruby_get_time(), // macc: for store macc and time are the same and it
- 0, // gobs
- 0);
-#endif
- }
- uint8_t * dataPtr = it->m_request.data;
- int offset = it->m_request.paddr%64;
- for (int i = offset; i < it->m_request.len; i++) {
- if (!written[i]) { // don't overwrite data with earlier data
- data[i] = dataPtr[i-offset];
- written[i] = 1;
- }
+ deque<SBEntry>::iterator satisfying_store;
+ deque<SBEntry>::iterator it = buffer.begin();
+ for (; it != buffer.end(); it++) {
+ if ((it->m_request.paddr & m_block_mask) == lineaddr) {
+ if (!found) {
+ found = true;
+ }
+ uint8_t * dataPtr = it->m_request.data;
+ int offset = it->m_request.paddr%64;
+ for (int i = offset; i < it->m_request.len; i++) {
+ if (!written[i]) { // don't overwrite data with earlier data
+ data[i] = dataPtr[i-offset];
+ written[i] = 1;
+ }
+ }
}
- }
}
int i = physical_address%64;
for (int j = 0; (i < physical_address%64 + len) && (j < len); i++, j++) {
- if (written[i]) {
- request.data[j] = data[i];
- }
- }
-
-#if RUBY_TSO_CHECKER
- uint64_t tso_data = 0;
- memcpy(&tso_data, request.data, request.len);
- cmd->setData(tso_data);
-
- Tso::TsoCheckerCmd * adjust_cmd = satisfying_store->m_next_ptr;
- if (adjust_cmd == NULL) {
- adjust_cmd = cmd;
- }
- else {
- while (adjust_cmd->getNext() != NULL) {
- adjust_cmd = adjust_cmd->getNext();
- }
- adjust_cmd->setNext(cmd);
+ if (written[i]) {
+ request.data[j] = data[i];
+ }
}
-#endif
delete data;
delete written;
- }
- else {
- ERROR_OUT("returnMatchedData called while write buffer is not in use");
- ASSERT(0);
- }
}
+void
+StoreBuffer::flushStoreBuffer()
+{
+ if (!m_use_storebuffer) {
+ // do nothing
+ return;
+ }
+
+#ifdef DEBUG_WRITE_BUFFER
+ DEBUG_OUT("\n***StoreBuffer: flushStoreBuffer BEGIN, contents:\n");
+ DEBUG_OUT("\n");
+#endif
-//******************************************************************************************
-void StoreBuffer::flushStoreBuffer(){
- if (m_use_storebuffer) {
- #ifdef DEBUG_WRITE_BUFFER
- DEBUG_OUT("\n***StoreBuffer: flushStoreBuffer BEGIN, contents:\n");
- DEBUG_OUT("\n");
- #endif
-
- if(m_buffer_size > 0) {
- m_storebuffer_flushing = true; // indicate that we are flushing
- }
- else {
- m_storebuffer_flushing = false;
- return;
- }
- }
- else {
- // do nothing
- return;
- }
+ m_storebuffer_flushing = (m_buffer_size > 0);
}
-//****************************************************************************************
-void StoreBuffer::issueNextStore() {
- SBEntry request = buffer.back();
- uint64_t id = libruby_issue_request(m_port, request.m_request);
- if (request_map.find(id) != request_map.end()) {
- assert(0);
- }
- else {
- request_map.insert(make_pair(id, this));
- outstanding_requests.insert(make_pair(id, request.m_request));
- }
+void
+StoreBuffer::issueNextStore()
+{
+ SBEntry request = buffer.back();
+ uint64_t id = libruby_issue_request(m_port, request.m_request);
+ if (request_map.find(id) != request_map.end()) {
+ assert(0);
+ } else {
+ request_map.insert(make_pair(id, this));
+ outstanding_requests.insert(make_pair(id, request.m_request));
+ }
}
-//****************************************************************************************
-void StoreBuffer::complete(uint64_t id) {
- if (m_use_storebuffer) {
+void
+StoreBuffer::complete(uint64_t id)
+{
+ if (!m_use_storebuffer) {
+ m_hit_callback(id);
+ return;
+ }
+
ASSERT(outstanding_requests.find(id) != outstanding_requests.end());
- physical_address_t physical_address = outstanding_requests.find(id)->second.paddr;
+ physical_address_t physical_address =
+ outstanding_requests.find(id)->second.paddr;
RubyRequestType type = outstanding_requests.find(id)->second.type;
#ifdef DEBUG_WRITE_BUFFER
DEBUG_OUT("\n***StoreBuffer: complete BEGIN, contents:\n");
@@ -387,184 +324,59 @@ void StoreBuffer::complete(uint64_t id) {
#endif
if (type == RubyRequestType_ST) {
- physical_address_t lineaddr = physical_address & m_block_mask;
-
- //Note fastpath hits are handled like regular requests - they must remove the WB entry!
- if ( lineaddr != physical_address ) {
- ERROR_OUT("error: StoreBuffer: ruby returns pa 0x%0llx which is not a cache line: 0x%0llx\n", physical_address, lineaddr );
- }
-
- SBEntry from_buffer = buffer.back();
- if (((from_buffer.m_request.paddr & m_block_mask) == lineaddr) && (from_buffer.m_request.type == type)) {
- buffer.pop_back();
- m_buffer_size--;
- ASSERT(m_buffer_size >= 0);
-
-#if RUBY_TSO_CHECKER
- int len = outstanding_requests.find(id)->second.len;
- uint64_t data = 0;
- memcpy(&data, from_buffer.m_request.data, 4);
-
- cerr << m_id << " INSERTING STORE" << endl << flush;
- // add to the tsoChecker
- g_tsoChecker->input(m_id, // this thread id
- (id & ISEQ_MASK), // instruction sequence
- ITYPE_STORE, // is a store
- MEM_STORE_COMMIT, // commit
- physical_address, // the address
- data, // and data
- len, // and len
- DSRC_STB, // shouldn't matter
- libruby_get_time(), // macc
- libruby_get_time(), // gobs
- libruby_get_time()); // time
- tso_iseq++;
-
- // also add the loads that are satisfied by this store
- if (from_buffer.m_next_ptr != NULL) {
- from_buffer.m_next_ptr->setGobs(libruby_get_time());
- g_tsoChecker->input(*(from_buffer.m_next_ptr));
- cerr << m_id << " INSERTING LOAD for STORE: " << from_buffer.m_next_ptr->getIseq() << endl << flush;
- tso_iseq++;
- Tso::TsoCheckerCmd * to_input = from_buffer.m_next_ptr->getNext();
- while (to_input != NULL) {
- if (to_input->getGobs() == 0) {
- to_input->setGobs(libruby_get_time());
- }
- cerr << m_id << " INSERTING LOAD iseq for STORE: " << to_input->getIseq() << endl << flush;
- g_tsoChecker->input(*to_input);
- tso_iseq++;
- to_input = to_input->getNext();
- }
- }
-#endif
- // schedule the next request
- if (m_buffer_size > 0) {
- issueNextStore();
- }
- else if (m_buffer_size == 0) {
- m_storebuffer_flushing = false;
- m_stalled_issue = true;
+ physical_address_t lineaddr = physical_address & m_block_mask;
+
+ // Note fastpath hits are handled like regular requests - they
+ // must remove the WB entry!
+ if (lineaddr != physical_address) {
+ ERROR_OUT("error: StoreBuffer: ruby returns pa 0x%0llx "
+ "which is not a cache line: 0x%0llx\n",
+ physical_address, lineaddr);
}
- m_storebuffer_full = false;
+ SBEntry from_buffer = buffer.back();
+ if ((from_buffer.m_request.paddr & m_block_mask) == lineaddr &&
+ from_buffer.m_request.type == type) {
+ buffer.pop_back();
+ m_buffer_size--;
+ ASSERT(m_buffer_size >= 0);
+
+ // schedule the next request
+ if (m_buffer_size > 0) {
+ issueNextStore();
+ } else if (m_buffer_size == 0) {
+ m_storebuffer_flushing = false;
+ m_stalled_issue = true;
+ }
- }
- else {
- ERROR_OUT("[%d] error: StoreBuffer: at complete, address 0x%0llx not found.\n", m_id, lineaddr);
- ERROR_OUT("StoreBuffer:: complete FAILS\n");
- ASSERT(0);
- }
+ m_storebuffer_full = false;
+ } else {
+ ERROR_OUT("[%d] error: StoreBuffer: at complete, address 0x%0llx "
+ "not found.\n", m_id, lineaddr);
+ ERROR_OUT("StoreBuffer:: complete FAILS\n");
+ ASSERT(0);
+ }
#ifdef DEBUG_WRITE_BUFFER
- DEBUG_OUT("***StoreBuffer: complete END, contents:\n");
- DEBUG_OUT("\n");
-#endif
- } // end if (type == ST)
- else if (type == RubyRequestType_LD) {
-#if RUBY_TSO_CHECKER
- RubyRequest request = outstanding_requests.find(id)->second;
- uint64_t data = 0;
- memcpy(&data, request.data, request.len);
-
- // add to the tsoChecker if in order, otherwise, find a place to put ourselves
- if ((id & ISEQ_MASK) == tso_iseq) {
- tso_iseq++;
- cerr << m_id << " INSERTING LOAD" << endl << flush;
- g_tsoChecker->input(m_id, // this thread id
- (id & ISEQ_MASK), // instruction sequence
- ITYPE_LOAD, // is a store
- MEM_LOAD_DATA, // commit
- request.paddr, // the address
- data, // and data
- request.len, // and len
- DSRC_L2_MEMORY, // shouldn't matter DSRC_L1
- libruby_get_time(), // macc: for store macc and time are the same and it
- libruby_get_time(), // macc
- libruby_get_time()); // time
- }
- else {
- Tso::TsoCheckerCmd * cmd;
- cmd = new Tso::TsoCheckerCmd(m_id, // this thread id
- (id & ISEQ_MASK), // instruction sequence
- ITYPE_LOAD, // is a store
- MEM_LOAD_DATA, // commit
- request.paddr, // the address
- data, // and data
- request.len, // and len
- DSRC_L2_MEMORY, // shouldn't matter DSRC_L1
- libruby_get_time(), // macc: for store macc and time are the same and it
- libruby_get_time(), // macc
- libruby_get_time()); // time
- insertTsoLL(cmd);
- }
+ DEBUG_OUT("***StoreBuffer: complete END, contents:\n");
+ DEBUG_OUT("\n");
#endif
- m_hit_callback(id);
+ } else if (type == RubyRequestType_LD) {
+ m_hit_callback(id);
}
// LD, ST or FETCH hit callback
outstanding_requests.erase(id);
-
- } // end if(m_use_storebuffer)
- else {
- m_hit_callback(id);
- }
}
-#if RUBY_TSO_CHECKER
-void StoreBuffer::insertTsoLL(Tso::TsoCheckerCmd * cmd) {
- uint64_t count = cmd->getIseq();
- Tso::TsoCheckerCmd * current = NULL;
- Tso::TsoCheckerCmd * previous = NULL;
- deque<struct SBEntry>::reverse_iterator iter;
- bool found = false;
- for (iter = buffer.rbegin(); iter != buffer.rend(); ++ iter) {
- if (iter->m_next_ptr != NULL) {
- current = iter->m_next_ptr->getNext(); // initalize both to the beginning of the linked list
- previous = current;
- while (current != NULL) {
- if (current->getIseq() > count) {
- found = true;
- break;
- }
- previous = current;
- current = current->getNext();
- }
- }
- // break out if found a match, iterator should still point to the right SBEntry
- if (found) {
- break;
- }
- }
-
- // will insert at the end if not found
- if (!found) {
- buffer.front().m_next_ptr = cmd;
- }
- else if (current == previous) {
- cerr << "INSERTING " << count << " BEFORE: " << iter->m_next_ptr->getIseq();
- Tso::TsoCheckerCmd * temp = iter->m_next_ptr;
- iter->m_next_ptr = cmd;
- cmd->setNext(temp);
- }
- else {
- cerr << "INSERTING " << count << " BETWEEN: " << previous->getIseq() << " AND " << current->getIseq();
- cmd->setNext(current);
- previous->setNext(cmd);
- }
-}
-#endif
-
-//***************************************************************************************************
-void StoreBuffer::print( void )
+void
+StoreBuffer::print()
{
- DEBUG_OUT("[%d] StoreBuffer: Total entries: %d Outstanding: %d\n", m_id, m_buffer_size);
+ DEBUG_OUT("[%d] StoreBuffer: Total entries: %d Outstanding: %d\n",
+ m_id, m_buffer_size);
- if(m_use_storebuffer){
- }
- else{
- DEBUG_OUT("\t WRITE BUFFER NOT USED\n");
- }
+ if (!m_use_storebuffer)
+ DEBUG_OUT("\t WRITE BUFFER NOT USED\n");
}
diff --git a/src/mem/ruby/storebuffer/storebuffer.hh b/src/mem/ruby/storebuffer/storebuffer.hh
index 67555f48f..6d476706b 100644
--- a/src/mem/ruby/storebuffer/storebuffer.hh
+++ b/src/mem/ruby/storebuffer/storebuffer.hh
@@ -26,23 +26,18 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef _STOREBUFFER_H_
-#define _STOREBUFFER_H_
+#ifndef __MEM_RUBY_STOREBUFFER_STOREBUFFER_HH__
+#define __MEM_RUBY_STOREBUFFER_STOREBUFFER_HH__
-#include <map>
#include <deque>
+#include <map>
-#include "config/ruby_tso_checker.hh"
-#include "mem/ruby/storebuffer/hfa.hh"
+#include "mem/ruby/common/TypeDefines.hh"
#include "mem/ruby/libruby.hh"
-#if RUBY_TSO_CHECKER
-#include "TsoCheckerCmd.hh"
-#endif
-
/**
- * Status for write buffer accesses. The Write buffer can hit in fastpath, be full, or
- * successfully enqueue the store request
+ * Status for write buffer accesses. The Write buffer can hit in
+ * fastpath, be full, or successfully enqueue the store request
*/
enum storebuffer_status_t { WB_FULL, WB_OK, WB_FLUSHING };
@@ -51,114 +46,106 @@ enum storebuffer_status_t { WB_FULL, WB_OK, WB_FLUSHING };
*/
enum load_match { NO_MATCH, PARTIAL_MATCH, FULL_MATCH };
-struct SBEntry {
- struct RubyRequest m_request;
-#if RUBY_TSO_CHECKER
- Tso::TsoCheckerCmd * m_next_ptr;
-#endif
- SBEntry(struct RubyRequest request, void * ptr)
- : m_request(request)
+struct SBEntry
+{
+ RubyRequest m_request;
+
+ SBEntry(RubyRequest request, void * ptr)
+ : m_request(request)
{
-#if RUBY_TSO_CHECKER
- m_next_ptr = (Tso::TsoCheckerCmd*) ptr;
-#endif
}
};
-class StoreBuffer {
- public:
- ///Constructor
- /// Note that the size of the Write Buffer is determined by the WRITE_BUFFER_SIZE config parameter
- StoreBuffer(uint32 id, uint32 block_bits, int storebuffer_size);
-
- /// Register hitcallback back to CPU
- void registerHitCallback(void (*hit_callback)(int64_t request_id));
-
- /// Destructor
- ~StoreBuffer();
+class StoreBuffer
+{
+ public:
+ /// Note that the size of the Write Buffer is determined by the
+ /// WRITE_BUFFER_SIZE config parameter
+ StoreBuffer(uint32_t id, uint32_t block_bits, int storebuffer_size);
- ///Adds a store entry to the write buffer
- void addToStoreBuffer(struct RubyRequest request);
+ ~StoreBuffer();
- ///Flushes the entire write buffer
- void flushStoreBuffer();
+ /// Register hitcallback back to CPU
+ void registerHitCallback(void (*hit_callback)(int64_t request_id));
- ///A pseq object calls this when Ruby completes our request
- void complete(uint64_t);
+ ///Adds a store entry to the write buffer
+ void addToStoreBuffer(RubyRequest request);
- /// Returns ID. If ID == -2, HIT, else it's an ID to wait on
- int64_t handleLoad(struct RubyRequest request);
+ ///Flushes the entire write buffer
+ void flushStoreBuffer();
- /// Used by all load insts to check whether it hits to any entry in the WB. If so, the WB is flushed
- load_match checkForLoadHit(struct RubyRequest request);
+ ///A pseq object calls this when Ruby completes our request
+ void complete(uint64_t);
- /// Used to fill the load in case of FULL_MATCH
- void returnMatchedData(struct RubyRequest request);
+ /// Returns ID. If ID == -2, HIT, else it's an ID to wait on
+ int64_t handleLoad(RubyRequest request);
- /// Issue next store in line
- void issueNextStore();
+ /// Used by all load insts to check whether it hits to any entry
+ /// in the WB. If so, the WB is flushed
+ load_match checkForLoadHit(RubyRequest request);
- /// prints out the contents of the Write Buffer
- void print();
+ /// Used to fill the load in case of FULL_MATCH
+ void returnMatchedData(RubyRequest request);
-#if RUBY_TSO_CHECKER
- /// if load completes before store, insert correctly to be issued to TSOChecker
- void insertTsoLL(Tso::TsoCheckerCmd * cmd);
-#endif
+ /// Issue next store in line
+ void issueNextStore();
- /// Returns flag indicating whether we are using the write buffer
- bool useStoreBuffer() { return m_use_storebuffer; }
+ /// prints out the contents of the Write Buffer
+ void print();
- bool storeBufferFull() { return m_storebuffer_full; }
+ /// Returns flag indicating whether we are using the write buffer
+ bool useStoreBuffer() { return m_use_storebuffer; }
- bool storeBufferFlushing() { return m_storebuffer_flushing; }
+ bool storeBufferFull() { return m_storebuffer_full; }
- private:
- /// id of this write buffer (one per sequencer object)
- uint32 m_id;
+ bool storeBufferFlushing() { return m_storebuffer_flushing; }
- /// number of bytes in cacheline
- uint32 m_block_size;
+ private:
+ /// id of this write buffer (one per sequencer object)
+ uint32_t m_id;
- /// the size of the write buffer
- uint32 m_storebuffer_size;
+ /// number of bytes in cacheline
+ uint32_t m_block_size;
- /// mask to strip off non-cache line bits
- pa_t m_block_mask;
+ /// the size of the write buffer
+ uint32_t m_storebuffer_size;
- /// list of store requests in the write buffer
- deque <struct SBEntry> buffer;
+ /// mask to strip off non-cache line bits
+ pa_t m_block_mask;
- /// the current length of the write buffer
- uint32 m_buffer_size;
+ /// list of store requests in the write buffer
+ std::deque<SBEntry> buffer;
- /// whether we want to simulate the write buffer or not:
- bool m_use_storebuffer;
+ /// the current length of the write buffer
+ uint32_t m_buffer_size;
- /// indicates whether the write buffer is full or not
- bool m_storebuffer_full;
+ /// whether we want to simulate the write buffer or not:
+ bool m_use_storebuffer;
- /// indicates that we are currently flushing the write buffer
- bool m_storebuffer_flushing;
+ /// indicates whether the write buffer is full or not
+ bool m_storebuffer_full;
- /// indicates that automatic issue is stalled and the next store to be added should issue itself
- bool m_stalled_issue;
+ /// indicates that we are currently flushing the write buffer
+ bool m_storebuffer_flushing;
- /// RubyPort to make requests to
- RubyPortHandle m_port;
+ /// indicates that automatic issue is stalled and the next store
+ /// to be added should issue itself
+ bool m_stalled_issue;
- /// HitCallback to CPU
- void (*m_hit_callback)(int64_t);
+ /// RubyPort to make requests to
+ RubyPortHandle m_port;
- /// Map the request id to rubyrequest
- map<uint64_t, struct RubyRequest> outstanding_requests;
+ /// HitCallback to CPU
+ void (*m_hit_callback)(int64_t);
- /// current instruction counter
- uint64_t iseq;
+ /// Map the request id to rubyrequest
+ std::map<uint64_t, RubyRequest> outstanding_requests;
+ /// current instruction counter
+ uint64_t iseq;
- /// input into tso counter
- uint64_t tso_iseq;
+ /// input into tso counter
+ uint64_t tso_iseq;
};
-#endif
+#endif // __MEM_RUBY_STOREBUFFER_STOREBUFFER_HH__
diff --git a/src/mem/ruby/system/DirectoryMemory.cc b/src/mem/ruby/system/DirectoryMemory.cc
index 93f537090..7fb9b44a0 100644
--- a/src/mem/ruby/system/DirectoryMemory.cc
+++ b/src/mem/ruby/system/DirectoryMemory.cc
@@ -110,7 +110,7 @@ DirectoryMemory::printGlobalConfig(ostream & out)
if (m_num_directories > 1) {
out << " number of selection bits: " << m_num_directories_bits << endl
<< " selection bits: " << m_numa_high_bit
- << "-" << m_numa_high_bit-m_num_directories_bits
+ << "-" << m_numa_high_bit-m_num_directories_bits
<< endl;
}
out << " total memory size bytes: " << m_total_size_bytes << endl;
@@ -216,7 +216,7 @@ DirectoryMemory::invalidateBlock(PhysAddress address)
assert(isPresent(address));
Index index = address.memoryModuleIndex();
-
+
if (index < 0 || index > m_size) {
ERROR_MSG("Directory Memory Assertion: "
"accessing memory out of range.");
diff --git a/src/mem/ruby/system/PersistentTable.cc b/src/mem/ruby/system/PersistentTable.cc
index 1d971ae21..979a0d4ab 100644
--- a/src/mem/ruby/system/PersistentTable.cc
+++ b/src/mem/ruby/system/PersistentTable.cc
@@ -46,7 +46,7 @@ PersistentTable::~PersistentTable()
{
delete m_map_ptr;
m_map_ptr = NULL;
-}
+}
void
PersistentTable::persistentRequestLock(const Address& address,
diff --git a/src/mem/ruby/system/PersistentTable.hh b/src/mem/ruby/system/PersistentTable.hh
index defcae2b8..64203c82c 100644
--- a/src/mem/ruby/system/PersistentTable.hh
+++ b/src/mem/ruby/system/PersistentTable.hh
@@ -54,7 +54,7 @@ class PersistentTable
// Destructor
~PersistentTable();
-
+
// Public Methods
void persistentRequestLock(const Address& address, MachineID locker,
AccessType type);
diff --git a/src/mem/ruby/system/SparseMemory.cc b/src/mem/ruby/system/SparseMemory.cc
index 6271d24c3..70e00f14b 100644
--- a/src/mem/ruby/system/SparseMemory.cc
+++ b/src/mem/ruby/system/SparseMemory.cc
@@ -34,7 +34,7 @@ SparseMemory::SparseMemory(int number_of_bits, int number_of_levels)
int extra;
m_total_number_of_bits = number_of_bits;
m_number_of_levels = number_of_levels;
-
+
//
// Create the array that describes the bits per level
//
@@ -48,7 +48,7 @@ SparseMemory::SparseMemory(int number_of_bits, int number_of_levels)
m_number_of_bits_per_level[level] = even_level_bits;
}
m_map_head = new SparseMapType;
-
+
m_total_adds = 0;
m_total_removes = 0;
m_adds_per_level = new uint64_t[m_number_of_levels];
@@ -70,14 +70,14 @@ SparseMemory::~SparseMemory()
// Recursively search table hierarchy for the lowest level table.
// Delete the lowest table first, the tables above
-void
+void
SparseMemory::recursivelyRemoveTables(SparseMapType* curTable, int curLevel)
{
SparseMapType::iterator iter;
for (iter = curTable->begin(); iter != curTable->end(); iter++) {
SparseMemEntry* entryStruct = &((*iter).second);
-
+
if (curLevel != (m_number_of_levels - 1)) {
// If the not at the last level, analyze those lower level
// tables first, then delete those next tables
@@ -91,19 +91,19 @@ SparseMemory::recursivelyRemoveTables(SparseMapType* curTable, int curLevel)
delete dirEntry;
}
entryStruct->entry = NULL;
- }
-
+ }
+
// Once all entries have been deleted, erase the entries
curTable->erase(curTable->begin(), curTable->end());
}
// tests to see if an address is present in the memory
-bool
+bool
SparseMemory::exist(const Address& address) const
{
SparseMapType* curTable = m_map_head;
Address curAddress;
-
+
// Initiallize the high bit to be the total number of bits plus
// the block offset. However the highest bit index is one less
// than this value.
@@ -111,7 +111,7 @@ SparseMemory::exist(const Address& address) const
int lowBit;
assert(address == line_address(address));
DEBUG_EXPR(CACHE_COMP, HighPrio, address);
-
+
for (int level = 0; level < m_number_of_levels; level++) {
// Create the appropriate sub address for this level
// Note: that set Address is inclusive of the specified range,
@@ -119,15 +119,15 @@ SparseMemory::exist(const Address& address) const
// used to create the address.
lowBit = highBit - m_number_of_bits_per_level[level];
curAddress.setAddress(address.bitSelect(lowBit, highBit - 1));
-
+
DEBUG_EXPR(CACHE_COMP, HighPrio, level);
DEBUG_EXPR(CACHE_COMP, HighPrio, lowBit);
DEBUG_EXPR(CACHE_COMP, HighPrio, highBit - 1);
DEBUG_EXPR(CACHE_COMP, HighPrio, curAddress);
-
+
// Adjust the highBit value for the next level
highBit -= m_number_of_bits_per_level[level];
-
+
// If the address is found, move on to the next level.
// Otherwise, return not found
if (curTable->count(curAddress) != 0) {
@@ -137,31 +137,31 @@ SparseMemory::exist(const Address& address) const
return false;
}
}
-
+
DEBUG_MSG(CACHE_COMP, HighPrio, "Entry found");
return true;
}
// add an address to memory
-void
+void
SparseMemory::add(const Address& address)
{
assert(address == line_address(address));
assert(!exist(address));
-
+
m_total_adds++;
-
+
Address curAddress;
SparseMapType* curTable = m_map_head;
SparseMemEntry* entryStruct = NULL;
-
+
// Initiallize the high bit to be the total number of bits plus
// the block offset. However the highest bit index is one less
// than this value.
int highBit = m_total_number_of_bits + RubySystem::getBlockSizeBits();
int lowBit;
void* newEntry = NULL;
-
+
for (int level = 0; level < m_number_of_levels; level++) {
// create the appropriate address for this level
// Note: that set Address is inclusive of the specified range,
@@ -169,15 +169,15 @@ SparseMemory::add(const Address& address)
// used to create the address.
lowBit = highBit - m_number_of_bits_per_level[level];
curAddress.setAddress(address.bitSelect(lowBit, highBit - 1));
-
+
// Adjust the highBit value for the next level
highBit -= m_number_of_bits_per_level[level];
-
+
// if the address exists in the cur table, move on. Otherwise
// create a new table.
if (curTable->count(curAddress) != 0) {
curTable = (SparseMapType*)(((*curTable)[curAddress]).entry);
- } else {
+ } else {
m_adds_per_level[level]++;
// if the last level, add a directory entry. Otherwise add a map.
@@ -195,57 +195,57 @@ SparseMemory::add(const Address& address)
entryStruct = new SparseMemEntry;
entryStruct->entry = newEntry;
(*curTable)[curAddress] = *entryStruct;
-
+
// Move to the next level of the heirarchy
curTable = (SparseMapType*)newEntry;
}
}
-
+
assert(exist(address));
return;
}
// recursively search table hierarchy for the lowest level table.
// remove the lowest entry and any empty tables above it.
-int
+int
SparseMemory::recursivelyRemoveLevels(const Address& address,
CurNextInfo& curInfo)
{
Address curAddress;
CurNextInfo nextInfo;
SparseMemEntry* entryStruct;
-
+
// create the appropriate address for this level
// Note: that set Address is inclusive of the specified range,
// thus the high bit is one less than the total number of bits
// used to create the address.
- curAddress.setAddress(address.bitSelect(curInfo.lowBit,
+ curAddress.setAddress(address.bitSelect(curInfo.lowBit,
curInfo.highBit - 1));
-
+
DEBUG_EXPR(CACHE_COMP, HighPrio, address);
DEBUG_EXPR(CACHE_COMP, HighPrio, curInfo.level);
DEBUG_EXPR(CACHE_COMP, HighPrio, curInfo.lowBit);
DEBUG_EXPR(CACHE_COMP, HighPrio, curInfo.highBit - 1);
DEBUG_EXPR(CACHE_COMP, HighPrio, curAddress);
-
+
assert(curInfo.curTable->count(curAddress) != 0);
-
+
entryStruct = &((*(curInfo.curTable))[curAddress]);
-
+
if (curInfo.level < (m_number_of_levels - 1)) {
// set up next level's info
nextInfo.curTable = (SparseMapType*)(entryStruct->entry);
nextInfo.level = curInfo.level + 1;
- nextInfo.highBit = curInfo.highBit -
+ nextInfo.highBit = curInfo.highBit -
m_number_of_bits_per_level[curInfo.level];
- nextInfo.lowBit = curInfo.lowBit -
+ nextInfo.lowBit = curInfo.lowBit -
m_number_of_bits_per_level[curInfo.level + 1];
-
+
// recursively search the table hierarchy
int tableSize = recursivelyRemoveLevels(address, nextInfo);
-
+
// If this table below is now empty, we must delete it and
// erase it from our table.
if (tableSize == 0) {
@@ -269,54 +269,54 @@ SparseMemory::recursivelyRemoveLevels(const Address& address,
}
// remove an entry from the table
-void
+void
SparseMemory::remove(const Address& address)
{
assert(address == line_address(address));
assert(exist(address));
-
+
m_total_removes++;
-
+
CurNextInfo nextInfo;
-
+
// Initialize table pointer and level value
nextInfo.curTable = m_map_head;
nextInfo.level = 0;
-
+
// Initiallize the high bit to be the total number of bits plus
// the block offset. However the highest bit index is one less
// than this value.
nextInfo.highBit = m_total_number_of_bits + RubySystem::getBlockSizeBits();
nextInfo.lowBit = nextInfo.highBit - m_number_of_bits_per_level[0];;
-
+
// recursively search the table hierarchy for empty tables
// starting from the level 0. Note we do not check the return
// value because the head table is never deleted;
recursivelyRemoveLevels(address, nextInfo);
-
+
assert(!exist(address));
return;
}
// looks an address up in memory
-Directory_Entry*
+Directory_Entry*
SparseMemory::lookup(const Address& address)
{
assert(exist(address));
assert(address == line_address(address));
DEBUG_EXPR(CACHE_COMP, HighPrio, address);
-
+
Address curAddress;
SparseMapType* curTable = m_map_head;
Directory_Entry* entry = NULL;
-
+
// Initiallize the high bit to be the total number of bits plus
// the block offset. However the highest bit index is one less
// than this value.
int highBit = m_total_number_of_bits + RubySystem::getBlockSizeBits();
int lowBit;
-
+
for (int level = 0; level < m_number_of_levels; level++) {
// create the appropriate address for this level
// Note: that set Address is inclusive of the specified range,
@@ -324,32 +324,32 @@ SparseMemory::lookup(const Address& address)
// used to create the address.
lowBit = highBit - m_number_of_bits_per_level[level];
curAddress.setAddress(address.bitSelect(lowBit, highBit - 1));
-
+
DEBUG_EXPR(CACHE_COMP, HighPrio, level);
DEBUG_EXPR(CACHE_COMP, HighPrio, lowBit);
DEBUG_EXPR(CACHE_COMP, HighPrio, highBit - 1);
DEBUG_EXPR(CACHE_COMP, HighPrio, curAddress);
-
+
// Adjust the highBit value for the next level
highBit -= m_number_of_bits_per_level[level];
-
+
// The entry should be in the table and valid
curTable = (SparseMapType*)(((*curTable)[curAddress]).entry);
assert(curTable != NULL);
}
-
+
// The last entry actually points to the Directory entry not a table
entry = (Directory_Entry*)curTable;
return entry;
}
-void
+void
SparseMemory::print(ostream& out) const
{
}
-void
+void
SparseMemory::printStats(ostream& out) const
{
out << "total_adds: " << m_total_adds << " [";
diff --git a/src/mem/ruby/system/SparseMemory.hh b/src/mem/ruby/system/SparseMemory.hh
index 6e3c8f926..2c207aa3f 100644
--- a/src/mem/ruby/system/SparseMemory.hh
+++ b/src/mem/ruby/system/SparseMemory.hh
@@ -54,39 +54,39 @@ class SparseMemory
public:
SparseMemory(int number_of_bits, int number_of_levels);
~SparseMemory();
-
+
void printConfig(ostream& out) { }
-
+
bool exist(const Address& address) const;
void add(const Address& address);
void remove(const Address& address);
-
+
Directory_Entry* lookup(const Address& address);
-
+
// Print cache contents
void print(ostream& out) const;
void printStats(ostream& out) const;
private:
// Private Methods
-
+
// Private copy constructor and assignment operator
SparseMemory(const SparseMemory& obj);
SparseMemory& operator=(const SparseMemory& obj);
-
+
// Used by destructor to recursively remove all tables
void recursivelyRemoveTables(SparseMapType* currentTable, int level);
-
+
// recursive search for address and remove associated entries
int recursivelyRemoveLevels(const Address& address, CurNextInfo& curInfo);
-
+
// Data Members (m_prefix)
SparseMapType* m_map_head;
-
+
int m_total_number_of_bits;
int m_number_of_levels;
int* m_number_of_bits_per_level;
-
+
uint64_t m_total_adds;
uint64_t m_total_removes;
uint64_t* m_adds_per_level;