author     Nilay Vaish <nilay@cs.wisc.edu>  2013-02-10 21:26:24 -0600
committer  Nilay Vaish <nilay@cs.wisc.edu>  2013-02-10 21:26:24 -0600
commit     d3aebe1f91aa166329c8ee102fdcb2c9734fdceb (patch)
tree       f6f7807c24eccde98ee1e4bfe4c9c24b3c9392b2 /src/mem/ruby
parent     affd77ea77860fa9231aba8e43ad95dea06a114a (diff)
ruby: replaces Time with Cycles in many places
The patch started off by replacing Time with Cycles in the Consumer class, but getting Ruby to compile required carrying out the rest of these changes as well. Subsequent patches will continue this process until Time has been completely replaced with Cycles.
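For context, the Cycles type this patch migrates to is a strong wrapper around an integer cycle count rather than a plain typedef. The sketch below is only an illustrative assumption of what such a wrapper looks like (the real definition lives in gem5's src/base/types.hh and may differ); it shows why bare integers at call sites now need explicit Cycles(1) conversions.

// Illustrative sketch only (assumption): a strong typedef in the spirit
// of gem5's Cycles type; the actual gem5 definition may differ.
#include <cstdint>

class Cycles
{
  private:
    uint64_t c;

  public:
    // Explicit, so a bare integer no longer converts silently.
    explicit Cycles(uint64_t _c = 0) : c(_c) {}

    // Still usable where a plain integer count is expected.
    operator uint64_t() const { return c; }

    Cycles operator+(const Cycles& other) const
    { return Cycles(c + other.c); }

    Cycles& operator+=(const Cycles& other)
    { c += other.c; return *this; }
};

// Call sites therefore change from scheduleEvent(1) to
// scheduleEvent(Cycles(1)), as seen throughout the diff below.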
Diffstat (limited to 'src/mem/ruby')
-rw-r--r--  src/mem/ruby/buffers/MessageBuffer.cc                              | 28
-rw-r--r--  src/mem/ruby/buffers/MessageBuffer.hh                              | 18
-rw-r--r--  src/mem/ruby/buffers/MessageBufferNode.hh                          | 18
-rw-r--r--  src/mem/ruby/common/Consumer.cc                                    |  7
-rw-r--r--  src/mem/ruby/common/Consumer.hh                                    |  5
-rw-r--r--  src/mem/ruby/network/BasicLink.hh                                  |  2
-rw-r--r--  src/mem/ruby/network/BasicLink.py                                  |  2
-rw-r--r--  src/mem/ruby/network/garnet/fixed-pipeline/GarnetLink_d.py         |  2
-rw-r--r--  src/mem/ruby/network/garnet/fixed-pipeline/InputUnit_d.hh          |  2
-rw-r--r--  src/mem/ruby/network/garnet/fixed-pipeline/NetworkInterface_d.cc   | 10
-rw-r--r--  src/mem/ruby/network/garnet/fixed-pipeline/NetworkLink_d.hh        |  2
-rw-r--r--  src/mem/ruby/network/garnet/fixed-pipeline/OutputUnit_d.hh         |  2
-rw-r--r--  src/mem/ruby/network/garnet/fixed-pipeline/Router_d.cc             |  6
-rw-r--r--  src/mem/ruby/network/garnet/fixed-pipeline/SWallocator_d.cc        |  2
-rw-r--r--  src/mem/ruby/network/garnet/fixed-pipeline/Switch_d.cc             |  2
-rw-r--r--  src/mem/ruby/network/garnet/fixed-pipeline/VCallocator_d.cc        |  2
-rw-r--r--  src/mem/ruby/network/garnet/flexible-pipeline/GarnetLink.py        |  2
-rw-r--r--  src/mem/ruby/network/garnet/flexible-pipeline/NetworkInterface.cc  | 12
-rw-r--r--  src/mem/ruby/network/garnet/flexible-pipeline/NetworkLink.hh       |  3
-rw-r--r--  src/mem/ruby/network/garnet/flexible-pipeline/Router.cc            | 16
-rw-r--r--  src/mem/ruby/network/garnet/flexible-pipeline/Router.hh            |  2
-rw-r--r--  src/mem/ruby/network/simple/PerfectSwitch.cc                       |  2
-rw-r--r--  src/mem/ruby/network/simple/Switch.cc                              |  6
-rw-r--r--  src/mem/ruby/network/simple/Switch.hh                              |  2
-rw-r--r--  src/mem/ruby/network/simple/Throttle.cc                            | 10
-rw-r--r--  src/mem/ruby/network/simple/Throttle.hh                            | 17
-rw-r--r--  src/mem/ruby/slicc_interface/AbstractCacheEntry.hh                 |  2
-rw-r--r--  src/mem/ruby/slicc_interface/AbstractController.hh                 |  2
-rw-r--r--  src/mem/ruby/slicc_interface/Controller.py                         |  2
-rw-r--r--  src/mem/ruby/slicc_interface/RubySlicc_Util.hh                     |  5
-rw-r--r--  src/mem/ruby/system/Cache.py                                       |  2
-rw-r--r--  src/mem/ruby/system/CacheMemory.hh                                 |  4
-rw-r--r--  src/mem/ruby/system/RubyMemoryControl.cc                           |  4
-rw-r--r--  src/mem/ruby/system/RubyMemoryControl.hh                           |  6
-rw-r--r--  src/mem/ruby/system/RubyMemoryControl.py                           |  6
-rw-r--r--  src/mem/ruby/system/Sequencer.cc                                   |  2
-rw-r--r--  src/mem/ruby/system/TimerTable.cc                                  |  5
-rw-r--r--  src/mem/ruby/system/TimerTable.hh                                  |  7
-rw-r--r--  src/mem/ruby/system/WireBuffer.cc                                  | 12
-rw-r--r--  src/mem/ruby/system/WireBuffer.hh                                  |  2
40 files changed, 122 insertions, 121 deletions
diff --git a/src/mem/ruby/buffers/MessageBuffer.cc b/src/mem/ruby/buffers/MessageBuffer.cc
index 446586f5a..f39e35738 100644
--- a/src/mem/ruby/buffers/MessageBuffer.cc
+++ b/src/mem/ruby/buffers/MessageBuffer.cc
@@ -39,6 +39,7 @@ using namespace std;
using m5::stl_helpers::operator<<;
MessageBuffer::MessageBuffer(const string &name)
+ : m_last_arrival_time(0)
{
m_msg_counter = 0;
m_consumer_ptr = NULL;
@@ -48,7 +49,6 @@ MessageBuffer::MessageBuffer(const string &name)
m_strict_fifo = true;
m_size = 0;
m_max_size = -1;
- m_last_arrival_time = 0;
m_randomization = true;
m_size_last_time_size_checked = 0;
m_time_last_time_size_checked = 0;
@@ -139,19 +139,19 @@ MessageBuffer::peekAtHeadOfQueue() const
}
// FIXME - move me somewhere else
-int
+Cycles
random_time()
{
- int time = 1;
- time += random() & 0x3; // [0...3]
+ Cycles time(1);
+ time += Cycles(random() & 0x3); // [0...3]
if ((random() & 0x7) == 0) { // 1 in 8 chance
- time += 100 + (random() % 0xf); // 100 + [1...15]
+ time += Cycles(100 + (random() % 0xf)); // 100 + [1...15]
}
return time;
}
void
-MessageBuffer::enqueue(MsgPtr message, Time delta)
+MessageBuffer::enqueue(MsgPtr message, Cycles delta)
{
m_msg_counter++;
m_size++;
@@ -170,8 +170,9 @@ MessageBuffer::enqueue(MsgPtr message, Time delta)
// Calculate the arrival time of the message, that is, the first
// cycle the message can be dequeued.
assert(delta>0);
- Time current_time = m_clockobj_ptr->curCycle();
- Time arrival_time = 0;
+ Cycles current_time(m_clockobj_ptr->curCycle());
+ Cycles arrival_time(0);
+
if (!RubySystem::getRandomization() || (m_randomization == false)) {
// No randomization
arrival_time = current_time + delta;
@@ -304,6 +305,7 @@ MessageBuffer::recycle()
MessageBufferNode node = m_prio_heap.front();
pop_heap(m_prio_heap.begin(), m_prio_heap.end(),
greater<MessageBufferNode>());
+
node.m_time = m_clockobj_ptr->curCycle() + m_recycle_latency;
m_prio_heap.back() = node;
push_heap(m_prio_heap.begin(), m_prio_heap.end(),
@@ -317,6 +319,7 @@ MessageBuffer::reanalyzeMessages(const Address& addr)
{
DPRINTF(RubyQueue, "ReanalyzeMessages\n");
assert(m_stall_msg_map.count(addr) > 0);
+ Cycles nextCycle(m_clockobj_ptr->curCycle() + Cycles(1));
//
// Put all stalled messages associated with this address back on the
@@ -324,8 +327,7 @@ MessageBuffer::reanalyzeMessages(const Address& addr)
//
while(!m_stall_msg_map[addr].empty()) {
m_msg_counter++;
- MessageBufferNode msgNode(m_clockobj_ptr->curCycle() + 1,
- m_msg_counter,
+ MessageBufferNode msgNode(nextCycle, m_msg_counter,
m_stall_msg_map[addr].front());
m_prio_heap.push_back(msgNode);
@@ -342,6 +344,7 @@ void
MessageBuffer::reanalyzeAllMessages()
{
DPRINTF(RubyQueue, "ReanalyzeAllMessages %s\n");
+ Cycles nextCycle(m_clockobj_ptr->curCycle() + Cycles(1));
//
// Put all stalled messages associated with this address back on the
@@ -353,14 +356,13 @@ MessageBuffer::reanalyzeAllMessages()
while(!(map_iter->second).empty()) {
m_msg_counter++;
- MessageBufferNode msgNode(m_clockobj_ptr->curCycle() + 1,
- m_msg_counter,
+ MessageBufferNode msgNode(nextCycle, m_msg_counter,
(map_iter->second).front());
m_prio_heap.push_back(msgNode);
push_heap(m_prio_heap.begin(), m_prio_heap.end(),
greater<MessageBufferNode>());
-
+
m_consumer_ptr->scheduleEventAbsolute(msgNode.m_time);
(map_iter->second).pop_front();
}
diff --git a/src/mem/ruby/buffers/MessageBuffer.hh b/src/mem/ruby/buffers/MessageBuffer.hh
index 581f03453..5bad45f2d 100644
--- a/src/mem/ruby/buffers/MessageBuffer.hh
+++ b/src/mem/ruby/buffers/MessageBuffer.hh
@@ -54,11 +54,8 @@ class MessageBuffer
std::string name() const { return m_name; }
- void
- setRecycleLatency(int recycle_latency)
- {
- m_recycle_latency = recycle_latency;
- }
+ void setRecycleLatency(Cycles recycle_latency)
+ { m_recycle_latency = recycle_latency; }
void reanalyzeMessages(const Address& addr);
void reanalyzeAllMessages();
@@ -74,7 +71,7 @@ class MessageBuffer
std::pop_heap(m_prio_heap.begin(), m_prio_heap.end(),
std::greater<MessageBufferNode>());
m_prio_heap.pop_back();
- enqueue(node.m_msgptr, 1);
+ enqueue(node.m_msgptr, Cycles(1));
}
bool areNSlotsAvailable(int n);
@@ -114,8 +111,8 @@ class MessageBuffer
return m_prio_heap.front().m_msgptr;
}
- void enqueue(MsgPtr message) { enqueue(message, 1); }
- void enqueue(MsgPtr message, Time delta);
+ void enqueue(MsgPtr message) { enqueue(message, Cycles(1)); }
+ void enqueue(MsgPtr message, Cycles delta);
//! returns delay ticks of the message.
Time dequeue_getDelayCycles(MsgPtr& message);
@@ -160,7 +157,7 @@ class MessageBuffer
private:
//added by SS
- int m_recycle_latency;
+ Cycles m_recycle_latency;
// Private Methods
Time setAndReturnDelayCycles(MsgPtr message);
@@ -204,7 +201,8 @@ class MessageBuffer
bool m_strict_fifo;
bool m_ordering_set;
bool m_randomization;
- Time m_last_arrival_time;
+
+ Cycles m_last_arrival_time;
int m_input_link_id;
int m_vnet_id;
diff --git a/src/mem/ruby/buffers/MessageBufferNode.hh b/src/mem/ruby/buffers/MessageBufferNode.hh
index 5e7a52e12..57f09a913 100644
--- a/src/mem/ruby/buffers/MessageBufferNode.hh
+++ b/src/mem/ruby/buffers/MessageBufferNode.hh
@@ -37,22 +37,18 @@ class MessageBufferNode
{
public:
MessageBufferNode()
- {
- m_time = 0;
- m_msg_counter = 0;
- }
+ : m_time(0), m_msg_counter(0)
+ {}
- MessageBufferNode(const Time& time, int counter, const MsgPtr& msgptr)
- {
- m_time = time;
- m_msgptr = msgptr;
- m_msg_counter = counter;
- }
+ MessageBufferNode(const Cycles& time, uint64_t counter,
+ const MsgPtr& msgptr)
+ : m_time(time), m_msg_counter(counter), m_msgptr(msgptr)
+ {}
void print(std::ostream& out) const;
public:
- Time m_time;
+ Cycles m_time;
uint64 m_msg_counter; // FIXME, should this be a 64-bit value?
MsgPtr m_msgptr;
};
diff --git a/src/mem/ruby/common/Consumer.cc b/src/mem/ruby/common/Consumer.cc
index a829f4d99..b7d31ccb0 100644
--- a/src/mem/ruby/common/Consumer.cc
+++ b/src/mem/ruby/common/Consumer.cc
@@ -29,13 +29,14 @@
#include "mem/ruby/common/Consumer.hh"
void
-Consumer::scheduleEvent(Time timeDelta)
+Consumer::scheduleEvent(Cycles timeDelta)
{
- scheduleEventAbsolute(timeDelta + em->curCycle());
+ timeDelta += em->curCycle();
+ scheduleEventAbsolute(timeDelta);
}
void
-Consumer::scheduleEventAbsolute(Time timeAbs)
+Consumer::scheduleEventAbsolute(Cycles timeAbs)
{
Tick evt_time = em->clockPeriod() * timeAbs;
if (!alreadyScheduled(evt_time)) {
diff --git a/src/mem/ruby/common/Consumer.hh b/src/mem/ruby/common/Consumer.hh
index 9e089e992..33c78d780 100644
--- a/src/mem/ruby/common/Consumer.hh
+++ b/src/mem/ruby/common/Consumer.hh
@@ -38,7 +38,6 @@
#include <iostream>
#include <set>
-#include "mem/ruby/common/TypeDefines.hh"
#include "sim/clocked_object.hh"
class Consumer
@@ -88,8 +87,8 @@ class Consumer
m_scheduled_wakeups.erase(time);
}
- void scheduleEvent(Time timeDelta);
- void scheduleEventAbsolute(Time timeAbs);
+ void scheduleEvent(Cycles timeDelta);
+ void scheduleEventAbsolute(Cycles timeAbs);
private:
Tick m_last_scheduled_wakeup;
diff --git a/src/mem/ruby/network/BasicLink.hh b/src/mem/ruby/network/BasicLink.hh
index 11832f9c3..634b9143e 100644
--- a/src/mem/ruby/network/BasicLink.hh
+++ b/src/mem/ruby/network/BasicLink.hh
@@ -52,7 +52,7 @@ class BasicLink : public SimObject
void print(std::ostream& out) const;
- int m_latency;
+ Cycles m_latency;
int m_bandwidth_factor;
int m_weight;
};
diff --git a/src/mem/ruby/network/BasicLink.py b/src/mem/ruby/network/BasicLink.py
index 841208578..8fc83c9e2 100644
--- a/src/mem/ruby/network/BasicLink.py
+++ b/src/mem/ruby/network/BasicLink.py
@@ -34,7 +34,7 @@ class BasicLink(SimObject):
type = 'BasicLink'
cxx_header = "mem/ruby/network/BasicLink.hh"
link_id = Param.Int("ID in relation to other links")
- latency = Param.Int(1, "latency")
+ latency = Param.Cycles(1, "latency")
# The following banwidth factor does not translate to the same value for
# both the simple and Garnet models. For the most part, the bandwidth
# factor is the width of the link in bytes, expect for certain situations
diff --git a/src/mem/ruby/network/garnet/fixed-pipeline/GarnetLink_d.py b/src/mem/ruby/network/garnet/fixed-pipeline/GarnetLink_d.py
index bbd785e2c..14c3f543c 100644
--- a/src/mem/ruby/network/garnet/fixed-pipeline/GarnetLink_d.py
+++ b/src/mem/ruby/network/garnet/fixed-pipeline/GarnetLink_d.py
@@ -37,7 +37,7 @@ class NetworkLink_d(ClockedObject):
type = 'NetworkLink_d'
cxx_header = "mem/ruby/network/garnet/fixed-pipeline/NetworkLink_d.hh"
link_id = Param.Int(Parent.link_id, "link id")
- link_latency = Param.Int(Parent.latency, "link latency")
+ link_latency = Param.Cycles(Parent.latency, "link latency")
vcs_per_vnet = Param.Int(Parent.vcs_per_vnet,
"virtual channels per virtual network")
virt_nets = Param.Int(Parent.number_of_virtual_networks,
diff --git a/src/mem/ruby/network/garnet/fixed-pipeline/InputUnit_d.hh b/src/mem/ruby/network/garnet/fixed-pipeline/InputUnit_d.hh
index 3ebf7c6e9..07c6bec3a 100644
--- a/src/mem/ruby/network/garnet/fixed-pipeline/InputUnit_d.hh
+++ b/src/mem/ruby/network/garnet/fixed-pipeline/InputUnit_d.hh
@@ -90,7 +90,7 @@ class InputUnit_d : public Consumer
{
flit_d *t_flit = new flit_d(in_vc, free_signal, curTime);
creditQueue->insert(t_flit);
- m_credit_link->scheduleEvent(1);
+ m_credit_link->scheduleEvent(Cycles(1));
}
inline int
diff --git a/src/mem/ruby/network/garnet/fixed-pipeline/NetworkInterface_d.cc b/src/mem/ruby/network/garnet/fixed-pipeline/NetworkInterface_d.cc
index f0e117aad..c58b38c52 100644
--- a/src/mem/ruby/network/garnet/fixed-pipeline/NetworkInterface_d.cc
+++ b/src/mem/ruby/network/garnet/fixed-pipeline/NetworkInterface_d.cc
@@ -244,14 +244,14 @@ NetworkInterface_d::wakeup()
free_signal = true;
outNode_ptr[t_flit->get_vnet()]->enqueue(
- t_flit->get_msg_ptr(), 1);
+ t_flit->get_msg_ptr(), Cycles(1));
}
// Simply send a credit back since we are not buffering
// this flit in the NI
flit_d *credit_flit = new flit_d(t_flit->get_vc(), free_signal,
m_net_ptr->curCycle());
creditQueue->insert(credit_flit);
- m_ni_credit_link->scheduleEvent(1);
+ m_ni_credit_link->scheduleEvent(Cycles(1));
int vnet = t_flit->get_vnet();
m_net_ptr->increment_received_flits(vnet);
@@ -324,7 +324,7 @@ NetworkInterface_d::scheduleOutputLink()
t_flit->set_time(m_net_ptr->curCycle() + 1);
outSrcQueue->insert(t_flit);
// schedule the out link
- outNetLink->scheduleEvent(1);
+ outNetLink->scheduleEvent(Cycles(1));
if (t_flit->get_type() == TAIL_ ||
t_flit->get_type() == HEAD_TAIL_) {
@@ -351,13 +351,13 @@ NetworkInterface_d::checkReschedule()
{
for (int vnet = 0; vnet < m_virtual_networks; vnet++) {
if (inNode_ptr[vnet]->isReady()) { // Is there a message waiting
- scheduleEvent(1);
+ scheduleEvent(Cycles(1));
return;
}
}
for (int vc = 0; vc < m_num_vcs; vc++) {
if (m_ni_buffers[vc]->isReadyForNext(m_net_ptr->curCycle())) {
- scheduleEvent(1);
+ scheduleEvent(Cycles(1));
return;
}
}
diff --git a/src/mem/ruby/network/garnet/fixed-pipeline/NetworkLink_d.hh b/src/mem/ruby/network/garnet/fixed-pipeline/NetworkLink_d.hh
index c52c903e0..14f6a6527 100644
--- a/src/mem/ruby/network/garnet/fixed-pipeline/NetworkLink_d.hh
+++ b/src/mem/ruby/network/garnet/fixed-pipeline/NetworkLink_d.hh
@@ -72,7 +72,7 @@ class NetworkLink_d : public ClockedObject, public Consumer
protected:
int m_id;
- int m_latency;
+ Cycles m_latency;
int channel_width;
GarnetNetwork_d *m_net_ptr;
diff --git a/src/mem/ruby/network/garnet/fixed-pipeline/OutputUnit_d.hh b/src/mem/ruby/network/garnet/fixed-pipeline/OutputUnit_d.hh
index 4fa7dcb90..4b5b851e2 100644
--- a/src/mem/ruby/network/garnet/fixed-pipeline/OutputUnit_d.hh
+++ b/src/mem/ruby/network/garnet/fixed-pipeline/OutputUnit_d.hh
@@ -84,7 +84,7 @@ class OutputUnit_d : public Consumer
insert_flit(flit_d *t_flit)
{
m_out_buffer->insert(t_flit);
- m_out_link->scheduleEvent(1);
+ m_out_link->scheduleEvent(Cycles(1));
}
private:
diff --git a/src/mem/ruby/network/garnet/fixed-pipeline/Router_d.cc b/src/mem/ruby/network/garnet/fixed-pipeline/Router_d.cc
index 2a759eb87..eaa147c41 100644
--- a/src/mem/ruby/network/garnet/fixed-pipeline/Router_d.cc
+++ b/src/mem/ruby/network/garnet/fixed-pipeline/Router_d.cc
@@ -135,13 +135,13 @@ Router_d::route_req(flit_d *t_flit, InputUnit_d *in_unit, int invc)
void
Router_d::vcarb_req()
{
- m_vc_alloc->scheduleEvent(1);
+ m_vc_alloc->scheduleEvent(Cycles(1));
}
void
Router_d::swarb_req()
{
- m_sw_alloc->scheduleEvent(1);
+ m_sw_alloc->scheduleEvent(Cycles(1));
}
void
@@ -154,7 +154,7 @@ void
Router_d::update_sw_winner(int inport, flit_d *t_flit)
{
m_switch->update_sw_winner(inport, t_flit);
- m_switch->scheduleEvent(1);
+ m_switch->scheduleEvent(Cycles(1));
}
void
diff --git a/src/mem/ruby/network/garnet/fixed-pipeline/SWallocator_d.cc b/src/mem/ruby/network/garnet/fixed-pipeline/SWallocator_d.cc
index ab3f4b761..49f2e8c57 100644
--- a/src/mem/ruby/network/garnet/fixed-pipeline/SWallocator_d.cc
+++ b/src/mem/ruby/network/garnet/fixed-pipeline/SWallocator_d.cc
@@ -220,7 +220,7 @@ SWallocator_d::check_for_wakeup()
for (int j = 0; j < m_num_vcs; j++) {
if (m_input_unit[i]->need_stage_nextcycle(j, ACTIVE_, SA_,
m_router->curCycle())) {
- scheduleEvent(1);
+ scheduleEvent(Cycles(1));
return;
}
}
diff --git a/src/mem/ruby/network/garnet/fixed-pipeline/Switch_d.cc b/src/mem/ruby/network/garnet/fixed-pipeline/Switch_d.cc
index db7446f7a..0b2c3a227 100644
--- a/src/mem/ruby/network/garnet/fixed-pipeline/Switch_d.cc
+++ b/src/mem/ruby/network/garnet/fixed-pipeline/Switch_d.cc
@@ -90,7 +90,7 @@ Switch_d::check_for_wakeup()
{
for (int inport = 0; inport < m_num_inports; inport++) {
if (m_switch_buffer[inport]->isReadyForNext(m_router->curCycle())) {
- scheduleEvent(1);
+ scheduleEvent(Cycles(1));
break;
}
}
diff --git a/src/mem/ruby/network/garnet/fixed-pipeline/VCallocator_d.cc b/src/mem/ruby/network/garnet/fixed-pipeline/VCallocator_d.cc
index 012837362..9569810e8 100644
--- a/src/mem/ruby/network/garnet/fixed-pipeline/VCallocator_d.cc
+++ b/src/mem/ruby/network/garnet/fixed-pipeline/VCallocator_d.cc
@@ -260,7 +260,7 @@ VCallocator_d::check_for_wakeup()
for (int j = 0; j < m_num_vcs; j++) {
if (m_input_unit[i]->need_stage_nextcycle(j, VC_AB_, VA_,
m_router->curCycle())) {
- scheduleEvent(1);
+ scheduleEvent(Cycles(1));
return;
}
}
diff --git a/src/mem/ruby/network/garnet/flexible-pipeline/GarnetLink.py b/src/mem/ruby/network/garnet/flexible-pipeline/GarnetLink.py
index 41049884f..9903c9cd6 100644
--- a/src/mem/ruby/network/garnet/flexible-pipeline/GarnetLink.py
+++ b/src/mem/ruby/network/garnet/flexible-pipeline/GarnetLink.py
@@ -37,7 +37,7 @@ class NetworkLink(ClockedObject):
type = 'NetworkLink'
cxx_header = "mem/ruby/network/garnet/flexible-pipeline/NetworkLink.hh"
link_id = Param.Int(Parent.link_id, "link id")
- link_latency = Param.Int(Parent.latency, "link latency")
+ link_latency = Param.Cycles(Parent.latency, "link latency")
vcs_per_vnet = Param.Int(Parent.vcs_per_vnet,
"virtual channels per virtual network")
virt_nets = Param.Int(Parent.number_of_virtual_networks,
diff --git a/src/mem/ruby/network/garnet/flexible-pipeline/NetworkInterface.cc b/src/mem/ruby/network/garnet/flexible-pipeline/NetworkInterface.cc
index 7267da36d..f0d59f4b4 100644
--- a/src/mem/ruby/network/garnet/flexible-pipeline/NetworkInterface.cc
+++ b/src/mem/ruby/network/garnet/flexible-pipeline/NetworkInterface.cc
@@ -193,7 +193,7 @@ NetworkInterface::grant_vc(int out_port, int vc, Time grant_time)
{
assert(m_out_vc_state[vc]->isInState(VC_AB_, grant_time));
m_out_vc_state[vc]->grant_vc(grant_time);
- scheduleEvent(1);
+ scheduleEvent(Cycles(1));
}
// The tail flit corresponding to this vc has been buffered at the next hop
@@ -203,7 +203,7 @@ NetworkInterface::release_vc(int out_port, int vc, Time release_time)
{
assert(m_out_vc_state[vc]->isInState(ACTIVE_, release_time));
m_out_vc_state[vc]->setState(IDLE_, release_time);
- scheduleEvent(1);
+ scheduleEvent(Cycles(1));
}
// Looking for a free output vc
@@ -270,7 +270,7 @@ NetworkInterface::wakeup()
m_id, m_net_ptr->curCycle());
outNode_ptr[t_flit->get_vnet()]->enqueue(
- t_flit->get_msg_ptr(), 1);
+ t_flit->get_msg_ptr(), Cycles(1));
// signal the upstream router that this vc can be freed now
inNetLink->release_vc_link(t_flit->get_vc(),
@@ -316,7 +316,7 @@ NetworkInterface::scheduleOutputLink()
outSrcQueue->insert(t_flit);
// schedule the out link
- outNetLink->scheduleEvent(1);
+ outNetLink->scheduleEvent(Cycles(1));
return;
}
}
@@ -328,13 +328,13 @@ NetworkInterface::checkReschedule()
{
for (int vnet = 0; vnet < m_virtual_networks; vnet++) {
if (inNode_ptr[vnet]->isReady()) { // Is there a message waiting
- scheduleEvent(1);
+ scheduleEvent(Cycles(1));
return;
}
}
for (int vc = 0; vc < m_num_vcs; vc++) {
if (m_ni_buffers[vc]->isReadyForNext(m_net_ptr->curCycle())) {
- scheduleEvent(1);
+ scheduleEvent(Cycles(1));
return;
}
}
diff --git a/src/mem/ruby/network/garnet/flexible-pipeline/NetworkLink.hh b/src/mem/ruby/network/garnet/flexible-pipeline/NetworkLink.hh
index 45dbe7f52..3bee9f659 100644
--- a/src/mem/ruby/network/garnet/flexible-pipeline/NetworkLink.hh
+++ b/src/mem/ruby/network/garnet/flexible-pipeline/NetworkLink.hh
@@ -81,7 +81,8 @@ class NetworkLink : public ClockedObject, public FlexibleConsumer
uint32_t functionalWrite(Packet *);
protected:
- int m_id, m_latency;
+ int m_id;
+ Cycles m_latency;
int m_in_port, m_out_port;
int m_link_utilized;
std::vector<int> m_vc_load;
diff --git a/src/mem/ruby/network/garnet/flexible-pipeline/Router.cc b/src/mem/ruby/network/garnet/flexible-pipeline/Router.cc
index ca82f0757..71ee1d0bf 100644
--- a/src/mem/ruby/network/garnet/flexible-pipeline/Router.cc
+++ b/src/mem/ruby/network/garnet/flexible-pipeline/Router.cc
@@ -130,7 +130,7 @@ Router::isBufferNotFull(int vc, int inport)
// This has to be updated and arbitration performed
void
Router::request_vc(int in_vc, int in_port, NetDest destination,
- Time request_time)
+ Cycles request_time)
{
assert(m_in_vc_state[in_port][in_vc]->isInState(IDLE_, request_time));
@@ -231,7 +231,7 @@ Router::grant_vc(int out_port, int vc, Time grant_time)
{
assert(m_out_vc_state[out_port][vc]->isInState(VC_AB_, grant_time));
m_out_vc_state[out_port][vc]->grant_vc(grant_time);
- scheduleEvent(1);
+ scheduleEvent(Cycles(1));
}
void
@@ -239,7 +239,7 @@ Router::release_vc(int out_port, int vc, Time release_time)
{
assert(m_out_vc_state[out_port][vc]->isInState(ACTIVE_, release_time));
m_out_vc_state[out_port][vc]->setState(IDLE_, release_time);
- scheduleEvent(1);
+ scheduleEvent(Cycles(1));
}
// This function calculated the output port for a particular destination.
@@ -274,7 +274,8 @@ Router::routeCompute(flit *m_flit, int inport)
m_router_buffers[outport][outvc]->insert(m_flit);
if (m_net_ptr->getNumPipeStages() > 1)
- scheduleEvent(m_net_ptr->getNumPipeStages() - 1 );
+ scheduleEvent(Cycles(m_net_ptr->getNumPipeStages() - 1));
+
if ((m_flit->get_type() == HEAD_) || (m_flit->get_type() == HEAD_TAIL_)) {
NetworkMessage *nm =
safe_cast<NetworkMessage*>(m_flit->get_msg_ptr().get());
@@ -290,6 +291,7 @@ Router::routeCompute(flit *m_flit, int inport)
curCycle());
}
}
+
if ((m_flit->get_type() == TAIL_) || (m_flit->get_type() == HEAD_TAIL_)) {
m_in_vc_state[inport][invc]->setState(IDLE_, curCycle() + 1);
m_in_link[inport]->release_vc_link(invc, curCycle() + 1);
@@ -361,7 +363,7 @@ Router::scheduleOutputLinks()
m_router_buffers[port][vc_tolookat]->getTopFlit();
t_flit->set_time(curCycle() + 1 );
m_out_src_queue[port]->insert(t_flit);
- m_out_link[port]->scheduleEvent(1);
+ m_out_link[port]->scheduleEvent(Cycles(1));
break; // done for this port
}
}
@@ -383,7 +385,7 @@ Router::checkReschedule()
for (int port = 0; port < m_out_link.size(); port++) {
for (int vc = 0; vc < m_num_vcs; vc++) {
if (m_router_buffers[port][vc]->isReadyForNext(curCycle())) {
- scheduleEvent(1);
+ scheduleEvent(Cycles(1));
return;
}
}
@@ -396,7 +398,7 @@ Router::check_arbiter_reschedule()
for (int port = 0; port < m_in_link.size(); port++) {
for (int vc = 0; vc < m_num_vcs; vc++) {
if (m_in_vc_state[port][vc]->isInState(VC_AB_, curCycle() + 1)) {
- m_vc_arbiter->scheduleEvent(1);
+ m_vc_arbiter->scheduleEvent(Cycles(1));
return;
}
}
diff --git a/src/mem/ruby/network/garnet/flexible-pipeline/Router.hh b/src/mem/ruby/network/garnet/flexible-pipeline/Router.hh
index 988ec3a55..aa31fd939 100644
--- a/src/mem/ruby/network/garnet/flexible-pipeline/Router.hh
+++ b/src/mem/ruby/network/garnet/flexible-pipeline/Router.hh
@@ -60,7 +60,7 @@ class Router : public BasicRouter, public FlexibleConsumer
int link_weight);
void wakeup();
void request_vc(int in_vc, int in_port, NetDest destination,
- Time request_time);
+ Cycles request_time);
bool isBufferNotFull(int vc, int inport);
void grant_vc(int out_port, int vc, Time grant_time);
void release_vc(int out_port, int vc, Time release_time);
diff --git a/src/mem/ruby/network/simple/PerfectSwitch.cc b/src/mem/ruby/network/simple/PerfectSwitch.cc
index 37035f95f..687bdbd86 100644
--- a/src/mem/ruby/network/simple/PerfectSwitch.cc
+++ b/src/mem/ruby/network/simple/PerfectSwitch.cc
@@ -267,7 +267,7 @@ PerfectSwitch::wakeup()
// There were not enough resources
if (!enough) {
- scheduleEvent(1);
+ scheduleEvent(Cycles(1));
DPRINTF(RubyNetwork, "Can't deliver message since a node "
"is blocked\n");
DPRINTF(RubyNetwork, "Message: %s\n", (*net_msg_ptr));
diff --git a/src/mem/ruby/network/simple/Switch.cc b/src/mem/ruby/network/simple/Switch.cc
index d9abc4cc7..76d37c321 100644
--- a/src/mem/ruby/network/simple/Switch.cc
+++ b/src/mem/ruby/network/simple/Switch.cc
@@ -72,12 +72,12 @@ Switch::addInPort(const vector<MessageBuffer*>& in)
void
Switch::addOutPort(const vector<MessageBuffer*>& out,
- const NetDest& routing_table_entry, int link_latency, int bw_multiplier)
+ const NetDest& routing_table_entry, Cycles link_latency, int bw_multiplier)
{
// Create a throttle
Throttle* throttle_ptr = new Throttle(m_id, m_throttles.size(),
- link_latency, bw_multiplier, m_network_ptr->getEndpointBandwidth(),
- this);
+ link_latency, bw_multiplier, m_network_ptr->getEndpointBandwidth(),
+ this);
m_throttles.push_back(throttle_ptr);
// Create one buffer per vnet (these are intermediaryQueues)
diff --git a/src/mem/ruby/network/simple/Switch.hh b/src/mem/ruby/network/simple/Switch.hh
index 2757e6511..8c265a4bf 100644
--- a/src/mem/ruby/network/simple/Switch.hh
+++ b/src/mem/ruby/network/simple/Switch.hh
@@ -63,7 +63,7 @@ class Switch : public BasicRouter
void init();
void addInPort(const std::vector<MessageBuffer*>& in);
void addOutPort(const std::vector<MessageBuffer*>& out,
- const NetDest& routing_table_entry, int link_latency,
+ const NetDest& routing_table_entry, Cycles link_latency,
int bw_multiplier);
const Throttle* getThrottle(LinkID link_number) const;
const std::vector<Throttle*>* getThrottles() const;
diff --git a/src/mem/ruby/network/simple/Throttle.cc b/src/mem/ruby/network/simple/Throttle.cc
index b591cc81b..bb5d9cf53 100644
--- a/src/mem/ruby/network/simple/Throttle.cc
+++ b/src/mem/ruby/network/simple/Throttle.cc
@@ -48,7 +48,7 @@ const int PRIORITY_SWITCH_LIMIT = 128;
static int network_message_to_size(NetworkMessage* net_msg_ptr);
-Throttle::Throttle(int sID, NodeID node, int link_latency,
+Throttle::Throttle(int sID, NodeID node, Cycles link_latency,
int link_bandwidth_multiplier, int endpoint_bandwidth,
ClockedObject *em)
: Consumer(em)
@@ -57,7 +57,7 @@ Throttle::Throttle(int sID, NodeID node, int link_latency,
m_sID = sID;
}
-Throttle::Throttle(NodeID node, int link_latency,
+Throttle::Throttle(NodeID node, Cycles link_latency,
int link_bandwidth_multiplier, int endpoint_bandwidth,
ClockedObject *em)
: Consumer(em)
@@ -67,8 +67,8 @@ Throttle::Throttle(NodeID node, int link_latency,
}
void
-Throttle::init(NodeID node, int link_latency, int link_bandwidth_multiplier,
- int endpoint_bandwidth)
+Throttle::init(NodeID node, Cycles link_latency,
+ int link_bandwidth_multiplier, int endpoint_bandwidth)
{
m_node = node;
m_vnets = 0;
@@ -222,7 +222,7 @@ Throttle::wakeup()
// We are out of bandwidth for this cycle, so wakeup next
// cycle and continue
- scheduleEvent(1);
+ scheduleEvent(Cycles(1));
}
}
diff --git a/src/mem/ruby/network/simple/Throttle.hh b/src/mem/ruby/network/simple/Throttle.hh
index 077382041..b75161164 100644
--- a/src/mem/ruby/network/simple/Throttle.hh
+++ b/src/mem/ruby/network/simple/Throttle.hh
@@ -52,10 +52,10 @@ class MessageBuffer;
class Throttle : public Consumer
{
public:
- Throttle(int sID, NodeID node, int link_latency,
+ Throttle(int sID, NodeID node, Cycles link_latency,
int link_bandwidth_multiplier, int endpoint_bandwidth,
ClockedObject *em);
- Throttle(NodeID node, int link_latency, int link_bandwidth_multiplier,
+ Throttle(NodeID node, Cycles link_latency, int link_bandwidth_multiplier,
int endpoint_bandwidth, ClockedObject *em);
~Throttle() {}
@@ -70,13 +70,10 @@ class Throttle : public Consumer
void clearStats();
// The average utilization (a percent) since last clearStats()
double getUtilization() const;
- int
- getLinkBandwidth() const
- {
- return m_endpoint_bandwidth * m_link_bandwidth_multiplier;
- }
- int getLatency() const { return m_link_latency; }
+ int getLinkBandwidth() const
+ { return m_endpoint_bandwidth * m_link_bandwidth_multiplier; }
+ Cycles getLatency() const { return m_link_latency; }
const std::vector<std::vector<int> >&
getCounters() const
{
@@ -88,7 +85,7 @@ class Throttle : public Consumer
void print(std::ostream& out) const;
private:
- void init(NodeID node, int link_latency, int link_bandwidth_multiplier,
+ void init(NodeID node, Cycles link_latency, int link_bandwidth_multiplier,
int endpoint_bandwidth);
void addVirtualNetwork(MessageBuffer* in_ptr, MessageBuffer* out_ptr,
ClockedObject *em);
@@ -106,7 +103,7 @@ class Throttle : public Consumer
int m_sID;
NodeID m_node;
int m_link_bandwidth_multiplier;
- int m_link_latency;
+ Cycles m_link_latency;
int m_wakeups_wo_switch;
int m_endpoint_bandwidth;
diff --git a/src/mem/ruby/slicc_interface/AbstractCacheEntry.hh b/src/mem/ruby/slicc_interface/AbstractCacheEntry.hh
index 69333f481..e92f96fd5 100644
--- a/src/mem/ruby/slicc_interface/AbstractCacheEntry.hh
+++ b/src/mem/ruby/slicc_interface/AbstractCacheEntry.hh
@@ -52,8 +52,6 @@ class AbstractCacheEntry : public AbstractEntry
void changePermission(AccessPermission new_perm);
Address m_Address; // Address of this block, required by CacheMemory
- Time m_LastRef; // Last time this block was referenced, required
- // by CacheMemory
int m_locked; // Holds info whether the address is locked,
// required for implementing LL/SC
};
diff --git a/src/mem/ruby/slicc_interface/AbstractController.hh b/src/mem/ruby/slicc_interface/AbstractController.hh
index c452da723..44981a7e8 100644
--- a/src/mem/ruby/slicc_interface/AbstractController.hh
+++ b/src/mem/ruby/slicc_interface/AbstractController.hh
@@ -106,7 +106,7 @@ class AbstractController : public ClockedObject, public Consumer
protected:
int m_transitions_per_cycle;
int m_buffer_size;
- int m_recycle_latency;
+ Cycles m_recycle_latency;
std::string m_name;
NodeID m_version;
Network* m_net_ptr;
diff --git a/src/mem/ruby/slicc_interface/Controller.py b/src/mem/ruby/slicc_interface/Controller.py
index aa8f35145..5c2fd9b71 100644
--- a/src/mem/ruby/slicc_interface/Controller.py
+++ b/src/mem/ruby/slicc_interface/Controller.py
@@ -40,6 +40,6 @@ class RubyController(ClockedObject):
transitions_per_cycle = \
Param.Int(32, "no. of SLICC state machine transitions per cycle")
buffer_size = Param.Int(0, "max buffer size 0 means infinite")
- recycle_latency = Param.Int(10, "")
+ recycle_latency = Param.Cycles(10, "")
number_of_TBEs = Param.Int(256, "")
ruby_system = Param.RubySystem("");
diff --git a/src/mem/ruby/slicc_interface/RubySlicc_Util.hh b/src/mem/ruby/slicc_interface/RubySlicc_Util.hh
index f55b6eae4..622efd04c 100644
--- a/src/mem/ruby/slicc_interface/RubySlicc_Util.hh
+++ b/src/mem/ruby/slicc_interface/RubySlicc_Util.hh
@@ -37,9 +37,8 @@
#include "debug/RubySlicc.hh"
#include "mem/ruby/common/Address.hh"
-#include "mem/ruby/common/Global.hh"
#include "mem/ruby/slicc_interface/RubySlicc_ComponentMapping.hh"
-#include "mem/ruby/system/System.hh"
+#include "mem/packet.hh"
inline int
random(int n)
@@ -53,6 +52,8 @@ zero_time()
return 0;
}
+inline Cycles TimeToCycles(Time t) { return Cycles(t); }
+
inline NodeID
intToID(int nodenum)
{
diff --git a/src/mem/ruby/system/Cache.py b/src/mem/ruby/system/Cache.py
index 4b0269822..d4af1320a 100644
--- a/src/mem/ruby/system/Cache.py
+++ b/src/mem/ruby/system/Cache.py
@@ -36,7 +36,7 @@ class RubyCache(SimObject):
cxx_class = 'CacheMemory'
cxx_header = "mem/ruby/system/CacheMemory.hh"
size = Param.MemorySize("capacity in bytes");
- latency = Param.Int("");
+ latency = Param.Cycles("");
assoc = Param.Int("");
replacement_policy = Param.String("PSEUDO_LRU", "");
start_index_bit = Param.Int(6, "index start, default 6 for 64-byte line");
diff --git a/src/mem/ruby/system/CacheMemory.hh b/src/mem/ruby/system/CacheMemory.hh
index a4950a09b..6b436082f 100644
--- a/src/mem/ruby/system/CacheMemory.hh
+++ b/src/mem/ruby/system/CacheMemory.hh
@@ -92,7 +92,7 @@ class CacheMemory : public SimObject
AbstractCacheEntry* lookup(const Address& address);
const AbstractCacheEntry* lookup(const Address& address) const;
- int getLatency() const { return m_latency; }
+ Cycles getLatency() const { return m_latency; }
// Hook for checkpointing the contents of the cache
void recordCacheContents(int cntrl, CacheRecorder* tr) const;
@@ -144,7 +144,7 @@ class CacheMemory : public SimObject
private:
const std::string m_cache_name;
- int m_latency;
+ Cycles m_latency;
// Data Members (m_prefix)
bool m_is_instruction_only_cache;
diff --git a/src/mem/ruby/system/RubyMemoryControl.cc b/src/mem/ruby/system/RubyMemoryControl.cc
index 620113719..121551299 100644
--- a/src/mem/ruby/system/RubyMemoryControl.cc
+++ b/src/mem/ruby/system/RubyMemoryControl.cc
@@ -374,10 +374,10 @@ RubyMemoryControl::printStats(ostream& out) const
// Queue up a completed request to send back to directory
void
-RubyMemoryControl::enqueueToDirectory(MemoryNode req, int latency)
+RubyMemoryControl::enqueueToDirectory(MemoryNode req, Cycles latency)
{
Time arrival_time = curTick() + (latency * clock);
- Time ruby_arrival_time = arrival_time / g_system_ptr->clockPeriod();
+ Cycles ruby_arrival_time = g_system_ptr->ticksToCycles(arrival_time);
req.m_time = ruby_arrival_time;
m_response_queue.push_back(req);
diff --git a/src/mem/ruby/system/RubyMemoryControl.hh b/src/mem/ruby/system/RubyMemoryControl.hh
index 53e8fabef..bd94abaa6 100644
--- a/src/mem/ruby/system/RubyMemoryControl.hh
+++ b/src/mem/ruby/system/RubyMemoryControl.hh
@@ -100,7 +100,7 @@ class RubyMemoryControl : public MemoryControl
uint32_t functionalWriteBuffers(Packet *pkt);
private:
- void enqueueToDirectory(MemoryNode req, int latency);
+ void enqueueToDirectory(MemoryNode req, Cycles latency);
const int getRank(int bank) const;
bool queueReady(int bank);
void issueRequest(int bank);
@@ -128,11 +128,11 @@ class RubyMemoryControl : public MemoryControl
int m_rank_rank_delay;
int m_read_write_delay;
int m_basic_bus_busy_time;
- int m_mem_ctl_latency;
+ Cycles m_mem_ctl_latency;
int m_refresh_period;
int m_mem_random_arbitrate;
int m_tFaw;
- int m_mem_fixed_delay;
+ Cycles m_mem_fixed_delay;
int m_total_banks;
int m_total_ranks;
diff --git a/src/mem/ruby/system/RubyMemoryControl.py b/src/mem/ruby/system/RubyMemoryControl.py
index 7764938d3..e46b3f223 100644
--- a/src/mem/ruby/system/RubyMemoryControl.py
+++ b/src/mem/ruby/system/RubyMemoryControl.py
@@ -50,8 +50,8 @@ class RubyMemoryControl(MemoryControl):
rank_rank_delay = Param.Int(1, "");
read_write_delay = Param.Int(2, "");
basic_bus_busy_time = Param.Int(2, "");
- mem_ctl_latency = Param.Int(12, "");
- refresh_period = Param.Int(1560, "");
+ mem_ctl_latency = Param.Cycles(12, "");
+ refresh_period = Param.Cycles(1560, "");
tFaw = Param.Int(0, "");
mem_random_arbitrate = Param.Int(0, "");
- mem_fixed_delay = Param.Int(0, "");
+ mem_fixed_delay = Param.Cycles(0, "");
diff --git a/src/mem/ruby/system/Sequencer.cc b/src/mem/ruby/system/Sequencer.cc
index 9b30fdbd5..d4cfe77b1 100644
--- a/src/mem/ruby/system/Sequencer.cc
+++ b/src/mem/ruby/system/Sequencer.cc
@@ -668,7 +668,7 @@ Sequencer::issueRequest(PacketPtr pkt, RubyRequestType secondary_type)
msg->getPhysicalAddress(),
RubyRequestType_to_string(secondary_type));
- Time latency = 0; // initialzed to an null value
+ Cycles latency(0); // initialzed to an null value
if (secondary_type == RubyRequestType_IFETCH)
latency = m_instCache_ptr->getLatency();
diff --git a/src/mem/ruby/system/TimerTable.cc b/src/mem/ruby/system/TimerTable.cc
index 992401c50..d87f11662 100644
--- a/src/mem/ruby/system/TimerTable.cc
+++ b/src/mem/ruby/system/TimerTable.cc
@@ -66,12 +66,13 @@ TimerTable::readyAddress() const
}
void
-TimerTable::set(const Address& address, Time relative_latency)
+TimerTable::set(const Address& address, Cycles relative_latency)
{
assert(address == line_address(address));
assert(relative_latency > 0);
assert(!m_map.count(address));
- Time ready_time = m_clockobj_ptr->curCycle() + relative_latency;
+
+ Cycles ready_time = m_clockobj_ptr->curCycle() + relative_latency;
m_map[address] = ready_time;
assert(m_consumer_ptr != NULL);
m_consumer_ptr->scheduleEventAbsolute(ready_time);
diff --git a/src/mem/ruby/system/TimerTable.hh b/src/mem/ruby/system/TimerTable.hh
index ecd95ee19..95af2eaa7 100644
--- a/src/mem/ruby/system/TimerTable.hh
+++ b/src/mem/ruby/system/TimerTable.hh
@@ -64,7 +64,10 @@ class TimerTable
bool isReady() const;
const Address& readyAddress() const;
bool isSet(const Address& address) const { return !!m_map.count(address); }
- void set(const Address& address, Time relative_latency);
+ void set(const Address& address, Cycles relative_latency);
+ void set(const Address& address, uint64_t relative_latency)
+ { set(address, Cycles(relative_latency)); }
+
void unset(const Address& address);
void print(std::ostream& out) const;
@@ -79,7 +82,7 @@ class TimerTable
// use a std::map for the address map as this container is sorted
// and ensures a well-defined iteration order
- typedef std::map<Address, Time> AddressMap;
+ typedef std::map<Address, Cycles> AddressMap;
AddressMap m_map;
mutable bool m_next_valid;
mutable Time m_next_time; // Only valid if m_next_valid is true
diff --git a/src/mem/ruby/system/WireBuffer.cc b/src/mem/ruby/system/WireBuffer.cc
index b5a2849ce..fba53b902 100644
--- a/src/mem/ruby/system/WireBuffer.cc
+++ b/src/mem/ruby/system/WireBuffer.cc
@@ -70,12 +70,13 @@ WireBuffer::~WireBuffer()
}
void
-WireBuffer::enqueue(MsgPtr message, int latency)
+WireBuffer::enqueue(MsgPtr message, Cycles latency)
{
m_msg_counter++;
- Time current_time = g_system_ptr->getTime();
- Time arrival_time = current_time + latency;
+ Cycles current_time = g_system_ptr->getTime();
+ Cycles arrival_time = current_time + latency;
assert(arrival_time > current_time);
+
MessageBufferNode thisNode(arrival_time, m_msg_counter, message);
m_message_queue.push_back(thisNode);
if (m_consumer_ptr != NULL) {
@@ -122,11 +123,12 @@ WireBuffer::recycle()
MessageBufferNode node = m_message_queue.front();
pop_heap(m_message_queue.begin(), m_message_queue.end(),
greater<MessageBufferNode>());
- node.m_time = g_system_ptr->getTime() + 1;
+
+ node.m_time = g_system_ptr->getTime() + Cycles(1);
m_message_queue.back() = node;
push_heap(m_message_queue.begin(), m_message_queue.end(),
greater<MessageBufferNode>());
- m_consumer_ptr->scheduleEventAbsolute(g_system_ptr->getTime() + 1);
+ m_consumer_ptr->scheduleEventAbsolute(node.m_time);
}
bool
diff --git a/src/mem/ruby/system/WireBuffer.hh b/src/mem/ruby/system/WireBuffer.hh
index 07da965c8..d71bf4520 100644
--- a/src/mem/ruby/system/WireBuffer.hh
+++ b/src/mem/ruby/system/WireBuffer.hh
@@ -72,7 +72,7 @@ class WireBuffer : public SimObject
void setDescription(const std::string& name) { m_description = name; };
std::string getDescription() { return m_description; };
- void enqueue(MsgPtr message, int latency );
+ void enqueue(MsgPtr message, Cycles latency);
void dequeue();
const Message* peek();
MessageBufferNode peekNode();