Diffstat (limited to 'src/mem/ruby/network')
-rw-r--r-- | src/mem/ruby/network/MessageBuffer.cc | 104
-rw-r--r-- | src/mem/ruby/network/MessageBuffer.hh | 57
-rw-r--r-- | src/mem/ruby/network/MessageBuffer.py | 1
-rw-r--r-- | src/mem/ruby/network/garnet/fixed-pipeline/NetworkInterface_d.cc | 16
-rw-r--r-- | src/mem/ruby/network/garnet/flexible-pipeline/NetworkInterface.cc | 15
-rw-r--r-- | src/mem/ruby/network/simple/PerfectSwitch.cc | 12
-rw-r--r-- | src/mem/ruby/network/simple/SimpleNetwork.py | 6
-rw-r--r-- | src/mem/ruby/network/simple/Switch.cc | 13
-rw-r--r-- | src/mem/ruby/network/simple/Throttle.cc | 20
9 files changed, 89 insertions, 155 deletions
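
Taken together, the changes below remove the MessageBuffer's internal notion of time: the m_sender/m_receiver ClockedObject pointers and the per-buffer recycle_latency parameter go away, and callers now pass the current time (a Tick) into isReady(), enqueue(), dequeue(), areNSlotsAvailable(), getSize(), recycle(), stallMessage() and reanalyzeMessages(), converting any cycle-based latency themselves. The following sketch is illustrative only and not part of the patch: MyConsumer, m_in and m_out are hypothetical names, and only the MessageBuffer signatures are taken from the diff.

// A hypothetical ClockedObject-derived consumer, shown to illustrate the
// new call pattern; clockEdge() and cyclesToTicks() are provided by the caller.
void
MyConsumer::wakeup()
{
    Tick current_time = clockEdge();   // the caller decides what "now" means

    while (m_in->isReady(current_time)) {                // was: isReady()
        MsgPtr msg = m_in->peekMsgPtr();
        m_in->dequeue(current_time);                     // was: dequeue()

        if (m_out->areNSlotsAvailable(1, current_time)) {
            // Latencies are plain Ticks now, so a one-cycle delay is
            // converted explicitly by the caller.
            m_out->enqueue(msg, current_time, cyclesToTicks(Cycles(1)));
        }
    }
}

The PerfectSwitch, Throttle and garnet NetworkInterface hunks below follow this pattern, passing the switch's or interface's own clockEdge() into the buffer calls.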
diff --git a/src/mem/ruby/network/MessageBuffer.cc b/src/mem/ruby/network/MessageBuffer.cc
index b07bdbdca..35850f61e 100644
--- a/src/mem/ruby/network/MessageBuffer.cc
+++ b/src/mem/ruby/network/MessageBuffer.cc
@@ -40,7 +40,7 @@ using namespace std;
 using m5::stl_helpers::operator<<;
 
 MessageBuffer::MessageBuffer(const Params *p)
-    : SimObject(p), m_recycle_latency(p->recycle_latency),
+    : SimObject(p),
     m_max_size(p->buffer_size), m_time_last_time_size_checked(0),
     m_time_last_time_enqueue(0), m_time_last_time_pop(0),
     m_last_arrival_time(0), m_strict_fifo(p->ordered),
@@ -48,9 +48,6 @@ MessageBuffer::MessageBuffer(const Params *p)
 {
     m_msg_counter = 0;
     m_consumer = NULL;
-    m_sender = NULL;
-    m_receiver = NULL;
-
     m_size_last_time_size_checked = 0;
     m_size_at_cycle_start = 0;
     m_msgs_this_cycle = 0;
@@ -63,10 +60,10 @@ MessageBuffer::MessageBuffer(const Params *p)
 }
 
 unsigned int
-MessageBuffer::getSize()
+MessageBuffer::getSize(Tick curTime)
 {
-    if (m_time_last_time_size_checked != m_receiver->curCycle()) {
-        m_time_last_time_size_checked = m_receiver->curCycle();
+    if (m_time_last_time_size_checked != curTime) {
+        m_time_last_time_size_checked = curTime;
         m_size_last_time_size_checked = m_prio_heap.size();
     }
 
@@ -74,7 +71,7 @@ MessageBuffer::getSize()
 }
 
 bool
-MessageBuffer::areNSlotsAvailable(unsigned int n)
+MessageBuffer::areNSlotsAvailable(unsigned int n, Tick current_time)
 {
 
     // fast path when message buffers have infinite size
@@ -88,11 +85,11 @@ MessageBuffer::areNSlotsAvailable(unsigned int n)
     // size immediately
     unsigned int current_size = 0;
 
-    if (m_time_last_time_pop < m_sender->clockEdge()) {
+    if (m_time_last_time_pop < current_time) {
         // no pops this cycle - heap size is correct
         current_size = m_prio_heap.size();
     } else {
-        if (m_time_last_time_enqueue < m_sender->curCycle()) {
+        if (m_time_last_time_enqueue < current_time) {
             // no enqueues this cycle - m_size_at_cycle_start is correct
             current_size = m_size_at_cycle_start;
         } else {
@@ -118,8 +115,6 @@ const Message*
 MessageBuffer::peek() const
 {
     DPRINTF(RubyQueue, "Peeking at head of queue.\n");
-    assert(isReady());
-
     const Message* msg_ptr = m_prio_heap.front().get();
     assert(msg_ptr);
 
@@ -128,24 +123,24 @@ MessageBuffer::peek() const
 }
 
 // FIXME - move me somewhere else
-Cycles
+Tick
 random_time()
 {
-    Cycles time(1);
-    time += Cycles(random_mt.random(0, 3));  // [0...3]
+    Tick time = 1;
+    time += random_mt.random(0, 3);  // [0...3]
     if (random_mt.random(0, 7) == 0) {  // 1 in 8 chance
-        time += Cycles(100 + random_mt.random(1, 15)); // 100 + [1...15]
+        time += 100 + random_mt.random(1, 15); // 100 + [1...15]
     }
     return time;
 }
 
 void
-MessageBuffer::enqueue(MsgPtr message, Cycles delta)
+MessageBuffer::enqueue(MsgPtr message, Tick current_time, Tick delta)
 {
     // record current time incase we have a pop that also adjusts my size
-    if (m_time_last_time_enqueue < m_sender->curCycle()) {
+    if (m_time_last_time_enqueue < current_time) {
         m_msgs_this_cycle = 0;  // first msg this cycle
-        m_time_last_time_enqueue = m_sender->curCycle();
+        m_time_last_time_enqueue = current_time;
     }
 
     m_msg_counter++;
@@ -154,23 +149,20 @@ MessageBuffer::enqueue(MsgPtr message, Cycles delta)
 
     // Calculate the arrival time of the message, that is, the first
     // cycle the message can be dequeued.
     assert(delta > 0);
-    Tick current_time = m_sender->clockEdge();
     Tick arrival_time = 0;
 
     if (!RubySystem::getRandomization() || !m_randomization) {
         // No randomization
-        arrival_time = current_time + delta * m_sender->clockPeriod();
+        arrival_time = current_time + delta;
     } else {
         // Randomization - ignore delta
         if (m_strict_fifo) {
             if (m_last_arrival_time < current_time) {
                 m_last_arrival_time = current_time;
             }
 
-            arrival_time = m_last_arrival_time +
-                random_time() * m_sender->clockPeriod();
+            arrival_time = m_last_arrival_time + random_time();
         } else {
-            arrival_time = current_time +
-                random_time() * m_sender->clockPeriod();
+            arrival_time = current_time + random_time();
         }
     }
 
@@ -180,9 +172,8 @@ MessageBuffer::enqueue(MsgPtr message, Cycles delta)
     if (arrival_time < m_last_arrival_time) {
         panic("FIFO ordering violated: %s name: %s current time: %d "
               "delta: %d arrival_time: %d last arrival_time: %d\n",
-              *this, name(), current_time,
-              delta * m_sender->clockPeriod(),
-              arrival_time, m_last_arrival_time);
+              *this, name(), current_time, delta, arrival_time,
+              m_last_arrival_time);
     }
 }
 
@@ -195,10 +186,10 @@ MessageBuffer::enqueue(MsgPtr message, Cycles delta)
     Message* msg_ptr = message.get();
     assert(msg_ptr != NULL);
 
-    assert(m_sender->clockEdge() >= msg_ptr->getLastEnqueueTime() &&
+    assert(current_time >= msg_ptr->getLastEnqueueTime() &&
           "ensure we aren't dequeued early");
 
-    msg_ptr->updateDelayedTicks(m_sender->clockEdge());
+    msg_ptr->updateDelayedTicks(current_time);
     msg_ptr->setLastEnqueueTime(arrival_time);
     msg_ptr->setMsgCounter(m_msg_counter);
 
@@ -215,32 +206,30 @@ MessageBuffer::enqueue(MsgPtr message, Cycles delta)
     m_consumer->storeEventInfo(m_vnet_id);
 }
 
-Cycles
-MessageBuffer::dequeue()
+Tick
+MessageBuffer::dequeue(Tick current_time)
 {
     DPRINTF(RubyQueue, "Popping\n");
-    assert(isReady());
+    assert(isReady(current_time));
 
     // get MsgPtr of the message about to be dequeued
     MsgPtr message = m_prio_heap.front();
 
     // get the delay cycles
-    message->updateDelayedTicks(m_receiver->clockEdge());
-    Cycles delayCycles =
-        m_receiver->ticksToCycles(message->getDelayedTicks());
+    message->updateDelayedTicks(current_time);
+    Tick delay = message->getDelayedTicks();
 
     // record previous size and time so the current buffer size isn't
     // adjusted until schd cycle
-    if (m_time_last_time_pop < m_receiver->clockEdge()) {
+    if (m_time_last_time_pop < current_time) {
         m_size_at_cycle_start = m_prio_heap.size();
-        m_time_last_time_pop = m_receiver->clockEdge();
+        m_time_last_time_pop = current_time;
     }
 
-    pop_heap(m_prio_heap.begin(), m_prio_heap.end(),
-             greater<MsgPtr>());
+    pop_heap(m_prio_heap.begin(), m_prio_heap.end(), greater<MsgPtr>());
     m_prio_heap.pop_back();
 
-    return delayCycles;
+    return delay;
 }
 
 void
@@ -249,25 +238,26 @@ MessageBuffer::clear()
     m_prio_heap.clear();
 
     m_msg_counter = 0;
-    m_time_last_time_enqueue = Cycles(0);
+    m_time_last_time_enqueue = 0;
     m_time_last_time_pop = 0;
     m_size_at_cycle_start = 0;
     m_msgs_this_cycle = 0;
 }
 
 void
-MessageBuffer::recycle()
+MessageBuffer::recycle(Tick current_time, Tick recycle_latency)
 {
     DPRINTF(RubyQueue, "Recycling.\n");
-    assert(isReady());
+    assert(isReady(current_time));
 
     MsgPtr node = m_prio_heap.front();
     pop_heap(m_prio_heap.begin(), m_prio_heap.end(), greater<MsgPtr>());
 
-    node->setLastEnqueueTime(m_receiver->clockEdge(m_recycle_latency));
+    Tick future_time = current_time + recycle_latency;
+    node->setLastEnqueueTime(future_time);
+
     m_prio_heap.back() = node;
     push_heap(m_prio_heap.begin(), m_prio_heap.end(), greater<MsgPtr>());
 
-    m_consumer->
-        scheduleEventAbsolute(m_receiver->clockEdge(m_recycle_latency));
+    m_consumer->scheduleEventAbsolute(future_time);
 }
 
 void
@@ -289,11 +279,10 @@ MessageBuffer::reanalyzeList(list<MsgPtr> &lt, Tick schdTick)
 }
 
 void
-MessageBuffer::reanalyzeMessages(Addr addr)
+MessageBuffer::reanalyzeMessages(Addr addr, Tick current_time)
 {
     DPRINTF(RubyQueue, "ReanalyzeMessages %s\n", addr);
     assert(m_stall_msg_map.count(addr) > 0);
-    Tick curTick = m_receiver->clockEdge();
 
     //
     // Put all stalled messages associated with this address back on the
@@ -301,15 +290,14 @@ MessageBuffer::reanalyzeMessages(Addr addr)
     // scheduled for the current cycle so that the previously stalled messages
     // will be observed before any younger messages that may arrive this cycle
     //
-    reanalyzeList(m_stall_msg_map[addr], curTick);
+    reanalyzeList(m_stall_msg_map[addr], current_time);
     m_stall_msg_map.erase(addr);
 }
 
 void
-MessageBuffer::reanalyzeAllMessages()
+MessageBuffer::reanalyzeAllMessages(Tick current_time)
 {
     DPRINTF(RubyQueue, "ReanalyzeAllMessages\n");
-    Tick curTick = m_receiver->clockEdge();
 
     //
     // Put all stalled messages associated with this address back on the
@@ -319,20 +307,20 @@ MessageBuffer::reanalyzeAllMessages()
     //
     for (StallMsgMapType::iterator map_iter = m_stall_msg_map.begin();
          map_iter != m_stall_msg_map.end(); ++map_iter) {
-        reanalyzeList(map_iter->second, curTick);
+        reanalyzeList(map_iter->second, current_time);
     }
 
     m_stall_msg_map.clear();
 }
 
 void
-MessageBuffer::stallMessage(Addr addr)
+MessageBuffer::stallMessage(Addr addr, Tick current_time)
 {
     DPRINTF(RubyQueue, "Stalling due to %s\n", addr);
-    assert(isReady());
+    assert(isReady(current_time));
     assert(getOffset(addr) == 0);
     MsgPtr message = m_prio_heap.front();
 
-    dequeue();
+    dequeue(current_time);
 
     //
     // Note: no event is scheduled to analyze the map at a later time.
@@ -356,10 +344,10 @@ MessageBuffer::print(ostream& out) const
 }
 
 bool
-MessageBuffer::isReady() const
+MessageBuffer::isReady(Tick current_time) const
 {
     return ((m_prio_heap.size() > 0) &&
-        (m_prio_heap.front()->getLastEnqueueTime() <= m_receiver->clockEdge()));
+        (m_prio_heap.front()->getLastEnqueueTime() <= current_time));
 }
 
 bool
diff --git a/src/mem/ruby/network/MessageBuffer.hh b/src/mem/ruby/network/MessageBuffer.hh
index 4209aea0f..4fdf4978d 100644
--- a/src/mem/ruby/network/MessageBuffer.hh
+++ b/src/mem/ruby/network/MessageBuffer.hh
@@ -55,24 +55,24 @@ class MessageBuffer : public SimObject
     typedef MessageBufferParams Params;
     MessageBuffer(const Params *p);
 
-    void reanalyzeMessages(Addr addr);
-    void reanalyzeAllMessages();
-    void stallMessage(Addr addr);
+    void reanalyzeMessages(Addr addr, Tick current_time);
+    void reanalyzeAllMessages(Tick current_time);
+    void stallMessage(Addr addr, Tick current_time);
 
     // TRUE if head of queue timestamp <= SystemTime
-    bool isReady() const;
+    bool isReady(Tick current_time) const;
 
     void
-    delayHead()
+    delayHead(Tick current_time, Tick delta)
     {
         MsgPtr m = m_prio_heap.front();
         std::pop_heap(m_prio_heap.begin(), m_prio_heap.end(),
                       std::greater<MsgPtr>());
         m_prio_heap.pop_back();
-        enqueue(m, Cycles(1));
+        enqueue(m, current_time, delta);
     }
 
-    bool areNSlotsAvailable(unsigned int n);
+    bool areNSlotsAvailable(unsigned int n, Tick curTime);
     int getPriority() { return m_priority_rank; }
     void setPriority(int rank) { m_priority_rank = rank; }
     void setConsumer(Consumer* consumer)
@@ -86,20 +86,6 @@ class MessageBuffer : public SimObject
         m_consumer = consumer;
     }
 
-    void setSender(ClockedObject* obj)
-    {
-        DPRINTF(RubyQueue, "Setting sender: %s\n", obj->name());
-        assert(m_sender == NULL || m_sender == obj);
-        m_sender = obj;
-    }
-
-    void setReceiver(ClockedObject* obj)
-    {
-        DPRINTF(RubyQueue, "Setting receiver: %s\n", obj->name());
-        assert(m_receiver == NULL || m_receiver == obj);
-        m_receiver = obj;
-    }
-
     Consumer* getConsumer() { return m_consumer; }
 
     bool getOrdered() { return m_strict_fifo; }
@@ -108,26 +94,20 @@ class MessageBuffer : public SimObject
     //! message queue.  The function assumes that the queue is nonempty.
     const Message* peek() const;
 
-    const MsgPtr&
-    peekMsgPtr() const
-    {
-        assert(isReady());
-        return m_prio_heap.front();
-    }
+    const MsgPtr &peekMsgPtr() const { return m_prio_heap.front(); }
 
-    void enqueue(MsgPtr message) { enqueue(message, Cycles(1)); }
-    void enqueue(MsgPtr message, Cycles delta);
+    void enqueue(MsgPtr message, Tick curTime, Tick delta);
 
     //! Updates the delay cycles of the message at the head of the queue,
     //! removes it from the queue and returns its total delay.
-    Cycles dequeue();
+    Tick dequeue(Tick current_time);
 
-    void recycle();
+    void recycle(Tick current_time, Tick recycle_latency);
     bool isEmpty() const { return m_prio_heap.size() == 0; }
     bool isStallMapEmpty() { return m_stall_msg_map.size() == 0; }
     unsigned int getStallMapSize() { return m_stall_msg_map.size(); }
 
-    unsigned int getSize();
+    unsigned int getSize(Tick curTime);
     void clear();
     void print(std::ostream& out) const;
@@ -148,17 +128,10 @@ class MessageBuffer : public SimObject
     uint32_t functionalWrite(Packet *pkt);
 
   private:
-    //added by SS
-    const Cycles m_recycle_latency;
-
     void reanalyzeList(std::list<MsgPtr> &, Tick);
 
   private:
     // Data Members (m_ prefix)
-    //! The two ends of the buffer.
-    ClockedObject* m_sender;
-    ClockedObject* m_receiver;
-
     //! Consumer to signal a wakeup(), can be NULL
     Consumer* m_consumer;
     std::vector<MsgPtr> m_prio_heap;
@@ -170,12 +143,12 @@ class MessageBuffer : public SimObject
     StallMsgMapType m_stall_msg_map;
 
     const unsigned int m_max_size;
-    Cycles m_time_last_time_size_checked;
+    Tick m_time_last_time_size_checked;
     unsigned int m_size_last_time_size_checked;
 
     // variables used so enqueues appear to happen immediately, while
     // pop happen the next cycle
-    Cycles m_time_last_time_enqueue;
+    Tick m_time_last_time_enqueue;
     Tick m_time_last_time_pop;
     Tick m_last_arrival_time;
 
@@ -193,7 +166,7 @@ class MessageBuffer : public SimObject
     int m_vnet_id;
 };
 
-Cycles random_time();
+Tick random_time();
 
 inline std::ostream&
 operator<<(std::ostream& out, const MessageBuffer& obj)
diff --git a/src/mem/ruby/network/MessageBuffer.py b/src/mem/ruby/network/MessageBuffer.py
index 88c528e30..d8a028532 100644
--- a/src/mem/ruby/network/MessageBuffer.py
+++ b/src/mem/ruby/network/MessageBuffer.py
@@ -37,7 +37,6 @@ class MessageBuffer(SimObject):
     ordered = Param.Bool(False, "Whether the buffer is ordered")
     buffer_size = Param.Unsigned(0, "Maximum number of entries to buffer \
                                      (0 allows infinite entries)")
-    recycle_latency = Param.Cycles(Parent.recycle_latency, "")
     randomization = Param.Bool(False, "")
 
     master = MasterPort("Master port to MessageBuffer receiver")
diff --git a/src/mem/ruby/network/garnet/fixed-pipeline/NetworkInterface_d.cc b/src/mem/ruby/network/garnet/fixed-pipeline/NetworkInterface_d.cc
index c7bd6178a..e350eba6b 100644
--- a/src/mem/ruby/network/garnet/fixed-pipeline/NetworkInterface_d.cc
+++ b/src/mem/ruby/network/garnet/fixed-pipeline/NetworkInterface_d.cc
@@ -115,13 +115,6 @@ NetworkInterface_d::addNode(vector<MessageBuffer *>& in,
     for (auto& it : in) {
         if (it != nullptr) {
             it->setConsumer(this);
-            it->setReceiver(this);
-        }
-    }
-
-    for (auto& it : out) {
-        if (it != nullptr) {
-            it->setSender(this);
         }
     }
 }
@@ -222,6 +215,7 @@ NetworkInterface_d::wakeup()
     DPRINTF(RubyNetwork, "m_id: %d woke up at time: %lld", m_id, curCycle());
 
     MsgPtr msg_ptr;
+    Tick curTime = clockEdge();
 
     // Checking for messages coming from the protocol
     // can pick up a message/cycle for each virtual net
@@ -231,10 +225,10 @@ NetworkInterface_d::wakeup()
             continue;
         }
 
-        while (b->isReady()) { // Is there a message waiting
+        while (b->isReady(curTime)) { // Is there a message waiting
             msg_ptr = b->peekMsgPtr();
             if (flitisizeMessage(msg_ptr, vnet)) {
-                b->dequeue();
+                b->dequeue(curTime);
             } else {
                 break;
             }
@@ -253,7 +247,7 @@ NetworkInterface_d::wakeup()
             free_signal = true;
 
            outNode_ptr[t_flit->get_vnet()]->enqueue(
-                t_flit->get_msg_ptr(), Cycles(1));
+                t_flit->get_msg_ptr(), curTime, cyclesToTicks(Cycles(1)));
         }
         // Simply send a credit back since we are not buffering
         // this flit in the NI
@@ -363,7 +357,7 @@ NetworkInterface_d::checkReschedule()
             continue;
         }
 
-        while (it->isReady()) { // Is there a message waiting
+        while (it->isReady(clockEdge())) { // Is there a message waiting
             scheduleEvent(Cycles(1));
             return;
         }
diff --git a/src/mem/ruby/network/garnet/flexible-pipeline/NetworkInterface.cc b/src/mem/ruby/network/garnet/flexible-pipeline/NetworkInterface.cc
index d834ea1a3..3d75ef8c2 100644
--- a/src/mem/ruby/network/garnet/flexible-pipeline/NetworkInterface.cc
+++ b/src/mem/ruby/network/garnet/flexible-pipeline/NetworkInterface.cc
@@ -99,13 +99,6 @@ NetworkInterface::addNode(vector<MessageBuffer*>& in,
     for (auto& it: in) {
         if (it != nullptr) {
             it->setConsumer(this);
-            it->setReceiver(this);
-        }
-    }
-
-    for (auto& it : out) {
-        if (it != nullptr) {
-            it->setSender(this);
         }
     }
 }
@@ -250,10 +243,10 @@ NetworkInterface::wakeup()
             continue;
         }
 
-        while (b->isReady()) { // Is there a message waiting
+        while (b->isReady(clockEdge())) { // Is there a message waiting
             msg_ptr = b->peekMsgPtr();
             if (flitisizeMessage(msg_ptr, vnet)) {
-                b->dequeue();
+                b->dequeue(clockEdge());
             } else {
                 break;
             }
@@ -272,7 +265,7 @@ NetworkInterface::wakeup()
             m_id, curCycle());
 
         outNode_ptr[t_flit->get_vnet()]->enqueue(
-            t_flit->get_msg_ptr(), Cycles(1));
+            t_flit->get_msg_ptr(), clockEdge(), cyclesToTicks(Cycles(1)));
 
         // signal the upstream router that this vc can be freed now
         inNetLink->release_vc_link(t_flit->get_vc(),
@@ -334,7 +327,7 @@ NetworkInterface::checkReschedule()
             continue;
         }
 
-        while (it->isReady()) { // Is there a message waiting
+        while (it->isReady(clockEdge())) { // Is there a message waiting
             scheduleEvent(Cycles(1));
             return;
         }
diff --git a/src/mem/ruby/network/simple/PerfectSwitch.cc b/src/mem/ruby/network/simple/PerfectSwitch.cc
index 697357ccb..301d453c5 100644
--- a/src/mem/ruby/network/simple/PerfectSwitch.cc
+++ b/src/mem/ruby/network/simple/PerfectSwitch.cc
@@ -144,8 +144,9 @@ PerfectSwitch::operateMessageBuffer(MessageBuffer *buffer, int incoming,
     // temporary vectors to store the routing results
     vector<LinkID> output_links;
     vector<NetDest> output_link_destinations;
+    Tick current_time = m_switch->clockEdge();
 
-    while (buffer->isReady()) {
+    while (buffer->isReady(current_time)) {
         DPRINTF(RubyNetwork, "incoming: %d\n", incoming);
 
         // Peek at message
@@ -176,7 +177,7 @@ PerfectSwitch::operateMessageBuffer(MessageBuffer *buffer, int incoming,
             for (int out = 0; out < m_out.size(); out++) {
                 int out_queue_length = 0;
                 for (int v = 0; v < m_virtual_networks; v++) {
-                    out_queue_length += m_out[out][v]->getSize();
+                    out_queue_length += m_out[out][v]->getSize(current_time);
                 }
 
                 int value = (out_queue_length << 8) |
@@ -220,7 +221,7 @@ PerfectSwitch::operateMessageBuffer(MessageBuffer *buffer, int incoming,
             for (int i = 0; i < output_links.size(); i++) {
                 int outgoing = output_links[i];
 
-                if (!m_out[outgoing][vnet]->areNSlotsAvailable(1))
+                if (!m_out[outgoing][vnet]->areNSlotsAvailable(1, current_time))
                     enough = false;
 
                 DPRINTF(RubyNetwork, "Checking if node is blocked ..."
@@ -251,7 +252,7 @@ PerfectSwitch::operateMessageBuffer(MessageBuffer *buffer, int incoming,
         }
 
         // Dequeue msg
-        buffer->dequeue();
+        buffer->dequeue(current_time);
         m_pending_message_count[vnet]--;
 
         // Enqueue it - for all outgoing queues
@@ -273,7 +274,8 @@ PerfectSwitch::operateMessageBuffer(MessageBuffer *buffer, int incoming,
                     "inport[%d][%d] to outport [%d][%d].\n",
                     incoming, vnet, outgoing, vnet);
 
-            m_out[outgoing][vnet]->enqueue(msg_ptr);
+            m_out[outgoing][vnet]->enqueue(msg_ptr, current_time,
+                                           m_switch->cyclesToTicks(Cycles(1)));
         }
     }
 }
diff --git a/src/mem/ruby/network/simple/SimpleNetwork.py b/src/mem/ruby/network/simple/SimpleNetwork.py
index f4ec440a3..87de0fb46 100644
--- a/src/mem/ruby/network/simple/SimpleNetwork.py
+++ b/src/mem/ruby/network/simple/SimpleNetwork.py
@@ -41,9 +41,6 @@ class SimpleNetwork(RubyNetwork):
     endpoint_bandwidth = Param.Int(1000, "bandwidth adjustment factor");
     adaptive_routing = Param.Bool(False, "enable adaptive routing");
     int_link_buffers = VectorParam.MessageBuffer("Buffers for int_links")
-    # int_links do not recycle buffers, so this parameter is not used.
-    # TODO: Move recycle_latency out of MessageBuffers and into controllers
-    recycle_latency = Param.Cycles(0, "")
 
     def setup_buffers(self):
         # Note that all SimpleNetwork MessageBuffers are currently ordered
@@ -82,6 +79,3 @@ class Switch(BasicRouter):
     virt_nets = Param.Int(Parent.number_of_virtual_networks,
                           "number of virtual networks")
     port_buffers = VectorParam.MessageBuffer("Port buffers")
-    # Ports do not recycle buffers, so this parameter is not used.
-    # TODO: Move recycle_latency out of MessageBuffers and into controllers
-    recycle_latency = Param.Cycles(0, "")
diff --git a/src/mem/ruby/network/simple/Switch.cc b/src/mem/ruby/network/simple/Switch.cc
index b9d0b8010..0951ef138 100644
--- a/src/mem/ruby/network/simple/Switch.cc
+++ b/src/mem/ruby/network/simple/Switch.cc
@@ -69,12 +69,6 @@ void
 Switch::addInPort(const vector<MessageBuffer*>& in)
 {
     m_perfect_switch->addInPort(in);
-
-    for (auto& it : in) {
-        if (it != nullptr) {
-            it->setReceiver(this);
-        }
-    }
 }
 
 void
@@ -95,17 +89,10 @@ Switch::addOutPort(const vector<MessageBuffer*>& out,
     vector<MessageBuffer*> intermediateBuffers;
 
     for (int i = 0; i < out.size(); ++i) {
-        if (out[i] != nullptr) {
-            out[i]->setSender(this);
-        }
-
         assert(m_num_connected_buffers < m_port_buffers.size());
         MessageBuffer* buffer_ptr = m_port_buffers[m_num_connected_buffers];
         m_num_connected_buffers++;
         intermediateBuffers.push_back(buffer_ptr);
-
-        buffer_ptr->setSender(this);
-        buffer_ptr->setReceiver(this);
     }
 
     // Hook the queues to the PerfectSwitch
diff --git a/src/mem/ruby/network/simple/Throttle.cc b/src/mem/ruby/network/simple/Throttle.cc
index 01d1f6fbe..3863ab944 100644
--- a/src/mem/ruby/network/simple/Throttle.cc
+++ b/src/mem/ruby/network/simple/Throttle.cc
@@ -94,14 +94,16 @@ Throttle::operateVnet(int vnet, int &bw_remaining, bool &schedule_wakeup,
     if (out == nullptr || in == nullptr) {
         return;
     }
-    assert(m_units_remaining[vnet] >= 0);
-    while (bw_remaining > 0 && (in->isReady() || m_units_remaining[vnet] > 0) &&
-           out->areNSlotsAvailable(1)) {
+    assert(m_units_remaining[vnet] >= 0);
+    Tick current_time = m_switch->clockEdge();
+    while (bw_remaining > 0 && (in->isReady(current_time) ||
+                                m_units_remaining[vnet] > 0) &&
+           out->areNSlotsAvailable(1, current_time)) {
 
         // See if we are done transferring the previous message on
         // this virtual network
-        if (m_units_remaining[vnet] == 0 && in->isReady()) {
+        if (m_units_remaining[vnet] == 0 && in->isReady(current_time)) {
 
             // Find the size of the message we are moving
             MsgPtr msg_ptr = in->peekMsgPtr();
             Message *net_msg_ptr = msg_ptr.get();
@@ -114,8 +116,9 @@ Throttle::operateVnet(int vnet, int &bw_remaining, bool &schedule_wakeup,
             m_ruby_system->curCycle());
 
             // Move the message
-            in->dequeue();
-            out->enqueue(msg_ptr, m_link_latency);
+            in->dequeue(current_time);
+            out->enqueue(msg_ptr, current_time,
+                         m_switch->cyclesToTicks(m_link_latency));
 
             // Count the message
             m_msg_counts[net_msg_ptr->getMessageSize()][vnet]++;
@@ -128,8 +131,9 @@ Throttle::operateVnet(int vnet, int &bw_remaining, bool &schedule_wakeup,
         bw_remaining = max(0, -diff);
     }
 
-    if (bw_remaining > 0 && (in->isReady() || m_units_remaining[vnet] > 0) &&
-        !out->areNSlotsAvailable(1)) {
+    if (bw_remaining > 0 && (in->isReady(current_time) ||
+                             m_units_remaining[vnet] > 0) &&
+        !out->areNSlotsAvailable(1, current_time)) {
         DPRINTF(RubyNetwork, "vnet: %d", vnet);
 
         // schedule me to wakeup again because I'm waiting for my