From 5ab13e2deb8f904ef2a233749193fa09ea7013c4 Mon Sep 17 00:00:00 2001 From: Nathan Binkert Date: Mon, 22 Mar 2010 18:43:53 -0700 Subject: ruby: style pass --- src/mem/ruby/system/Sequencer.cc | 880 ++++++++++++++++++++------------------- 1 file changed, 462 insertions(+), 418 deletions(-) (limited to 'src/mem/ruby/system/Sequencer.cc') diff --git a/src/mem/ruby/system/Sequencer.cc b/src/mem/ruby/system/Sequencer.cc index d6dba08f9..a473ccf44 100644 --- a/src/mem/ruby/system/Sequencer.cc +++ b/src/mem/ruby/system/Sequencer.cc @@ -1,4 +1,3 @@ - /* * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood * All rights reserved. @@ -27,22 +26,21 @@ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ -#include "mem/ruby/libruby.hh" -#include "mem/ruby/common/Global.hh" -#include "mem/ruby/system/Sequencer.hh" -#include "mem/ruby/system/System.hh" -#include "mem/protocol/Protocol.hh" -#include "mem/ruby/profiler/Profiler.hh" -#include "mem/ruby/system/CacheMemory.hh" +#include "cpu/rubytest/RubyTester.hh" +#include "mem/gems_common/Map.hh" #include "mem/protocol/CacheMsg.hh" -#include "mem/ruby/recorder/Tracer.hh" -#include "mem/ruby/common/SubBlock.hh" #include "mem/protocol/Protocol.hh" -#include "mem/gems_common/Map.hh" +#include "mem/protocol/Protocol.hh" #include "mem/ruby/buffers/MessageBuffer.hh" +#include "mem/ruby/common/Global.hh" +#include "mem/ruby/common/SubBlock.hh" +#include "mem/ruby/libruby.hh" +#include "mem/ruby/profiler/Profiler.hh" +#include "mem/ruby/recorder/Tracer.hh" #include "mem/ruby/slicc_interface/AbstractController.hh" -#include "cpu/rubytest/RubyTester.hh" - +#include "mem/ruby/system/CacheMemory.hh" +#include "mem/ruby/system/Sequencer.hh" +#include "mem/ruby/system/System.hh" #include "params/RubySequencer.hh" Sequencer * @@ -50,7 +48,7 @@ RubySequencerParams::create() { return new Sequencer(this); } - + Sequencer::Sequencer(const Params *p) : RubyPort(p), deadlockCheckEvent(this) { @@ -58,7 +56,7 @@ Sequencer::Sequencer(const Params *p) m_store_waiting_on_store_cycles = 0; m_load_waiting_on_store_cycles = 0; m_load_waiting_on_load_cycles = 0; - + m_outstanding_count = 0; m_max_outstanding_requests = 0; @@ -78,478 +76,524 @@ Sequencer::Sequencer(const Params *p) assert(m_dataCache_ptr != NULL); } -Sequencer::~Sequencer() { - +Sequencer::~Sequencer() +{ } -void Sequencer::wakeup() { - // Check for deadlock of any of the requests - Time current_time = g_eventQueue_ptr->getTime(); - - // Check across all outstanding requests - int total_outstanding = 0; - - Vector
<Address> keys = m_readRequestTable.keys();
-  for (int i=0; i<keys.size(); i++) {
-    SequencerRequest* request = m_readRequestTable.lookup(keys[i]);
-    if (current_time - request->issue_time >= m_deadlock_threshold) {
-      WARN_MSG("Possible Deadlock detected");
-      WARN_EXPR(request);
-      WARN_EXPR(m_version);
-      WARN_EXPR(request->ruby_request.paddr);
-      WARN_EXPR(keys.size());
-      WARN_EXPR(current_time);
-      WARN_EXPR(request->issue_time);
-      WARN_EXPR(current_time - request->issue_time);
-      ERROR_MSG("Aborting");
+void
+Sequencer::wakeup()
+{
+    // Check for deadlock of any of the requests
+    Time current_time = g_eventQueue_ptr->getTime();
+
+    // Check across all outstanding requests
+    int total_outstanding = 0;
+
+    Vector<Address>
keys = m_readRequestTable.keys(); + for (int i = 0; i < keys.size(); i++) { + SequencerRequest* request = m_readRequestTable.lookup(keys[i]); + if (current_time - request->issue_time >= m_deadlock_threshold) { + WARN_MSG("Possible Deadlock detected"); + WARN_EXPR(request); + WARN_EXPR(m_version); + WARN_EXPR(request->ruby_request.paddr); + WARN_EXPR(keys.size()); + WARN_EXPR(current_time); + WARN_EXPR(request->issue_time); + WARN_EXPR(current_time - request->issue_time); + ERROR_MSG("Aborting"); + } } - } - - keys = m_writeRequestTable.keys(); - for (int i=0; iissue_time >= m_deadlock_threshold) { - WARN_MSG("Possible Deadlock detected"); - WARN_EXPR(request); - WARN_EXPR(m_version); - WARN_EXPR(current_time); - WARN_EXPR(request->issue_time); - WARN_EXPR(current_time - request->issue_time); - WARN_EXPR(keys.size()); - ERROR_MSG("Aborting"); + + keys = m_writeRequestTable.keys(); + for (int i = 0; i < keys.size(); i++) { + SequencerRequest* request = m_writeRequestTable.lookup(keys[i]); + if (current_time - request->issue_time >= m_deadlock_threshold) { + WARN_MSG("Possible Deadlock detected"); + WARN_EXPR(request); + WARN_EXPR(m_version); + WARN_EXPR(current_time); + WARN_EXPR(request->issue_time); + WARN_EXPR(current_time - request->issue_time); + WARN_EXPR(keys.size()); + ERROR_MSG("Aborting"); + } } - } - total_outstanding += m_writeRequestTable.size() + m_readRequestTable.size(); - assert(m_outstanding_count == total_outstanding); + total_outstanding += m_writeRequestTable.size(); + total_outstanding += m_readRequestTable.size(); - if (m_outstanding_count > 0) { // If there are still outstanding requests, keep checking - schedule(deadlockCheckEvent, - (m_deadlock_threshold * g_eventQueue_ptr->getClock()) + curTick); - } + assert(m_outstanding_count == total_outstanding); + + if (m_outstanding_count > 0) { + // If there are still outstanding requests, keep checking + schedule(deadlockCheckEvent, + m_deadlock_threshold * g_eventQueue_ptr->getClock() + + curTick); + } } -void Sequencer::printStats(ostream & out) const { - out << "Sequencer: " << m_name << endl; - out << " store_waiting_on_load_cycles: " << m_store_waiting_on_load_cycles << endl; - out << " store_waiting_on_store_cycles: " << m_store_waiting_on_store_cycles << endl; - out << " load_waiting_on_load_cycles: " << m_load_waiting_on_load_cycles << endl; - out << " load_waiting_on_store_cycles: " << m_load_waiting_on_store_cycles << endl; +void +Sequencer::printStats(ostream & out) const +{ + out << "Sequencer: " << m_name << endl + << " store_waiting_on_load_cycles: " + << m_store_waiting_on_load_cycles << endl + << " store_waiting_on_store_cycles: " + << m_store_waiting_on_store_cycles << endl + << " load_waiting_on_load_cycles: " + << m_load_waiting_on_load_cycles << endl + << " load_waiting_on_store_cycles: " + << m_load_waiting_on_store_cycles << endl; } -void Sequencer::printProgress(ostream& out) const{ - /* - int total_demand = 0; - out << "Sequencer Stats Version " << m_version << endl; - out << "Current time = " << g_eventQueue_ptr->getTime() << endl; - out << "---------------" << endl; - out << "outstanding requests" << endl; - - Vector
<Address> rkeys = m_readRequestTable.keys();
-  int read_size = rkeys.size();
-  out << "proc " << m_version << " Read Requests = " << read_size << endl;
-  // print the request table
-  for(int i=0; i < read_size; ++i){
-    SequencerRequest * request = m_readRequestTable.lookup(rkeys[i]);
-    out << "\tRequest[ " << i << " ] = " << request->type << " Address " << rkeys[i] << " Posted " << request->issue_time << " PF " << PrefetchBit_No << endl;
-    total_demand++;
-  }
-
-  Vector<Address>
wkeys = m_writeRequestTable.keys(); - int write_size = wkeys.size(); - out << "proc " << m_version << " Write Requests = " << write_size << endl; - // print the request table - for(int i=0; i < write_size; ++i){ - CacheMsg & request = m_writeRequestTable.lookup(wkeys[i]); - out << "\tRequest[ " << i << " ] = " << request.getType() << " Address " << wkeys[i] << " Posted " << request.getTime() << " PF " << request.getPrefetch() << endl; - if( request.getPrefetch() == PrefetchBit_No ){ +void +Sequencer::printProgress(ostream& out) const +{ +#if 0 + int total_demand = 0; + out << "Sequencer Stats Version " << m_version << endl; + out << "Current time = " << g_eventQueue_ptr->getTime() << endl; + out << "---------------" << endl; + out << "outstanding requests" << endl; + + Vector
<Address> rkeys = m_readRequestTable.keys();
+    int read_size = rkeys.size();
+    out << "proc " << m_version << " Read Requests = " << read_size << endl;
+
+    // print the request table
+    for (int i = 0; i < read_size; ++i) {
+        SequencerRequest *request = m_readRequestTable.lookup(rkeys[i]);
+        out << "\tRequest[ " << i << " ] = " << request->type
+            << " Address " << rkeys[i]
+            << " Posted " << request->issue_time
+            << " PF " << PrefetchBit_No << endl;
+        total_demand++;
+    }
+
+    Vector<Address>
wkeys = m_writeRequestTable.keys(); + int write_size = wkeys.size(); + out << "proc " << m_version << " Write Requests = " << write_size << endl; + + // print the request table + for (int i = 0; i < write_size; ++i){ + CacheMsg &request = m_writeRequestTable.lookup(wkeys[i]); + out << "\tRequest[ " << i << " ] = " << request.getType() + << " Address " << wkeys[i] + << " Posted " << request.getTime() + << " PF " << request.getPrefetch() << endl; + if (request.getPrefetch() == PrefetchBit_No) { + total_demand++; + } + } - out << "Total Number Outstanding: " << m_outstanding_count << endl; - out << "Total Number Demand : " << total_demand << endl; - out << "Total Number Prefetches : " << m_outstanding_count - total_demand << endl; - out << endl; - out << endl; - */ + out << endl; + + out << "Total Number Outstanding: " << m_outstanding_count << endl + << "Total Number Demand : " << total_demand << endl + << "Total Number Prefetches : " << m_outstanding_count - total_demand + << endl << endl << endl; +#endif } -void Sequencer::printConfig(ostream& out) const { - out << "Seqeuncer config: " << m_name << endl; - out << " controller: " << m_controller->getName() << endl; - out << " version: " << m_version << endl; - out << " max_outstanding_requests: " << m_max_outstanding_requests << endl; - out << " deadlock_threshold: " << m_deadlock_threshold << endl; +void +Sequencer::printConfig(ostream& out) const +{ + out << "Seqeuncer config: " << m_name << endl + << " controller: " << m_controller->getName() << endl + << " version: " << m_version << endl + << " max_outstanding_requests: " << m_max_outstanding_requests << endl + << " deadlock_threshold: " << m_deadlock_threshold << endl; } // Insert the request on the correct request table. Return true if // the entry was already present. -bool Sequencer::insertRequest(SequencerRequest* request) { - int total_outstanding = m_writeRequestTable.size() + m_readRequestTable.size(); - - assert(m_outstanding_count == total_outstanding); - - // See if we should schedule a deadlock check - if (deadlockCheckEvent.scheduled() == false) { - schedule(deadlockCheckEvent, m_deadlock_threshold + curTick); - } - - Address line_addr(request->ruby_request.paddr); - line_addr.makeLineAddress(); - if ((request->ruby_request.type == RubyRequestType_ST) || - (request->ruby_request.type == RubyRequestType_RMW_Read) || - (request->ruby_request.type == RubyRequestType_RMW_Write) || - (request->ruby_request.type == RubyRequestType_Locked_Read) || - (request->ruby_request.type == RubyRequestType_Locked_Write)) { - if (m_writeRequestTable.exist(line_addr)) { - m_writeRequestTable.lookup(line_addr) = request; - // return true; - assert(0); // drh5: isn't this an error? do you lose the initial request? +bool +Sequencer::insertRequest(SequencerRequest* request) +{ + int total_outstanding = + m_writeRequestTable.size() + m_readRequestTable.size(); + + assert(m_outstanding_count == total_outstanding); + + // See if we should schedule a deadlock check + if (deadlockCheckEvent.scheduled() == false) { + schedule(deadlockCheckEvent, m_deadlock_threshold + curTick); } - m_writeRequestTable.allocate(line_addr); - m_writeRequestTable.lookup(line_addr) = request; - m_outstanding_count++; - } else { - if (m_readRequestTable.exist(line_addr)) { - m_readRequestTable.lookup(line_addr) = request; - // return true; - assert(0); // drh5: isn't this an error? do you lose the initial request? 
+ + Address line_addr(request->ruby_request.paddr); + line_addr.makeLineAddress(); + if ((request->ruby_request.type == RubyRequestType_ST) || + (request->ruby_request.type == RubyRequestType_RMW_Read) || + (request->ruby_request.type == RubyRequestType_RMW_Write) || + (request->ruby_request.type == RubyRequestType_Locked_Read) || + (request->ruby_request.type == RubyRequestType_Locked_Write)) { + if (m_writeRequestTable.exist(line_addr)) { + m_writeRequestTable.lookup(line_addr) = request; + // return true; + + // drh5: isn't this an error? do you lose the initial request? + assert(0); + } + m_writeRequestTable.allocate(line_addr); + m_writeRequestTable.lookup(line_addr) = request; + m_outstanding_count++; + } else { + if (m_readRequestTable.exist(line_addr)) { + m_readRequestTable.lookup(line_addr) = request; + // return true; + + // drh5: isn't this an error? do you lose the initial request? + assert(0); + } + m_readRequestTable.allocate(line_addr); + m_readRequestTable.lookup(line_addr) = request; + m_outstanding_count++; } - m_readRequestTable.allocate(line_addr); - m_readRequestTable.lookup(line_addr) = request; - m_outstanding_count++; - } - g_system_ptr->getProfiler()->sequencerRequests(m_outstanding_count); + g_system_ptr->getProfiler()->sequencerRequests(m_outstanding_count); - total_outstanding = m_writeRequestTable.size() + m_readRequestTable.size(); - assert(m_outstanding_count == total_outstanding); + total_outstanding = m_writeRequestTable.size() + m_readRequestTable.size(); + assert(m_outstanding_count == total_outstanding); - return false; + return false; } -void Sequencer::removeRequest(SequencerRequest* srequest) { - - assert(m_outstanding_count == m_writeRequestTable.size() + m_readRequestTable.size()); - - const RubyRequest & ruby_request = srequest->ruby_request; - Address line_addr(ruby_request.paddr); - line_addr.makeLineAddress(); - if ((ruby_request.type == RubyRequestType_ST) || - (ruby_request.type == RubyRequestType_RMW_Read) || - (ruby_request.type == RubyRequestType_RMW_Write) || - (ruby_request.type == RubyRequestType_Locked_Read) || - (ruby_request.type == RubyRequestType_Locked_Write)) { - m_writeRequestTable.deallocate(line_addr); - } else { - m_readRequestTable.deallocate(line_addr); - } - m_outstanding_count--; - - assert(m_outstanding_count == m_writeRequestTable.size() + m_readRequestTable.size()); -} +void +Sequencer::removeRequest(SequencerRequest* srequest) +{ + assert(m_outstanding_count == + m_writeRequestTable.size() + m_readRequestTable.size()); + + const RubyRequest & ruby_request = srequest->ruby_request; + Address line_addr(ruby_request.paddr); + line_addr.makeLineAddress(); + if ((ruby_request.type == RubyRequestType_ST) || + (ruby_request.type == RubyRequestType_RMW_Read) || + (ruby_request.type == RubyRequestType_RMW_Write) || + (ruby_request.type == RubyRequestType_Locked_Read) || + (ruby_request.type == RubyRequestType_Locked_Write)) { + m_writeRequestTable.deallocate(line_addr); + } else { + m_readRequestTable.deallocate(line_addr); + } + m_outstanding_count--; -void Sequencer::writeCallback(const Address& address, DataBlock& data) { + assert(m_outstanding_count == m_writeRequestTable.size() + m_readRequestTable.size()); +} - assert(address == line_address(address)); - assert(m_writeRequestTable.exist(line_address(address))); +void +Sequencer::writeCallback(const Address& address, DataBlock& data) +{ + assert(address == line_address(address)); + assert(m_writeRequestTable.exist(line_address(address))); - SequencerRequest* request = 
m_writeRequestTable.lookup(address); + SequencerRequest* request = m_writeRequestTable.lookup(address); - removeRequest(request); + removeRequest(request); - assert((request->ruby_request.type == RubyRequestType_ST) || - (request->ruby_request.type == RubyRequestType_RMW_Read) || - (request->ruby_request.type == RubyRequestType_RMW_Write) || - (request->ruby_request.type == RubyRequestType_Locked_Read) || - (request->ruby_request.type == RubyRequestType_Locked_Write)); + assert((request->ruby_request.type == RubyRequestType_ST) || + (request->ruby_request.type == RubyRequestType_RMW_Read) || + (request->ruby_request.type == RubyRequestType_RMW_Write) || + (request->ruby_request.type == RubyRequestType_Locked_Read) || + (request->ruby_request.type == RubyRequestType_Locked_Write)); - if (request->ruby_request.type == RubyRequestType_Locked_Read) { - m_dataCache_ptr->setLocked(address, m_version); - } - else if (request->ruby_request.type == RubyRequestType_RMW_Read) { - m_controller->blockOnQueue(address, m_mandatory_q_ptr); - } - else if (request->ruby_request.type == RubyRequestType_RMW_Write) { - m_controller->unblock(address); - } + if (request->ruby_request.type == RubyRequestType_Locked_Read) { + m_dataCache_ptr->setLocked(address, m_version); + } else if (request->ruby_request.type == RubyRequestType_RMW_Read) { + m_controller->blockOnQueue(address, m_mandatory_q_ptr); + } else if (request->ruby_request.type == RubyRequestType_RMW_Write) { + m_controller->unblock(address); + } - hitCallback(request, data); + hitCallback(request, data); } -void Sequencer::readCallback(const Address& address, DataBlock& data) { - - assert(address == line_address(address)); - assert(m_readRequestTable.exist(line_address(address))); +void +Sequencer::readCallback(const Address& address, DataBlock& data) +{ + assert(address == line_address(address)); + assert(m_readRequestTable.exist(line_address(address))); - SequencerRequest* request = m_readRequestTable.lookup(address); - removeRequest(request); + SequencerRequest* request = m_readRequestTable.lookup(address); + removeRequest(request); - assert((request->ruby_request.type == RubyRequestType_LD) || - (request->ruby_request.type == RubyRequestType_RMW_Read) || - (request->ruby_request.type == RubyRequestType_IFETCH)); + assert((request->ruby_request.type == RubyRequestType_LD) || + (request->ruby_request.type == RubyRequestType_RMW_Read) || + (request->ruby_request.type == RubyRequestType_IFETCH)); - hitCallback(request, data); + hitCallback(request, data); } -void Sequencer::hitCallback(SequencerRequest* srequest, DataBlock& data) { - const RubyRequest & ruby_request = srequest->ruby_request; - Address request_address(ruby_request.paddr); - Address request_line_address(ruby_request.paddr); - request_line_address.makeLineAddress(); - RubyRequestType type = ruby_request.type; - Time issued_time = srequest->issue_time; - - // Set this cache entry to the most recently used - if (type == RubyRequestType_IFETCH) { - if (m_instCache_ptr->isTagPresent(request_line_address) ) - m_instCache_ptr->setMRU(request_line_address); - } else { - if (m_dataCache_ptr->isTagPresent(request_line_address) ) - m_dataCache_ptr->setMRU(request_line_address); - } - - assert(g_eventQueue_ptr->getTime() >= issued_time); - Time miss_latency = g_eventQueue_ptr->getTime() - issued_time; - - // Profile the miss latency for all non-zero demand misses - if (miss_latency != 0) { - g_system_ptr->getProfiler()->missLatency(miss_latency, type); +void +Sequencer::hitCallback(SequencerRequest* 
srequest, DataBlock& data) +{ + const RubyRequest & ruby_request = srequest->ruby_request; + Address request_address(ruby_request.paddr); + Address request_line_address(ruby_request.paddr); + request_line_address.makeLineAddress(); + RubyRequestType type = ruby_request.type; + Time issued_time = srequest->issue_time; + + // Set this cache entry to the most recently used + if (type == RubyRequestType_IFETCH) { + if (m_instCache_ptr->isTagPresent(request_line_address)) + m_instCache_ptr->setMRU(request_line_address); + } else { + if (m_dataCache_ptr->isTagPresent(request_line_address)) + m_dataCache_ptr->setMRU(request_line_address); + } + + assert(g_eventQueue_ptr->getTime() >= issued_time); + Time miss_latency = g_eventQueue_ptr->getTime() - issued_time; - if (Debug::getProtocolTrace()) { - g_system_ptr->getProfiler()->profileTransition("Seq", m_version, Address(ruby_request.paddr), - "", "Done", "", int_to_string(miss_latency)+" cycles"); + // Profile the miss latency for all non-zero demand misses + if (miss_latency != 0) { + g_system_ptr->getProfiler()->missLatency(miss_latency, type); + + if (Debug::getProtocolTrace()) { + g_system_ptr->getProfiler()-> + profileTransition("Seq", m_version, + Address(ruby_request.paddr), "", "Done", "", + csprintf("%d cycles", miss_latency)); + } } - } - /* - if (request.getPrefetch() == PrefetchBit_Yes) { - return; // Ignore the prefetch - } - */ - - // update the data - if (ruby_request.data != NULL) { - if ((type == RubyRequestType_LD) || - (type == RubyRequestType_IFETCH) || - (type == RubyRequestType_RMW_Read) || - (type == RubyRequestType_Locked_Read)) { - - memcpy(ruby_request.data, - data.getData(request_address.getOffset(), ruby_request.len), - ruby_request.len); - - } else { +#if 0 + if (request.getPrefetch() == PrefetchBit_Yes) { + return; // Ignore the prefetch + } +#endif - data.setData(ruby_request.data, - request_address.getOffset(), - ruby_request.len); + // update the data + if (ruby_request.data != NULL) { + if ((type == RubyRequestType_LD) || + (type == RubyRequestType_IFETCH) || + (type == RubyRequestType_RMW_Read) || + (type == RubyRequestType_Locked_Read)) { + + memcpy(ruby_request.data, + data.getData(request_address.getOffset(), ruby_request.len), + ruby_request.len); + } else { + data.setData(ruby_request.data, request_address.getOffset(), + ruby_request.len); + } + } else { + DPRINTF(MemoryAccess, + "WARNING. Data not transfered from Ruby to M5 for type %s\n", + RubyRequestType_to_string(type)); + } + // If using the RubyTester, update the RubyTester sender state's + // subBlock with the recieved data. The tester will later access + // this state. + // Note: RubyPort will access it's sender state before the + // RubyTester. + if (m_usingRubyTester) { + RubyPort::SenderState *requestSenderState = + safe_cast(ruby_request.pkt->senderState); + RubyTester::SenderState* testerSenderState = + safe_cast(requestSenderState->saved); + testerSenderState->subBlock->mergeFrom(data); } - } else { - DPRINTF(MemoryAccess, - "WARNING. Data not transfered from Ruby to M5 for type %s\n", - RubyRequestType_to_string(type)); - } - - // - // If using the RubyTester, update the RubyTester sender state's subBlock - // with the recieved data. The tester will later access this state. - // Note: RubyPort will access it's sender state before the RubyTester. 
- // - if (m_usingRubyTester) { - RubyTester::SenderState* testerSenderState; - testerSenderState = safe_cast( \ - safe_cast(ruby_request.pkt->senderState)->saved); - testerSenderState->subBlock->mergeFrom(data); - } - - ruby_hit_callback(ruby_request.pkt); - delete srequest; + + ruby_hit_callback(ruby_request.pkt); + delete srequest; } // Returns true if the sequencer already has a load or store outstanding -RequestStatus Sequencer::getRequestStatus(const RubyRequest& request) { - bool is_outstanding_store = m_writeRequestTable.exist(line_address(Address(request.paddr))); - bool is_outstanding_load = m_readRequestTable.exist(line_address(Address(request.paddr))); - if ( is_outstanding_store ) { - if ((request.type == RubyRequestType_LD) || - (request.type == RubyRequestType_IFETCH) || - (request.type == RubyRequestType_RMW_Read)) { - m_store_waiting_on_load_cycles++; - } else { - m_store_waiting_on_store_cycles++; +RequestStatus +Sequencer::getRequestStatus(const RubyRequest& request) +{ + bool is_outstanding_store = + m_writeRequestTable.exist(line_address(Address(request.paddr))); + bool is_outstanding_load = + m_readRequestTable.exist(line_address(Address(request.paddr))); + if (is_outstanding_store) { + if ((request.type == RubyRequestType_LD) || + (request.type == RubyRequestType_IFETCH) || + (request.type == RubyRequestType_RMW_Read)) { + m_store_waiting_on_load_cycles++; + } else { + m_store_waiting_on_store_cycles++; + } + return RequestStatus_Aliased; + } else if (is_outstanding_load) { + if ((request.type == RubyRequestType_ST) || + (request.type == RubyRequestType_RMW_Write)) { + m_load_waiting_on_store_cycles++; + } else { + m_load_waiting_on_load_cycles++; + } + return RequestStatus_Aliased; } - return RequestStatus_Aliased; - } else if ( is_outstanding_load ) { - if ((request.type == RubyRequestType_ST) || - (request.type == RubyRequestType_RMW_Write) ) { - m_load_waiting_on_store_cycles++; - } else { - m_load_waiting_on_load_cycles++; + + if (m_outstanding_count >= m_max_outstanding_requests) { + return RequestStatus_BufferFull; } - return RequestStatus_Aliased; - } - - if (m_outstanding_count >= m_max_outstanding_requests) { - return RequestStatus_BufferFull; - } - - return RequestStatus_Ready; -} -bool Sequencer::empty() const { - return (m_writeRequestTable.size() == 0) && (m_readRequestTable.size() == 0); + return RequestStatus_Ready; } +bool +Sequencer::empty() const +{ + return m_writeRequestTable.size() == 0 && m_readRequestTable.size() == 0; +} -RequestStatus Sequencer::makeRequest(const RubyRequest & request) +RequestStatus +Sequencer::makeRequest(const RubyRequest &request) { - assert(Address(request.paddr).getOffset() + request.len <= - RubySystem::getBlockSizeBytes()); - RequestStatus status = getRequestStatus(request); - if (status == RequestStatus_Ready) { - SequencerRequest *srequest = new SequencerRequest(request, - g_eventQueue_ptr->getTime()); + assert(Address(request.paddr).getOffset() + request.len <= + RubySystem::getBlockSizeBytes()); + RequestStatus status = getRequestStatus(request); + if (status != RequestStatus_Ready) + return status; + + SequencerRequest *srequest = + new SequencerRequest(request, g_eventQueue_ptr->getTime()); bool found = insertRequest(srequest); - if (!found) { - if (request.type == RubyRequestType_Locked_Write) { - // - // NOTE: it is OK to check the locked flag here as the mandatory queue - // will be checked first ensuring that nothing comes between checking - // the flag and servicing the store. 
- // - if (!m_dataCache_ptr->isLocked(line_address(Address(request.paddr)), - m_version)) { - removeRequest(srequest); - if (Debug::getProtocolTrace()) { - - g_system_ptr->getProfiler()->profileTransition("Seq", - m_version, - Address(request.paddr), - "", - "SC Fail", - "", - RubyRequestType_to_string(request.type)); + if (found) { + panic("Sequencer::makeRequest should never be called if the " + "request is already outstanding\n"); + return RequestStatus_NULL; + } + if (request.type == RubyRequestType_Locked_Write) { + // NOTE: it is OK to check the locked flag here as the + // mandatory queue will be checked first ensuring that nothing + // comes between checking the flag and servicing the store. + + Address line_addr = line_address(Address(request.paddr)); + if (!m_dataCache_ptr->isLocked(line_addr, m_version)) { + removeRequest(srequest); + if (Debug::getProtocolTrace()) { + g_system_ptr->getProfiler()-> + profileTransition("Seq", m_version, + Address(request.paddr), + "", "SC Fail", "", + RubyRequestType_to_string(request.type)); } return RequestStatus_LlscFailed; - } - else { - m_dataCache_ptr->clearLocked(line_address(Address(request.paddr))); + } else { + m_dataCache_ptr->clearLocked(line_addr); } - } - issueRequest(request); + } + issueRequest(request); - // TODO: issue hardware prefetches here - return RequestStatus_Issued; + // TODO: issue hardware prefetches here + return RequestStatus_Issued; +} + +void +Sequencer::issueRequest(const RubyRequest& request) +{ + // TODO: get rid of CacheMsg, CacheRequestType, and + // AccessModeTYpe, & have SLICC use RubyRequest and subtypes + // natively + CacheRequestType ctype; + switch(request.type) { + case RubyRequestType_IFETCH: + ctype = CacheRequestType_IFETCH; + break; + case RubyRequestType_LD: + ctype = CacheRequestType_LD; + break; + case RubyRequestType_ST: + ctype = CacheRequestType_ST; + break; + case RubyRequestType_Locked_Read: + case RubyRequestType_Locked_Write: + ctype = CacheRequestType_ATOMIC; + break; + case RubyRequestType_RMW_Read: + ctype = CacheRequestType_ATOMIC; + break; + case RubyRequestType_RMW_Write: + ctype = CacheRequestType_ATOMIC; + break; + default: + assert(0); } - else { - panic("Sequencer::makeRequest should never be called if the request"\ - "is already outstanding\n"); - return RequestStatus_NULL; + + AccessModeType amtype; + switch(request.access_mode){ + case RubyAccessMode_User: + amtype = AccessModeType_UserMode; + break; + case RubyAccessMode_Supervisor: + amtype = AccessModeType_SupervisorMode; + break; + case RubyAccessMode_Device: + amtype = AccessModeType_UserMode; + break; + default: + assert(0); } - } else { - return status; - } -} -void Sequencer::issueRequest(const RubyRequest& request) { - - // TODO: get rid of CacheMsg, CacheRequestType, and AccessModeTYpe, & have SLICC use RubyRequest and subtypes natively - CacheRequestType ctype; - switch(request.type) { - case RubyRequestType_IFETCH: - ctype = CacheRequestType_IFETCH; - break; - case RubyRequestType_LD: - ctype = CacheRequestType_LD; - break; - case RubyRequestType_ST: - ctype = CacheRequestType_ST; - break; - case RubyRequestType_Locked_Read: - case RubyRequestType_Locked_Write: - ctype = CacheRequestType_ATOMIC; - break; - case RubyRequestType_RMW_Read: - ctype = CacheRequestType_ATOMIC; - break; - case RubyRequestType_RMW_Write: - ctype = CacheRequestType_ATOMIC; - break; - default: - assert(0); - } - AccessModeType amtype; - switch(request.access_mode){ - case RubyAccessMode_User: - amtype = AccessModeType_UserMode; - break; - 
case RubyAccessMode_Supervisor: - amtype = AccessModeType_SupervisorMode; - break; - case RubyAccessMode_Device: - amtype = AccessModeType_UserMode; - break; - default: - assert(0); - } - Address line_addr(request.paddr); - line_addr.makeLineAddress(); - CacheMsg msg(line_addr, Address(request.paddr), ctype, Address(request.pc), amtype, request.len, PrefetchBit_No, request.proc_id); - - if (Debug::getProtocolTrace()) { - g_system_ptr->getProfiler()->profileTransition("Seq", m_version, Address(request.paddr), - "", "Begin", "", RubyRequestType_to_string(request.type)); - } - - if (g_system_ptr->getTracer()->traceEnabled()) { - g_system_ptr->getTracer()->traceRequest(this, line_addr, Address(request.pc), - request.type, g_eventQueue_ptr->getTime()); - } - - Time latency = 0; // initialzed to an null value - - if (request.type == RubyRequestType_IFETCH) - latency = m_instCache_ptr->getLatency(); - else - latency = m_dataCache_ptr->getLatency(); - - // Send the message to the cache controller - assert(latency > 0); - - assert(m_mandatory_q_ptr != NULL); - m_mandatory_q_ptr->enqueue(msg, latency); -} -/* -bool Sequencer::tryCacheAccess(const Address& addr, CacheRequestType type, - AccessModeType access_mode, - int size, DataBlock*& data_ptr) { - if (type == CacheRequestType_IFETCH) { - return m_instCache_ptr->tryCacheAccess(line_address(addr), type, data_ptr); - } else { - return m_dataCache_ptr->tryCacheAccess(line_address(addr), type, data_ptr); - } + Address line_addr(request.paddr); + line_addr.makeLineAddress(); + CacheMsg msg(line_addr, Address(request.paddr), ctype, + Address(request.pc), amtype, request.len, PrefetchBit_No, + request.proc_id); + + if (Debug::getProtocolTrace()) { + g_system_ptr->getProfiler()-> + profileTransition("Seq", m_version, Address(request.paddr), + "", "Begin", "", + RubyRequestType_to_string(request.type)); + } + + if (g_system_ptr->getTracer()->traceEnabled()) { + g_system_ptr->getTracer()-> + traceRequest(this, line_addr, Address(request.pc), + request.type, g_eventQueue_ptr->getTime()); + } + + Time latency = 0; // initialzed to an null value + + if (request.type == RubyRequestType_IFETCH) + latency = m_instCache_ptr->getLatency(); + else + latency = m_dataCache_ptr->getLatency(); + + // Send the message to the cache controller + assert(latency > 0); + + assert(m_mandatory_q_ptr != NULL); + m_mandatory_q_ptr->enqueue(msg, latency); } -*/ -void Sequencer::print(ostream& out) const { - out << "[Sequencer: " << m_version - << ", outstanding requests: " << m_outstanding_count; +#if 0 +bool +Sequencer::tryCacheAccess(const Address& addr, CacheRequestType type, + AccessModeType access_mode, + int size, DataBlock*& data_ptr) +{ + CacheMemory *cache = + (type == CacheRequestType_IFETCH) ? 
m_instCache_ptr : m_dataCache_ptr; - out << ", read request table: " << m_readRequestTable - << ", write request table: " << m_writeRequestTable; - out << "]"; + return cache->tryCacheAccess(line_address(addr), type, data_ptr); +} +#endif + +void +Sequencer::print(ostream& out) const +{ + out << "[Sequencer: " << m_version + << ", outstanding requests: " << m_outstanding_count + << ", read request table: " << m_readRequestTable + << ", write request table: " << m_writeRequestTable + << "]"; } -// this can be called from setState whenever coherence permissions are upgraded -// when invoked, coherence violations will be checked for the given block -void Sequencer::checkCoherence(const Address& addr) { +// this can be called from setState whenever coherence permissions are +// upgraded when invoked, coherence violations will be checked for the +// given block +void +Sequencer::checkCoherence(const Address& addr) +{ #ifdef CHECK_COHERENCE - g_system_ptr->checkGlobalCoherenceInvariant(addr); + g_system_ptr->checkGlobalCoherenceInvariant(addr); #endif } - -- cgit v1.2.3
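
The sketch below is not part of the commit. It is a minimal illustration, assuming the Ruby headers that this file already includes, of how a caller might drive Sequencer::makeRequest() and react to each RequestStatus value used above (Issued, Aliased, BufferFull, LlscFailed). The helper retryLater() is hypothetical.

#include "mem/ruby/libruby.hh"
#include "mem/ruby/system/Sequencer.hh"

// Hypothetical helper: how a retry gets scheduled is up to the caller.
void retryLater(const RubyRequest &req);

RequestStatus
tryIssue(Sequencer *seq, const RubyRequest &req)
{
    // makeRequest() asserts the access stays within one cache line: the
    // block offset of req.paddr plus req.len must not exceed
    // RubySystem::getBlockSizeBytes().
    RequestStatus status = seq->makeRequest(req);

    switch (status) {
      case RequestStatus_Issued:
        // The request was inserted in the read or write request table and
        // enqueued on the mandatory queue; readCallback()/writeCallback()
        // will later invoke hitCallback(), which copies the data and calls
        // ruby_hit_callback() on the original packet.
        break;

      case RequestStatus_Aliased:
        // A load or store to the same cache line is already outstanding;
        // retry after the earlier request completes.
        retryLater(req);
        break;

      case RequestStatus_BufferFull:
        // m_outstanding_count has reached m_max_outstanding_requests.
        retryLater(req);
        break;

      case RequestStatus_LlscFailed:
        // Returned only for Locked_Write when the line's lock flag is no
        // longer set; the store-conditional fails without being issued.
        break;

      default:
        break;
    }
    return status;
}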