author    | Joel Hestness <hestness@cs.utexas.edu> | 2011-02-06 22:14:18 -0800
committer | Joel Hestness <hestness@cs.utexas.edu> | 2011-02-06 22:14:18 -0800
commit    | 82844618fd91338ad54d3fcf7ea9fa088b04ab1a (patch)
tree      | 11a3d3c89ec754ded752691bfd4277c2b80ae478 /src
parent    | 16c1edebd0a5b75dffc9cf2a561fa19756660558 (diff)
download  | gem5-82844618fd91338ad54d3fcf7ea9fa088b04ab1a.tar.xz
Ruby: Add support for locked memory accesses in X86_FS
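
This change adds RubyRequestType_Locked_RMW_Read and RubyRequestType_Locked_RMW_Write so that x86 LOCK-prefixed accesses are distinguished from ordinary loads, stores, and Alpha LL/SC pairs at the Ruby interface. As a rough illustration (not part of the committed code), the classification that RubyPort::M5Port::recvTiming now performs follows the sketch below; `ToyPacket` and `classify()` are hypothetical stand-ins for gem5's Packet/Request interface and only mirror the decision order, not the real API.

```cpp
// Minimal, self-contained sketch of the request classification this patch
// introduces in RubyPort::M5Port::recvTiming.  ToyPacket and classify() are
// hypothetical; only the decision order is meant to match the patch.
#include <cassert>
#include <cstdio>

enum class RubyReqType {
    LD, ST, IFETCH,
    Load_Linked, Store_Conditional,    // Alpha LL/SC
    Locked_RMW_Read, Locked_RMW_Write  // x86 LOCK-prefixed read-modify-write
};

struct ToyPacket {
    bool read;        // stands in for pkt->isRead()
    bool write;       // stands in for pkt->isWrite()
    bool llsc;        // stands in for pkt->req->isLLSC()
    bool locked;      // stands in for pkt->req->isLocked() (x86 LOCK prefix)
    bool inst_fetch;  // stands in for pkt->req->isInstFetch()
};

RubyReqType classify(const ToyPacket &pkt)
{
    if (pkt.llsc) {
        // LL/SC pairs stay atomic requests so the protocol disables
        // migratory-sharing optimizations for them.
        return pkt.write ? RubyReqType::Store_Conditional
                         : RubyReqType::Load_Linked;
    } else if (pkt.locked) {
        // A LOCK-prefixed instruction reaches Ruby as a locked read
        // followed by a locked write to the same line.
        return pkt.write ? RubyReqType::Locked_RMW_Write
                         : RubyReqType::Locked_RMW_Read;
    } else if (pkt.read) {
        return pkt.inst_fetch ? RubyReqType::IFETCH : RubyReqType::LD;
    } else {
        assert(pkt.write);
        return RubyReqType::ST;
    }
}

int main()
{
    // The two halves of something like "lock incl (mem)".
    ToyPacket locked_read  {true,  false, false, true, false};
    ToyPacket locked_write {false, true,  false, true, false};
    std::printf("%d %d\n",
                classify(locked_read)  == RubyReqType::Locked_RMW_Read,
                classify(locked_write) == RubyReqType::Locked_RMW_Write);
    return 0;
}
```

In the actual patch, the locked read additionally causes the Sequencer to block the cache line until the matching locked write completes (see Sequencer.cc below).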
Diffstat (limited to 'src')
-rw-r--r-- | src/mem/ruby/libruby.cc             |  8
-rw-r--r-- | src/mem/ruby/libruby.hh             |  2
-rw-r--r-- | src/mem/ruby/system/DMASequencer.cc |  2
-rw-r--r-- | src/mem/ruby/system/RubyPort.cc     | 36
-rw-r--r-- | src/mem/ruby/system/Sequencer.cc    | 43
5 files changed, 70 insertions, 21 deletions
diff --git a/src/mem/ruby/libruby.cc b/src/mem/ruby/libruby.cc
index fe4cbcd52..11700b770 100644
--- a/src/mem/ruby/libruby.cc
+++ b/src/mem/ruby/libruby.cc
@@ -58,6 +58,10 @@ RubyRequestType_to_string(const RubyRequestType& obj)
         return "RMW_Read";
       case RubyRequestType_RMW_Write:
         return "RMW_Write";
+      case RubyRequestType_Locked_RMW_Read:
+        return "Locked_RMW_Read";
+      case RubyRequestType_Locked_RMW_Write:
+        return "Locked_RMW_Write";
       case RubyRequestType_NULL:
       default:
         assert(0);
@@ -82,6 +86,10 @@ string_to_RubyRequestType(string str)
         return RubyRequestType_RMW_Read;
     else if (str == "RMW_Write")
         return RubyRequestType_RMW_Write;
+    else if (str == "Locked_RMW_Read")
+        return RubyRequestType_Locked_RMW_Read;
+    else if (str == "Locked_RMW_Write")
+        return RubyRequestType_Locked_RMW_Write;
     else
         assert(0);
     return RubyRequestType_NULL;
diff --git a/src/mem/ruby/libruby.hh b/src/mem/ruby/libruby.hh
index 70b5bde20..6dd301578 100644
--- a/src/mem/ruby/libruby.hh
+++ b/src/mem/ruby/libruby.hh
@@ -44,6 +44,8 @@ enum RubyRequestType {
     RubyRequestType_Store_Conditional,
     RubyRequestType_RMW_Read,
     RubyRequestType_RMW_Write,
+    RubyRequestType_Locked_RMW_Read,
+    RubyRequestType_Locked_RMW_Write,
     RubyRequestType_NUM
 };
diff --git a/src/mem/ruby/system/DMASequencer.cc b/src/mem/ruby/system/DMASequencer.cc
index f864c1018..63e1f76e6 100644
--- a/src/mem/ruby/system/DMASequencer.cc
+++ b/src/mem/ruby/system/DMASequencer.cc
@@ -70,6 +70,8 @@ DMASequencer::makeRequest(const RubyRequest &request)
       case RubyRequestType_Store_Conditional:
       case RubyRequestType_RMW_Read:
       case RubyRequestType_RMW_Write:
+      case RubyRequestType_Locked_RMW_Read:
+      case RubyRequestType_Locked_RMW_Write:
       case RubyRequestType_NUM:
         panic("DMASequencer::makeRequest does not support RubyRequestType");
         return RequestStatus_NULL;
diff --git a/src/mem/ruby/system/RubyPort.cc b/src/mem/ruby/system/RubyPort.cc
index 7dbc22e19..d1c306bb2 100644
--- a/src/mem/ruby/system/RubyPort.cc
+++ b/src/mem/ruby/system/RubyPort.cc
@@ -26,6 +26,10 @@
  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 
+#include "config/the_isa.hh"
+#if THE_ISA == X86_ISA
+#include "arch/x86/insts/microldstop.hh"
+#endif // X86_ISA
 #include "cpu/testers/rubytest/RubyTester.hh"
 #include "mem/physical.hh"
 #include "mem/ruby/slicc_interface/AbstractController.hh"
@@ -201,22 +205,38 @@ RubyPort::M5Port::recvTiming(PacketPtr pkt)
             assert(pkt->isRead());
             type = RubyRequestType_Load_Linked;
         }
+    } else if (pkt->req->isLocked()) {
+        if (pkt->isWrite()) {
+            DPRINTF(MemoryAccess, "Issuing Locked RMW Write\n");
+            type = RubyRequestType_Locked_RMW_Write;
+        } else {
+            DPRINTF(MemoryAccess, "Issuing Locked RMW Read\n");
+            assert(pkt->isRead());
+            type = RubyRequestType_Locked_RMW_Read;
+        }
     } else {
         if (pkt->isRead()) {
             if (pkt->req->isInstFetch()) {
                 type = RubyRequestType_IFETCH;
             } else {
-                type = RubyRequestType_LD;
+#if THE_ISA == X86_ISA
+                uint32_t flags = pkt->req->getFlags();
+                bool storeCheck = flags &
+                        (TheISA::StoreCheck << TheISA::FlagShift);
+#else
+                bool storeCheck = false;
+#endif // X86_ISA
+                if (storeCheck) {
+                    type = RubyRequestType_RMW_Read;
+                } else {
+                    type = RubyRequestType_LD;
+                }
             }
         } else if (pkt->isWrite()) {
+            //
+            // Note: M5 packets do not differentiate ST from RMW_Write
+            //
             type = RubyRequestType_ST;
-        } else if (pkt->isReadWrite()) {
-            // Fix me.  This conditional will never be executed
-            // because isReadWrite() is just an OR of isRead() and
-            // isWrite().  Furthermore, just because the packet is a
-            // read/write request does not necessary mean it is a
-            // read-modify-write atomic operation.
-            type = RubyRequestType_RMW_Write;
         } else {
             panic("Unsupported ruby packet type\n");
         }
diff --git a/src/mem/ruby/system/Sequencer.cc b/src/mem/ruby/system/Sequencer.cc
index 66829ed45..97122dc69 100644
--- a/src/mem/ruby/system/Sequencer.cc
+++ b/src/mem/ruby/system/Sequencer.cc
@@ -232,7 +232,9 @@ Sequencer::insertRequest(SequencerRequest* request)
         (request->ruby_request.type == RubyRequestType_RMW_Read) ||
         (request->ruby_request.type == RubyRequestType_RMW_Write) ||
         (request->ruby_request.type == RubyRequestType_Load_Linked) ||
-        (request->ruby_request.type == RubyRequestType_Store_Conditional)) {
+        (request->ruby_request.type == RubyRequestType_Store_Conditional) ||
+        (request->ruby_request.type == RubyRequestType_Locked_RMW_Read) ||
+        (request->ruby_request.type == RubyRequestType_Locked_RMW_Write)) {
         pair<RequestTable::iterator, bool> r =
             m_writeRequestTable.insert(RequestTable::value_type(line_addr, 0));
         bool success = r.second;
@@ -291,7 +293,9 @@ Sequencer::removeRequest(SequencerRequest* srequest)
         (ruby_request.type == RubyRequestType_RMW_Read) ||
         (ruby_request.type == RubyRequestType_RMW_Write) ||
         (ruby_request.type == RubyRequestType_Load_Linked) ||
-        (ruby_request.type == RubyRequestType_Store_Conditional)) {
+        (ruby_request.type == RubyRequestType_Store_Conditional) ||
+        (ruby_request.type == RubyRequestType_Locked_RMW_Read) ||
+        (ruby_request.type == RubyRequestType_Locked_RMW_Write)) {
         m_writeRequestTable.erase(line_addr);
     } else {
         m_readRequestTable.erase(line_addr);
@@ -379,7 +383,9 @@ Sequencer::writeCallback(const Address& address,
            (request->ruby_request.type == RubyRequestType_RMW_Read) ||
            (request->ruby_request.type == RubyRequestType_RMW_Write) ||
            (request->ruby_request.type == RubyRequestType_Load_Linked) ||
-           (request->ruby_request.type == RubyRequestType_Store_Conditional));
+           (request->ruby_request.type == RubyRequestType_Store_Conditional) ||
+           (request->ruby_request.type == RubyRequestType_Locked_RMW_Read) ||
+           (request->ruby_request.type == RubyRequestType_Locked_RMW_Write));
 
     //
     // For Alpha, properly handle LL, SC, and write requests with respect to
@@ -387,9 +393,9 @@ Sequencer::writeCallback(const Address& address,
     //
     bool success = handleLlsc(address, request);
 
-    if (request->ruby_request.type == RubyRequestType_RMW_Read) {
+    if (request->ruby_request.type == RubyRequestType_Locked_RMW_Read) {
         m_controller->blockOnQueue(address, m_mandatory_q_ptr);
-    } else if (request->ruby_request.type == RubyRequestType_RMW_Write) {
+    } else if (request->ruby_request.type == RubyRequestType_Locked_RMW_Write) {
         m_controller->unblock(address);
     }
 
@@ -430,7 +436,6 @@ Sequencer::readCallback(const Address& address,
     markRemoved();
 
     assert((request->ruby_request.type == RubyRequestType_LD) ||
-           (request->ruby_request.type == RubyRequestType_RMW_Read) ||
           (request->ruby_request.type == RubyRequestType_IFETCH));
 
     hitCallback(request, mach, data, true,
@@ -501,8 +506,8 @@ Sequencer::hitCallback(SequencerRequest* srequest,
     if ((type == RubyRequestType_LD) ||
         (type == RubyRequestType_IFETCH) ||
         (type == RubyRequestType_RMW_Read) ||
+        (type == RubyRequestType_Locked_RMW_Read) ||
         (type == RubyRequestType_Load_Linked)) {
-
         memcpy(ruby_request.data,
                data.getData(request_address.getOffset(), ruby_request.len),
                ruby_request.len);
@@ -612,18 +617,30 @@ Sequencer::issueRequest(const RubyRequest& request)
         ctype = CacheRequestType_LD;
         break;
       case RubyRequestType_ST:
+      case RubyRequestType_RMW_Read:
+      case RubyRequestType_RMW_Write:
+      //
+      // x86 locked instructions are translated to store cache coherence
+      // requests because these requests should always be treated as read
+      // exclusive operations and should leverage any migratory sharing
+      // optimization built into the protocol.
+      //
+      case RubyRequestType_Locked_RMW_Read:
+      case RubyRequestType_Locked_RMW_Write:
         ctype = CacheRequestType_ST;
         break;
+      //
+      // Alpha LL/SC instructions need to be handled carefully by the cache
+      // coherence protocol to ensure they follow the proper semantics. In
+      // particular, by identifying the operations as atomic, the protocol
+      // should understand that migratory sharing optimizations should not be
+      // performed (i.e. a load between the LL and SC should not steal away
+      // exclusive permission).
+      //
       case RubyRequestType_Load_Linked:
       case RubyRequestType_Store_Conditional:
         ctype = CacheRequestType_ATOMIC;
         break;
-      case RubyRequestType_RMW_Read:
-        ctype = CacheRequestType_ATOMIC;
-        break;
-      case RubyRequestType_RMW_Write:
-        ctype = CacheRequestType_ATOMIC;
-        break;
       default:
         assert(0);
     }
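
A note on the blocking behavior in Sequencer::writeCallback above: once a Locked_RMW_Read completes, the sequencer blocks the mandatory queue for that address and only unblocks it when the matching Locked_RMW_Write completes, so no other request to the line can be serviced between the two halves of the locked RMW. The toy model below is a minimal sketch of that ordering only; `ToyLineLock`, `complete()`, and `tryIssue()` are hypothetical names, not gem5 code.

```cpp
// Toy model of the blockOnQueue/unblock pairing added to
// Sequencer::writeCallback: a completed Locked_RMW_Read blocks the line,
// a completed Locked_RMW_Write unblocks it and releases anything deferred.
#include <cstdint>
#include <deque>
#include <iostream>
#include <string>
#include <unordered_map>

class ToyLineLock {
  public:
    // Called when a request to 'line' completes at the sequencer.
    void complete(uint64_t line, const std::string &type) {
        if (type == "Locked_RMW_Read") {
            // blockOnQueue(line): start deferring other requests to this line.
            blocked_.emplace(line, std::deque<std::string>{});
        } else if (type == "Locked_RMW_Write") {
            // unblock(line): release and replay anything that queued up.
            auto it = blocked_.find(line);
            if (it != blocked_.end()) {
                for (const auto &r : it->second)
                    std::cout << "replaying deferred " << r << "\n";
                blocked_.erase(it);
            }
        }
    }

    // Returns false if the request must wait behind an in-flight locked RMW.
    bool tryIssue(uint64_t line, const std::string &type) {
        auto it = blocked_.find(line);
        if (it != blocked_.end() && type != "Locked_RMW_Write") {
            it->second.push_back(type);
            return false;
        }
        return true;
    }

  private:
    std::unordered_map<uint64_t, std::deque<std::string>> blocked_;
};

int main() {
    ToyLineLock seq;
    const uint64_t lineA = 0x1000;

    seq.complete(lineA, "Locked_RMW_Read");           // core 0: locked read done
    std::cout << "core 1 LD allowed? "
              << seq.tryIssue(lineA, "LD") << "\n";   // prints 0: must wait
    seq.complete(lineA, "Locked_RMW_Write");          // core 0: locked write done
    std::cout << "core 1 LD allowed? "
              << seq.tryIssue(lineA, "LD") << "\n";   // prints 1: proceeds
    return 0;
}
```

This pairing previously hung off RMW_Read/RMW_Write; moving it to the new Locked_RMW types lets plain RMW_Read requests (such as x86 store-check loads) be issued as ordinary read-exclusive store requests without blocking the line.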