author:    Joel Hestness <hestness@cs.utexas.edu>  2011-02-06 22:14:18 -0800
committer: Joel Hestness <hestness@cs.utexas.edu>  2011-02-06 22:14:18 -0800
commit:    82844618fd91338ad54d3fcf7ea9fa088b04ab1a (patch)
tree:      11a3d3c89ec754ded752691bfd4277c2b80ae478 /src/mem/ruby/system/Sequencer.cc
parent:    16c1edebd0a5b75dffc9cf2a561fa19756660558 (diff)
Ruby: Add support for locked memory accesses in X86_FS
Diffstat (limited to 'src/mem/ruby/system/Sequencer.cc')
-rw-r--r--  src/mem/ruby/system/Sequencer.cc  43
1 file changed, 30 insertions(+), 13 deletions(-)
diff --git a/src/mem/ruby/system/Sequencer.cc b/src/mem/ruby/system/Sequencer.cc
index 66829ed45..97122dc69 100644
--- a/src/mem/ruby/system/Sequencer.cc
+++ b/src/mem/ruby/system/Sequencer.cc
@@ -232,7 +232,9 @@ Sequencer::insertRequest(SequencerRequest* request)
(request->ruby_request.type == RubyRequestType_RMW_Read) ||
(request->ruby_request.type == RubyRequestType_RMW_Write) ||
(request->ruby_request.type == RubyRequestType_Load_Linked) ||
- (request->ruby_request.type == RubyRequestType_Store_Conditional)) {
+ (request->ruby_request.type == RubyRequestType_Store_Conditional) ||
+ (request->ruby_request.type == RubyRequestType_Locked_RMW_Read) ||
+ (request->ruby_request.type == RubyRequestType_Locked_RMW_Write)) {
pair<RequestTable::iterator, bool> r =
m_writeRequestTable.insert(RequestTable::value_type(line_addr, 0));
bool success = r.second;
@@ -291,7 +293,9 @@ Sequencer::removeRequest(SequencerRequest* srequest)
(ruby_request.type == RubyRequestType_RMW_Read) ||
(ruby_request.type == RubyRequestType_RMW_Write) ||
(ruby_request.type == RubyRequestType_Load_Linked) ||
- (ruby_request.type == RubyRequestType_Store_Conditional)) {
+ (ruby_request.type == RubyRequestType_Store_Conditional) ||
+ (ruby_request.type == RubyRequestType_Locked_RMW_Read) ||
+ (ruby_request.type == RubyRequestType_Locked_RMW_Write)) {
m_writeRequestTable.erase(line_addr);
} else {
m_readRequestTable.erase(line_addr);
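The two hunks above apply the same classification rule in insertRequest() and removeRequest(): any request type that can modify the line, now including the new Locked_RMW pair, is tracked in m_writeRequestTable, while plain loads and instruction fetches go to m_readRequestTable. The following is a minimal standalone sketch of that predicate, not code from the patch: the enum is a stand-in subset of gem5's generated RubyRequestType, isWriteTableType is a hypothetical helper, and the real condition also covers types elided from the hunk's context (such as plain stores).

    #include <cassert>

    // Stand-in subset of gem5's generated RubyRequestType enum (illustrative).
    enum RubyRequestType {
        RubyRequestType_LD,
        RubyRequestType_IFETCH,
        RubyRequestType_ST,
        RubyRequestType_RMW_Read,
        RubyRequestType_RMW_Write,
        RubyRequestType_Load_Linked,
        RubyRequestType_Store_Conditional,
        RubyRequestType_Locked_RMW_Read,
        RubyRequestType_Locked_RMW_Write
    };

    // Hypothetical helper mirroring the condition in both hunks: request
    // types that may modify the line are tracked in m_writeRequestTable.
    static bool isWriteTableType(RubyRequestType type)
    {
        switch (type) {
          case RubyRequestType_ST:
          case RubyRequestType_RMW_Read:
          case RubyRequestType_RMW_Write:
          case RubyRequestType_Load_Linked:
          case RubyRequestType_Store_Conditional:
          case RubyRequestType_Locked_RMW_Read:
          case RubyRequestType_Locked_RMW_Write:
            return true;
          default:
            return false;   // LD, IFETCH -> m_readRequestTable
        }
    }

    int main()
    {
        assert(isWriteTableType(RubyRequestType_Locked_RMW_Write));
        assert(!isWriteTableType(RubyRequestType_IFETCH));
        return 0;
    }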
@@ -379,7 +383,9 @@ Sequencer::writeCallback(const Address& address,
(request->ruby_request.type == RubyRequestType_RMW_Read) ||
(request->ruby_request.type == RubyRequestType_RMW_Write) ||
(request->ruby_request.type == RubyRequestType_Load_Linked) ||
- (request->ruby_request.type == RubyRequestType_Store_Conditional));
+ (request->ruby_request.type == RubyRequestType_Store_Conditional) ||
+ (request->ruby_request.type == RubyRequestType_Locked_RMW_Read) ||
+ (request->ruby_request.type == RubyRequestType_Locked_RMW_Write));

//
// For Alpha, properly handle LL, SC, and write requests with respect to
@@ -387,9 +393,9 @@ Sequencer::writeCallback(const Address& address,
// locked cache blocks.
//
bool success = handleLlsc(address, request);

- if (request->ruby_request.type == RubyRequestType_RMW_Read) {
+ if (request->ruby_request.type == RubyRequestType_Locked_RMW_Read) {
m_controller->blockOnQueue(address, m_mandatory_q_ptr);
- } else if (request->ruby_request.type == RubyRequestType_RMW_Write) {
+ } else if (request->ruby_request.type == RubyRequestType_Locked_RMW_Write) {
m_controller->unblock(address);
}
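This hunk is the heart of the patch: when the Locked_RMW_Read completes, the controller blocks the line so that it is serviced only from the mandatory queue, and the paired Locked_RMW_Write unblocks it, which keeps any other core's request from interleaving inside the locked sequence. Below is a self-contained sketch of that bracketing; ControllerStub, AddressStub, and their members are illustrative stand-ins, not gem5's classes (the real blockOnQueue also takes the mandatory queue pointer, as the hunk shows).

    #include <cassert>
    #include <set>

    // Illustrative stand-ins; not gem5's Address or AbstractController.
    struct AddressStub { long lineAddr; };

    struct ControllerStub {
        std::set<long> blocked;

        // While a line is blocked, the controller services requests to it
        // only from the designated (mandatory) queue, so no other core's
        // access can slip between the locked read and the locked write.
        void blockOnQueue(const AddressStub &a) { blocked.insert(a.lineAddr); }
        void unblock(const AddressStub &a)      { blocked.erase(a.lineAddr); }
        bool isBlocked(const AddressStub &a) const {
            return blocked.count(a.lineAddr) != 0;
        }
    };

    int main()
    {
        ControllerStub ctrl;
        AddressStub line{0x1000};

        ctrl.blockOnQueue(line);        // Locked_RMW_Read completed
        assert(ctrl.isBlocked(line));   // other requesters must wait

        ctrl.unblock(line);             // Locked_RMW_Write completed
        assert(!ctrl.isBlocked(line));
        return 0;
    }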
@@ -430,7 +436,6 @@ Sequencer::readCallback(const Address& address,
markRemoved();

assert((request->ruby_request.type == RubyRequestType_LD) ||
- (request->ruby_request.type == RubyRequestType_RMW_Read) ||
(request->ruby_request.type == RubyRequestType_IFETCH));

hitCallback(request, mach, data, true,
@@ -501,8 +506,8 @@ Sequencer::hitCallback(SequencerRequest* srequest,
if ((type == RubyRequestType_LD) ||
(type == RubyRequestType_IFETCH) ||
(type == RubyRequestType_RMW_Read) ||
+ (type == RubyRequestType_Locked_RMW_Read) ||
(type == RubyRequestType_Load_Linked)) {
-
memcpy(ruby_request.data,
data.getData(request_address.getOffset(), ruby_request.len),
ruby_request.len);
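For every load-flavored type, now including Locked_RMW_Read, hitCallback() copies the requested bytes out of the cache line at the request's offset, as the memcpy above shows. The sketch below restates that copy with stand-in types: kLineBytes and copyFromLine are hypothetical, and in gem5 the source pointer actually comes from DataBlock::getData.

    #include <cstring>
    #include <cstddef>
    #include <cstdint>

    const std::size_t kLineBytes = 64;   // assumed cache line size

    // Hypothetical restatement of the copy in hitCallback(): extract `len`
    // bytes at `offset` within the line into the request's data buffer,
    // i.e. memcpy(ruby_request.data, data.getData(offset, len), len).
    void copyFromLine(uint8_t *dst, const uint8_t (&line)[kLineBytes],
                      std::size_t offset, std::size_t len)
    {
        std::memcpy(dst, line + offset, len);
    }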
@@ -612,18 +617,30 @@ Sequencer::issueRequest(const RubyRequest& request)
ctype = CacheRequestType_LD;
break;
case RubyRequestType_ST:
+ case RubyRequestType_RMW_Read:
+ case RubyRequestType_RMW_Write:
+ //
+ // x86 locked instructions are translated to store cache coherence
+ // requests because these requests should always be treated as read
+ // exclusive operations and should leverage any migratory sharing
+ // optimization built into the protocol.
+ //
+ case RubyRequestType_Locked_RMW_Read:
+ case RubyRequestType_Locked_RMW_Write:
ctype = CacheRequestType_ST;
break;
+ //
+ // Alpha LL/SC instructions need to be handled carefully by the cache
+ // coherence protocol to ensure they follow the proper semantics. In
+ // particular, by identifying the operations as atomic, the protocol
+ // should understand that migratory sharing optimizations should not be
+ // performed (i.e. a load between the LL and SC should not steal away
+ // exclusive permission).
+ //
case RubyRequestType_Load_Linked:
case RubyRequestType_Store_Conditional:
ctype = CacheRequestType_ATOMIC;
break;
- case RubyRequestType_RMW_Read:
- ctype = CacheRequestType_ATOMIC;
- break;
- case RubyRequestType_RMW_Write:
- ctype = CacheRequestType_ATOMIC;
- break;
default:
assert(0);
}
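Taken together, the rewritten switch encodes two policies: x86 locked RMWs (and plain RMWs) now issue as CacheRequestType_ST, so they are treated as read-exclusive and can benefit from migratory-sharing optimizations, while Alpha LL/SC issue as CacheRequestType_ATOMIC so the protocol suppresses migratory handoffs between the LL and the SC. The sketch below restates that mapping as a standalone function. It reuses the stand-in RubyRequestType enum from the first sketch; CacheRequestTypeStub is hypothetical, and cases elided from the hunk (e.g. IFETCH) are assumptions.

    #include <cassert>

    enum CacheRequestTypeStub { CRT_LD, CRT_ST, CRT_IFETCH, CRT_ATOMIC };

    // Illustrative restatement of the switch in issueRequest().
    CacheRequestTypeStub toCacheRequestType(RubyRequestType type)
    {
        switch (type) {
          case RubyRequestType_LD:
            return CRT_LD;
          case RubyRequestType_IFETCH:     // assumed; elided from the hunk
            return CRT_IFETCH;
          case RubyRequestType_ST:         // stores and all RMW flavors issue
          case RubyRequestType_RMW_Read:   // as read-exclusive store requests
          case RubyRequestType_RMW_Write:
          case RubyRequestType_Locked_RMW_Read:
          case RubyRequestType_Locked_RMW_Write:
            return CRT_ST;
          case RubyRequestType_Load_Linked:        // LL/SC stay ATOMIC so the
          case RubyRequestType_Store_Conditional:  // protocol avoids migratory
            return CRT_ATOMIC;                     // handoffs between LL and SC
          default:
            assert(0);
            return CRT_LD;   // unreachable
        }
    }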