author     Brad Beckmann <Brad.Beckmann@amd.com>    2010-08-20 11:46:12 -0700
committer  Brad Beckmann <Brad.Beckmann@amd.com>    2010-08-20 11:46:12 -0700
commit     8e5c441a54b481085d6311f14af66e41b5766f91 (patch)
tree       8d0e6f8395685fb8a7fb2eda83e572301e687d1a /src
parent     54d76f0ce5d721ad3b4de168db98054844e634cc (diff)
ruby: fix ruby llsc support to sync sc outcomes
Added support so that Ruby can determine the outcome of store-conditional operations and reflect that outcome to M5 physical memory and the CPUs.
Diffstat (limited to 'src')
-rw-r--r--  src/mem/packet.hh                       24
-rw-r--r--  src/mem/protocol/RubySlicc_Exports.sm    2
-rw-r--r--  src/mem/ruby/system/CacheMemory.cc       4
-rw-r--r--  src/mem/ruby/system/RubyPort.cc         49
-rw-r--r--  src/mem/ruby/system/Sequencer.cc        63
-rw-r--r--  src/mem/ruby/system/Sequencer.hh         1
6 files changed, 101 insertions(+), 42 deletions(-)
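
Before the per-file hunks, here is a rough, self-contained sketch of the flow this patch establishes, using simplified stand-in types rather than the real gem5 Request/Packet classes: the Sequencer decides the SC outcome against Ruby's locked-line state, records it in the request's extra data, and the RubyPort later uses that value to decide whether M5 physical memory is touched at all.

    #include <cstdint>

    // Stand-in for the relevant piece of gem5's Request: a single extra-data
    // word used here to carry the SC outcome back to the CPU side.
    struct FakeRequest {
        uint64_t extra = 0;
        void setExtraData(uint64_t v) { extra = v; }
        uint64_t getExtraData() const { return extra; }
    };

    // Sequencer side: called when the Locked_Write (SC) completes in Ruby.
    // Success means the LL reservation is still held by this context.
    void recordScOutcome(FakeRequest &req, bool lineStillLocked)
    {
        req.setExtraData(lineStillLocked ? 1 : 0);
    }

    // Port side: only a successful SC (non-zero extra data) is allowed to
    // reach M5 physical memory; a failed SC is answered by the port itself.
    bool scMayAccessPhysMem(const FakeRequest &req)
    {
        return req.getExtraData() != 0;
    }
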
diff --git a/src/mem/packet.hh b/src/mem/packet.hh
index 2c94da8bd..390d9672f 100644
--- a/src/mem/packet.hh
+++ b/src/mem/packet.hh
@@ -463,6 +463,30 @@ class Packet : public FastAlloc, public Printable
Addr getOffset(int blkSize) const { return getAddr() & (Addr)(blkSize - 1); }
/**
+ * It has been determined that the SC packet should successfully update
+ * memory. Therefore, convert this SC packet to a normal write.
+ */
+ void
+ convertScToWrite()
+ {
+ assert(isLLSC());
+ assert(isWrite());
+ cmd = MemCmd::WriteReq;
+ }
+
+ /**
+ * When ruby is in use, Ruby will monitor the cache line and thus M5
+ * phys memory should treat LL ops as normal reads.
+ */
+ void
+ convertLlToRead()
+ {
+ assert(isLLSC());
+ assert(isRead());
+ cmd = MemCmd::ReadReq;
+ }
+
+ /**
* Constructor. Note that a Request object must be constructed
* first, but the Requests's physical address and size fields need
* not be valid. The command and destination addresses must be
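
A miniature illustration of what these two conversions accomplish (purely hypothetical types, not the real MemCmd/Packet API): once Ruby has resolved the LL/SC semantics, M5 physical memory only ever sees plain reads and writes.

    #include <cassert>

    enum class Cmd { ReadReq, WriteReq, LoadLockedReq, StoreCondReq };

    struct MiniPkt {
        Cmd cmd;
        bool isLLSC()  const { return cmd == Cmd::LoadLockedReq || cmd == Cmd::StoreCondReq; }
        bool isRead()  const { return cmd == Cmd::ReadReq || cmd == Cmd::LoadLockedReq; }
        bool isWrite() const { return cmd == Cmd::WriteReq || cmd == Cmd::StoreCondReq; }

        // SC that Ruby deemed successful: demote to an ordinary write.
        void convertScToWrite() { assert(isLLSC() && isWrite()); cmd = Cmd::WriteReq; }
        // LL under Ruby: demote to an ordinary read so phys mem takes no lock.
        void convertLlToRead()  { assert(isLLSC() && isRead());  cmd = Cmd::ReadReq; }
    };
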
diff --git a/src/mem/protocol/RubySlicc_Exports.sm b/src/mem/protocol/RubySlicc_Exports.sm
index d29620d16..4cea7c379 100644
--- a/src/mem/protocol/RubySlicc_Exports.sm
+++ b/src/mem/protocol/RubySlicc_Exports.sm
@@ -53,7 +53,6 @@ enumeration(AccessPermission, desc="...", default="AccessPermission_NotPresent")
Read_Write, desc="Read/Write";
Invalid, desc="Invalid";
NotPresent, desc="NotPresent";
- OnHold, desc="Holding a place in dnuca cache";
ReadUpgradingToWrite, desc="Read only, but trying to get Read/Write";
Stale, desc="local L1 has a modified copy, assume L2 copy is stale data";
}
@@ -345,6 +344,5 @@ enumeration(RequestStatus, desc="...", default="RequestStatus_NULL") {
Issued, desc="The sequencer successfully issued the request";
BufferFull, desc="Can not issue because the sequencer is full";
Aliased, desc="This request aliased with a currently outstanding request";
- LlscFailed, desc="The write failed in the Load-Link Store-Conditional pair";
NULL, desc="";
}
diff --git a/src/mem/ruby/system/CacheMemory.cc b/src/mem/ruby/system/CacheMemory.cc
index 9102d1963..604113238 100644
--- a/src/mem/ruby/system/CacheMemory.cc
+++ b/src/mem/ruby/system/CacheMemory.cc
@@ -353,7 +353,9 @@ CacheMemory::changePermission(const Address& address,
lookup(address).m_Permission = new_perm;
Index cacheSet = addressToCacheSet(address);
int loc = findTagInSet(cacheSet, address);
- if (new_perm != AccessPermission_Read_Write) {
+ if ((new_perm == AccessPermission_Invalid) ||
+ (new_perm == AccessPermission_NotPresent) ||
+ (new_perm == AccessPermission_Stale)) {
DPRINTF(RubyCache, "Permission clearing lock for addr: %x\n", address);
m_locked[cacheSet][loc] = -1;
}
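
The new condition can be read as a predicate on the destination permission; a hedged restatement with illustrative names only:

    enum class Perm { Read_Only, Read_Write, ReadUpgradingToWrite, Invalid, NotPresent, Stale };

    // The LL reservation is dropped only when the block actually leaves the
    // cache or its data can no longer be trusted, not on every transition
    // away from Read_Write (e.g. a downgrade to Read_Only keeps the lock).
    bool permissionDropsLlLock(Perm newPerm)
    {
        return newPerm == Perm::Invalid ||
               newPerm == Perm::NotPresent ||
               newPerm == Perm::Stale;
    }
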
diff --git a/src/mem/ruby/system/RubyPort.cc b/src/mem/ruby/system/RubyPort.cc
index 87a98185c..a8edb03b2 100644
--- a/src/mem/ruby/system/RubyPort.cc
+++ b/src/mem/ruby/system/RubyPort.cc
@@ -229,23 +229,10 @@ RubyPort::M5Port::recvTiming(PacketPtr pkt)
// Submit the ruby request
RequestStatus requestStatus = ruby_port->makeRequest(ruby_request);
- // If the request successfully issued or the SC request completed because
- // exclusive permission was lost, then we should return true.
+ // If the request successfully issued then we should return true.
// Otherwise, we need to delete the senderStatus we just created and return
// false.
- if ((requestStatus == RequestStatus_Issued) ||
- (requestStatus == RequestStatus_LlscFailed)) {
-
- // The communicate to M5 whether the SC command succeeded by seting the
- // packet's extra data.
- if (pkt->isLLSC() && pkt->isWrite()) {
- if (requestStatus == RequestStatus_LlscFailed) {
- DPRINTF(MemoryAccess, "SC failed and request completed\n");
- pkt->req->setExtraData(0);
- } else {
- pkt->req->setExtraData(1);
- }
- }
+ if (requestStatus == RequestStatus_Issued) {
return true;
}
@@ -280,9 +267,39 @@ RubyPort::M5Port::hitCallback(PacketPtr pkt)
{
bool needsResponse = pkt->needsResponse();
+ //
+ // All responses except failed SC operations access M5 physical memory
+ //
+ bool accessPhysMem = true;
+
+ if (pkt->isLLSC()) {
+ if (pkt->isWrite()) {
+ if (pkt->req->getExtraData() != 0) {
+ //
+ // Successful SC packets convert to normal writes
+ //
+ pkt->convertScToWrite();
+ } else {
+ //
+ // Failed SC packets don't access physical memory and thus
+ // the RubyPort itself must convert it to a response.
+ //
+ accessPhysMem = false;
+ pkt->makeAtomicResponse();
+ }
+ } else {
+ //
+ // All LL packets convert to normal loads so that M5 PhysMem does
+ // not lock the blocks.
+ //
+ pkt->convertLlToRead();
+ }
+ }
DPRINTF(MemoryAccess, "Hit callback needs response %d\n", needsResponse);
- ruby_port->physMemPort->sendAtomic(pkt);
+ if (accessPhysMem) {
+ ruby_port->physMemPort->sendAtomic(pkt);
+ }
// turn packet around to go back to requester if response expected
if (needsResponse) {
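
The resulting decision table in hitCallback can be summarized as a small pure function (simplified types and hypothetical names, not the actual RubyPort code):

    #include <cstdint>

    enum class HitAction {
        PlainPhysMemAccess,      // non-LL/SC request, unchanged behavior
        PhysMemAsRead,           // LL: converted to an ordinary load
        PhysMemAsWrite,          // successful SC: converted to an ordinary store
        RespondWithoutPhysMem    // failed SC: the port fabricates the response itself
    };

    HitAction classifyHit(bool isLlsc, bool isWrite, uint64_t extraData)
    {
        if (!isLlsc)
            return HitAction::PlainPhysMemAccess;
        if (!isWrite)
            return HitAction::PhysMemAsRead;
        return extraData != 0 ? HitAction::PhysMemAsWrite
                              : HitAction::RespondWithoutPhysMem;
    }
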
diff --git a/src/mem/ruby/system/Sequencer.cc b/src/mem/ruby/system/Sequencer.cc
index e4f85908f..600f95d10 100644
--- a/src/mem/ruby/system/Sequencer.cc
+++ b/src/mem/ruby/system/Sequencer.cc
@@ -41,6 +41,7 @@
#include "mem/ruby/system/CacheMemory.hh"
#include "mem/ruby/system/Sequencer.hh"
#include "mem/ruby/system/System.hh"
+#include "mem/packet.hh"
#include "params/RubySequencer.hh"
using namespace std;
@@ -303,6 +304,38 @@ Sequencer::removeRequest(SequencerRequest* srequest)
}
void
+Sequencer::handleLlscWrites(const Address& address, SequencerRequest* request)
+{
+ if (request->ruby_request.type == RubyRequestType_Locked_Write) {
+ if (!m_dataCache_ptr->isLocked(address, m_version)) {
+ //
+ // For failed SC requests, indicate the failure to the cpu by
+ // setting the extra data to zero.
+ //
+ request->ruby_request.pkt->req->setExtraData(0);
+ } else {
+ //
+ // For successful SC requests, indicate the success to the cpu by
+ // setting the extra data to one.
+ //
+ request->ruby_request.pkt->req->setExtraData(1);
+ }
+ m_dataCache_ptr->clearLocked(address);
+ } else if (request->ruby_request.type == RubyRequestType_Locked_Read) {
+ //
+ // Note: To fully follow Alpha LLSC semantics, should the LL clear any
+ // previously locked cache lines?
+ //
+ m_dataCache_ptr->setLocked(address, m_version);
+ } else if (m_dataCache_ptr->isLocked(address, m_version)) {
+ //
+ // Normal writes should clear the locked address
+ //
+ m_dataCache_ptr->clearLocked(address);
+ }
+}
+
+void
Sequencer::writeCallback(const Address& address, DataBlock& data)
{
writeCallback(address, GenericMachineType_NULL, data);
@@ -329,9 +362,13 @@ Sequencer::writeCallback(const Address& address,
(request->ruby_request.type == RubyRequestType_Locked_Read) ||
(request->ruby_request.type == RubyRequestType_Locked_Write));
- if (request->ruby_request.type == RubyRequestType_Locked_Read) {
- m_dataCache_ptr->setLocked(address, m_version);
- } else if (request->ruby_request.type == RubyRequestType_RMW_Read) {
+ //
+ // For Alpha, properly handle LL, SC, and write requests with respect to
+ // locked cache blocks.
+ //
+ handleLlscWrites(address, request);
+
+ if (request->ruby_request.type == RubyRequestType_RMW_Read) {
m_controller->blockOnQueue(address, m_mandatory_q_ptr);
} else if (request->ruby_request.type == RubyRequestType_RMW_Write) {
m_controller->unblock(address);
@@ -504,26 +541,6 @@ Sequencer::makeRequest(const RubyRequest &request)
return RequestStatus_NULL;
}
- if (request.type == RubyRequestType_Locked_Write) {
- // NOTE: it is OK to check the locked flag here as the
- // mandatory queue will be checked first ensuring that nothing
- // comes between checking the flag and servicing the store.
-
- Address line_addr = line_address(Address(request.paddr));
- if (!m_dataCache_ptr->isLocked(line_addr, m_version)) {
- removeRequest(srequest);
- if (Debug::getProtocolTrace()) {
- g_system_ptr->getProfiler()->
- profileTransition("Seq", m_version,
- Address(request.paddr),
- "", "SC Fail", "",
- RubyRequestType_to_string(request.type));
- }
- return RequestStatus_LlscFailed;
- } else {
- m_dataCache_ptr->clearLocked(line_addr);
- }
- }
issueRequest(request);
// TODO: issue hardware prefetches here
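
A rough behavioral model (illustrative only, not the CacheMemory interface) of the per-line lock bookkeeping that handleLlscWrites relies on: LL records which context holds the reservation, SC succeeds only if that record is still intact, and any intervening ordinary write clears it.

    #include <cstdint>
    #include <map>

    struct LockTable {
        std::map<uint64_t, int> locked;   // line address -> locking context (m_version)

        void ll(uint64_t line, int ctx) { locked[line] = ctx; }

        // Returns the SC outcome; the reservation is consumed either way.
        bool sc(uint64_t line, int ctx)
        {
            auto it = locked.find(line);
            bool success = (it != locked.end() && it->second == ctx);
            locked.erase(line);
            return success;
        }

        // An ordinary store to a locked line breaks any outstanding reservation.
        void plainWrite(uint64_t line) { locked.erase(line); }
    };
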
diff --git a/src/mem/ruby/system/Sequencer.hh b/src/mem/ruby/system/Sequencer.hh
index fd6b390c2..c298e27dd 100644
--- a/src/mem/ruby/system/Sequencer.hh
+++ b/src/mem/ruby/system/Sequencer.hh
@@ -109,6 +109,7 @@ class Sequencer : public RubyPort, public Consumer
bool insertRequest(SequencerRequest* request);
+ void handleLlscWrites(const Address& address, SequencerRequest* request);
// Private copy constructor and assignment operator
Sequencer(const Sequencer& obj);