summaryrefslogtreecommitdiff
path: root/src/mem/ruby/system
diff options
context:
space:
mode:
authorSomayeh Sardashti <somayeh@cs.wisc.edu>2011-03-28 10:49:45 -0500
committerSomayeh Sardashti <somayeh@cs.wisc.edu>2011-03-28 10:49:45 -0500
commitc8bbfed93752c2c79d36bb9dedbc2208b856dae6 (patch)
treec33a164e435603a4424f81f7f09ec50b5f01e455 /src/mem/ruby/system
parentef987a4064f1e81fd1b61f3de03834a51658645f (diff)
downloadgem5-c8bbfed93752c2c79d36bb9dedbc2208b856dae6.tar.xz
This patch supports cache flushing in MOESI_hammer
Diffstat (limited to 'src/mem/ruby/system')
-rw-r--r--src/mem/ruby/system/RubyPort.cc14
-rw-r--r--src/mem/ruby/system/Sequencer.cc12
2 files changed, 21 insertions, 5 deletions
diff --git a/src/mem/ruby/system/RubyPort.cc b/src/mem/ruby/system/RubyPort.cc
index 92627740f..6d5cb71bf 100644
--- a/src/mem/ruby/system/RubyPort.cc
+++ b/src/mem/ruby/system/RubyPort.cc
@@ -244,6 +244,8 @@ RubyPort::M5Port::recvTiming(PacketPtr pkt)
// Note: M5 packets do not differentiate ST from RMW_Write
//
type = RubyRequestType_ST;
+ } else if (pkt->isFlush()) {
+ type = RubyRequestType_FLUSH;
} else {
panic("Unsupported ruby packet type\n");
}
@@ -335,7 +337,7 @@ RubyPort::M5Port::hitCallback(PacketPtr pkt)
//
// Unless specified at configuration, all responses except failed SC
- // operations access M5 physical memory.
+ // and Flush operations access M5 physical memory.
//
bool accessPhysMem = access_phys_mem;
@@ -361,11 +363,19 @@ RubyPort::M5Port::hitCallback(PacketPtr pkt)
pkt->convertLlToRead();
}
}
+
+ //
+ // Flush requests don't access physical memory
+ //
+ if (pkt->isFlush()) {
+ accessPhysMem = false;
+ }
+
DPRINTF(RubyPort, "Hit callback needs response %d\n", needsResponse);
if (accessPhysMem) {
ruby_port->physMemPort->sendAtomic(pkt);
- } else {
+ } else if (needsResponse) {
pkt->makeResponse();
}
diff --git a/src/mem/ruby/system/Sequencer.cc b/src/mem/ruby/system/Sequencer.cc
index 7eb46e006..94ba6c2d3 100644
--- a/src/mem/ruby/system/Sequencer.cc
+++ b/src/mem/ruby/system/Sequencer.cc
@@ -234,7 +234,8 @@ Sequencer::insertRequest(SequencerRequest* request)
(request->ruby_request.m_Type == RubyRequestType_Load_Linked) ||
(request->ruby_request.m_Type == RubyRequestType_Store_Conditional) ||
(request->ruby_request.m_Type == RubyRequestType_Locked_RMW_Read) ||
- (request->ruby_request.m_Type == RubyRequestType_Locked_RMW_Write)) {
+ (request->ruby_request.m_Type == RubyRequestType_Locked_RMW_Write) ||
+ (request->ruby_request.m_Type == RubyRequestType_FLUSH)) {
pair<RequestTable::iterator, bool> r =
m_writeRequestTable.insert(RequestTable::value_type(line_addr, 0));
bool success = r.second;
@@ -338,7 +339,7 @@ Sequencer::handleLlsc(const Address& address, SequencerRequest* request)
// previously locked cache lines?
//
m_dataCache_ptr->setLocked(address, m_version);
- } else if (m_dataCache_ptr->isLocked(address, m_version)) {
+ } else if ((m_dataCache_ptr->isTagPresent(address)) && (m_dataCache_ptr->isLocked(address, m_version))) {
//
// Normal writes should clear the locked address
//
@@ -385,7 +386,9 @@ Sequencer::writeCallback(const Address& address,
(request->ruby_request.m_Type == RubyRequestType_Load_Linked) ||
(request->ruby_request.m_Type == RubyRequestType_Store_Conditional) ||
(request->ruby_request.m_Type == RubyRequestType_Locked_RMW_Read) ||
- (request->ruby_request.m_Type == RubyRequestType_Locked_RMW_Write));
+ (request->ruby_request.m_Type == RubyRequestType_Locked_RMW_Write) ||
+ (request->ruby_request.m_Type == RubyRequestType_FLUSH));
+
//
// For Alpha, properly handle LL, SC, and write requests with respect to
@@ -619,6 +622,9 @@ Sequencer::issueRequest(const RubyRequest& request)
case RubyRequestType_LD:
ctype = RubyRequestType_LD;
break;
+ case RubyRequestType_FLUSH:
+ ctype = RubyRequestType_FLUSH;
+ break;
case RubyRequestType_ST:
case RubyRequestType_RMW_Read:
case RubyRequestType_RMW_Write: