Diffstat (limited to 'src')
-rw-r--r--  src/mem/ruby/slicc_interface/AbstractCacheEntry.cc  25
-rw-r--r--  src/mem/ruby/slicc_interface/AbstractCacheEntry.hh   5
-rw-r--r--  src/mem/ruby/structures/CacheMemory.cc                6
-rw-r--r--  src/mem/ruby/structures/CacheMemory.hh                5
-rw-r--r--  src/mem/ruby/system/Sequencer.cc                     30
5 files changed, 52 insertions, 19 deletions
diff --git a/src/mem/ruby/slicc_interface/AbstractCacheEntry.cc b/src/mem/ruby/slicc_interface/AbstractCacheEntry.cc
index 01fd3f522..416aea73b 100644
--- a/src/mem/ruby/slicc_interface/AbstractCacheEntry.cc
+++ b/src/mem/ruby/slicc_interface/AbstractCacheEntry.cc
@@ -28,6 +28,9 @@
#include "mem/ruby/slicc_interface/AbstractCacheEntry.hh"
+#include "base/trace.hh"
+#include "debug/RubyCache.hh"
+
AbstractCacheEntry::AbstractCacheEntry()
{
m_Permission = AccessPermission_NotPresent;
@@ -48,3 +51,25 @@ AbstractCacheEntry::changePermission(AccessPermission new_perm)
m_locked = -1;
}
}
+
+void
+AbstractCacheEntry::setLocked(int context)
+{
+ DPRINTF(RubyCache, "Setting Lock for addr: %x to %d\n", m_Address, context);
+ m_locked = context;
+}
+
+void
+AbstractCacheEntry::clearLocked()
+{
+ DPRINTF(RubyCache, "Clear Lock for addr: %x\n", m_Address);
+ m_locked = -1;
+}
+
+bool
+AbstractCacheEntry::isLocked(int context) const
+{
+ DPRINTF(RubyCache, "Testing Lock for addr: %llx cur %d con %d\n",
+ m_Address, m_locked, context);
+ return m_locked == context;
+}
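
The entry-level API added above keeps a single lock owner per cache block: m_locked holds the ID of the locking context, or -1 when the block is unlocked. A minimal standalone sketch of that pattern follows; the LockableEntry name, the main() driver, and the plain printf tracing standing in for gem5's DPRINTF are illustrative assumptions, not the gem5 code itself.

// Sketch of the per-entry lock-context pattern used by the new
// AbstractCacheEntry methods (illustrative, not the gem5 implementation).
#include <cstdio>
#include <cstdint>

struct LockableEntry
{
    uint64_t m_Address = 0;   // block address, mirrors AbstractCacheEntry::m_Address
    int m_locked = -1;        // -1 means unlocked, otherwise the locking context ID

    void setLocked(int context)
    {
        std::printf("Setting lock for addr: %#llx to %d\n",
                    (unsigned long long)m_Address, context);
        m_locked = context;
    }

    void clearLocked()
    {
        std::printf("Clearing lock for addr: %#llx\n",
                    (unsigned long long)m_Address);
        m_locked = -1;
    }

    bool isLocked(int context) const
    {
        // A lock only counts for the context that set it.
        return m_locked == context;
    }
};

int main()
{
    LockableEntry e;
    e.m_Address = 0x1000;
    e.setLocked(3);                                           // LL from context 3 locks the line
    std::printf("ctx 3 holds lock: %d\n", e.isLocked(3));     // prints 1
    std::printf("ctx 5 holds lock: %d\n", e.isLocked(5));     // prints 0
    e.clearLocked();                                          // SC or an invalidation releases it
    return 0;
}
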
diff --git a/src/mem/ruby/slicc_interface/AbstractCacheEntry.hh b/src/mem/ruby/slicc_interface/AbstractCacheEntry.hh
index 6c7a4a008..2b318957f 100644
--- a/src/mem/ruby/slicc_interface/AbstractCacheEntry.hh
+++ b/src/mem/ruby/slicc_interface/AbstractCacheEntry.hh
@@ -56,6 +56,11 @@ class AbstractCacheEntry : public AbstractEntry
virtual DataBlock& getDataBlk()
{ panic("getDataBlk() not implemented!"); }
+ // Functions for locking and unlocking the cache entry. These are required
+ // for supporting atomic memory accesses.
+ void setLocked(int context);
+ void clearLocked();
+ bool isLocked(int context) const;
Addr m_Address; // Address of this block, required by CacheMemory
int m_locked; // Holds info whether the address is locked,
diff --git a/src/mem/ruby/structures/CacheMemory.cc b/src/mem/ruby/structures/CacheMemory.cc
index 7eba450c1..bb26ff03c 100644
--- a/src/mem/ruby/structures/CacheMemory.cc
+++ b/src/mem/ruby/structures/CacheMemory.cc
@@ -413,7 +413,7 @@ CacheMemory::setLocked(Addr address, int context)
int64 cacheSet = addressToCacheSet(address);
int loc = findTagInSet(cacheSet, address);
assert(loc != -1);
- m_cache[cacheSet][loc]->m_locked = context;
+ m_cache[cacheSet][loc]->setLocked(context);
}
void
@@ -424,7 +424,7 @@ CacheMemory::clearLocked(Addr address)
int64 cacheSet = addressToCacheSet(address);
int loc = findTagInSet(cacheSet, address);
assert(loc != -1);
- m_cache[cacheSet][loc]->m_locked = -1;
+ m_cache[cacheSet][loc]->clearLocked();
}
bool
@@ -436,7 +436,7 @@ CacheMemory::isLocked(Addr address, int context)
assert(loc != -1);
DPRINTF(RubyCache, "Testing Lock for addr: %llx cur %d con %d\n",
address, m_cache[cacheSet][loc]->m_locked, context);
- return m_cache[cacheSet][loc]->m_locked == context;
+ return m_cache[cacheSet][loc]->isLocked(context);
}
void
diff --git a/src/mem/ruby/structures/CacheMemory.hh b/src/mem/ruby/structures/CacheMemory.hh
index 08551ab87..6c719cb4f 100644
--- a/src/mem/ruby/structures/CacheMemory.hh
+++ b/src/mem/ruby/structures/CacheMemory.hh
@@ -107,6 +107,11 @@ class CacheMemory : public SimObject
// Set this address to most recently used
void setMRU(Addr address);
+ // Functions for locking and unlocking cache lines corresponding to the
+ // provided address. These are required for supporting atomic memory
+ // accesses. These are to be used when only the address of the cache entry
+ // is available. In case the entry itself is available, use the functions
+ // provided by the AbstractCacheEntry class.
void setLocked (Addr addr, int context);
void clearLocked (Addr addr);
bool isLocked (Addr addr, int context);
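
These address-based wrappers remain for callers that only know the block's address; they resolve the entry and forward to the entry-level API. A hedged sketch of that delegation pattern follows, assuming a hypothetical EntryTable container with a simple lookup helper in place of CacheMemory's set/way indexing.

// Sketch of address-based lock wrappers delegating to the entry-level API
// (EntryTable, Entry, and the map-based lookup are assumptions for illustration).
#include <cassert>
#include <cstdint>
#include <unordered_map>

using Addr = uint64_t;

struct Entry
{
    int m_locked = -1;
    void setLocked(int context) { m_locked = context; }
    void clearLocked()          { m_locked = -1; }
    bool isLocked(int context) const { return m_locked == context; }
};

class EntryTable
{
  public:
    Entry *lookup(Addr addr)
    {
        auto it = m_entries.find(addr);
        return it == m_entries.end() ? nullptr : &it->second;
    }

    // Address-based wrappers for callers that only have the address.
    void setLocked(Addr addr, int context)
    {
        Entry *e = lookup(addr);
        assert(e != nullptr);        // caller must ensure the block is present
        e->setLocked(context);
    }

    void clearLocked(Addr addr)
    {
        Entry *e = lookup(addr);
        assert(e != nullptr);
        e->clearLocked();
    }

    bool isLocked(Addr addr, int context)
    {
        Entry *e = lookup(addr);
        assert(e != nullptr);
        return e->isLocked(context);
    }

    std::unordered_map<Addr, Entry> m_entries;
};

int main()
{
    EntryTable table;
    table.m_entries[0x2000] = Entry{};      // pretend the block was just allocated
    table.setLocked(0x2000, /*context=*/1);
    return table.isLocked(0x2000, 1) ? 0 : 1;
}
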
diff --git a/src/mem/ruby/system/Sequencer.cc b/src/mem/ruby/system/Sequencer.cc
index 305758798..b21c70743 100644
--- a/src/mem/ruby/system/Sequencer.cc
+++ b/src/mem/ruby/system/Sequencer.cc
@@ -317,28 +317,27 @@ Sequencer::removeRequest(SequencerRequest* srequest)
void
Sequencer::invalidateSC(Addr address)
{
- RequestTable::iterator i = m_writeRequestTable.find(address);
- if (i != m_writeRequestTable.end()) {
- SequencerRequest* request = i->second;
- // The controller has lost the coherence permissions, hence the lock
- // on the cache line maintained by the cache should be cleared.
- if (request->m_type == RubyRequestType_Store_Conditional) {
- m_dataCache_ptr->clearLocked(address);
- }
+ AbstractCacheEntry *e = m_dataCache_ptr->lookup(address);
+ // The controller has lost the coherence permissions, hence the lock
+ // on the cache line maintained by the cache should be cleared.
+ if (e && e->isLocked(m_version)) {
+ e->clearLocked();
}
}
bool
Sequencer::handleLlsc(Addr address, SequencerRequest* request)
{
- //
+ AbstractCacheEntry *e = m_dataCache_ptr->lookup(address);
+ if (!e)
+ return true;
+
// The success flag indicates whether the LLSC operation was successful.
// LL ops will always succeed, but SC may fail if the cache line is no
// longer locked.
- //
bool success = true;
if (request->m_type == RubyRequestType_Store_Conditional) {
- if (!m_dataCache_ptr->isLocked(address, m_version)) {
+ if (!e->isLocked(m_version)) {
//
// For failed SC requests, indicate the failure to the cpu by
// setting the extra data to zero.
@@ -355,19 +354,18 @@ Sequencer::handleLlsc(Addr address, SequencerRequest* request)
//
// Independent of success, all SC operations must clear the lock
//
- m_dataCache_ptr->clearLocked(address);
+ e->clearLocked();
} else if (request->m_type == RubyRequestType_Load_Linked) {
//
// Note: To fully follow Alpha LLSC semantics, should the LL clear any
// previously locked cache lines?
//
- m_dataCache_ptr->setLocked(address, m_version);
- } else if ((m_dataCache_ptr->isTagPresent(address)) &&
- (m_dataCache_ptr->isLocked(address, m_version))) {
+ e->setLocked(m_version);
+ } else if (e->isLocked(m_version)) {
//
// Normal writes should clear the locked address
//
- m_dataCache_ptr->clearLocked(address);
+ e->clearLocked();
}
return success;
}
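
With this change, handleLlsc() looks the entry up once and drives the LL/SC decisions directly on it: an SC succeeds only if the requesting context still holds the lock and always releases it, an LL takes the lock, and any other access that finds the line locked by this context releases it. A simplified sketch of that flow follows; the RequestType enum, Entry struct, and handleLlscSketch name are assumptions standing in for the Ruby types, and the real Sequencer also rewrites the packet's extra data on SC failure.

// Simplified sketch of the LL/SC decision flow implemented by handleLlsc()
// after this change (types and names are illustrative assumptions).
enum class RequestType { LoadLinked, StoreConditional, Store };

struct Entry
{
    int m_locked = -1;
    void setLocked(int context) { m_locked = context; }
    void clearLocked()          { m_locked = -1; }
    bool isLocked(int context) const { return m_locked == context; }
};

// Returns false only for a store-conditional that no longer holds its lock.
bool handleLlscSketch(Entry *e, RequestType type, int version)
{
    if (!e)                               // block no longer cached: treat as success
        return true;

    bool success = true;
    if (type == RequestType::StoreConditional) {
        success = e->isLocked(version);   // SC succeeds only if this context still holds the lock
        e->clearLocked();                 // SC always releases the lock, success or not
    } else if (type == RequestType::LoadLinked) {
        e->setLocked(version);            // LL takes the lock for this context
    } else if (e->isLocked(version)) {
        e->clearLocked();                 // a normal write breaks an outstanding LL
    }
    return success;
}

int main()
{
    Entry line;
    handleLlscSketch(&line, RequestType::LoadLinked, /*version=*/0);        // LL locks the line
    bool sc_ok = handleLlscSketch(&line, RequestType::StoreConditional, 0); // SC should succeed
    return sc_ok ? 0 : 1;
}
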