Diffstat (limited to 'src')
-rw-r--r--  src/mem/cache/blk.hh  69
1 file changed, 38 insertions(+), 31 deletions(-)
diff --git a/src/mem/cache/blk.hh b/src/mem/cache/blk.hh
index a32edbf20..6682dd4bb 100644
--- a/src/mem/cache/blk.hh
+++ b/src/mem/cache/blk.hh
@@ -134,8 +134,9 @@ class CacheBlk
Addr lowAddr; // low address of lock range
Addr highAddr; // high address of lock range
- // check for matching execution context
- bool matchesContext(const RequestPtr req) const
+ // check for matching execution context, and an address that
+ // is within the lock
+ bool matches(const RequestPtr req) const
{
Addr req_low = req->getPaddr();
Addr req_high = req_low + req->getSize() - 1;
@@ -143,7 +144,8 @@ class CacheBlk
(req_low >= lowAddr) && (req_high <= highAddr);
}
- bool overlapping(const RequestPtr req) const
+ // check if a request intersects, and thus invalidates, the lock
+ bool intersects(const RequestPtr req) const
{
Addr req_low = req->getPaddr();
Addr req_high = req_low + req->getSize() - 1;
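To make the two predicates concrete, here is a minimal self-contained sketch of their range semantics. Range, contains, and overlaps are hypothetical stand-ins for the Lock fields above, and the full matches() presumably also compares the lock's contextId with the request's, as its old name matchesContext and the store-conditional logic further down suggest:

    #include <cassert>
    #include <cstdint>

    // Hypothetical stand-in for a lock's [lowAddr, highAddr] byte range.
    struct Range { uint64_t low, high; };   // inclusive bounds

    // matches()-style containment: every byte of the request is inside the lock
    bool contains(const Range &lock, const Range &req)
    {
        return req.low >= lock.low && req.high <= lock.high;
    }

    // intersects()-style overlap: the request shares at least one byte
    bool overlaps(const Range &lock, const Range &req)
    {
        return req.low <= lock.high && req.high >= lock.low;
    }

    int main()
    {
        Range lock{0x100, 0x13f};                // a 64-byte locked range
        assert(contains(lock, {0x100, 0x107}));  // aligned 8-byte SC: a match
        assert(!contains(lock, {0x13c, 0x143})); // straddles the end: no match...
        assert(overlaps(lock, {0x13c, 0x143}));  // ...but still kills the lock
        return 0;
    }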
@@ -214,7 +216,7 @@ class CacheBlk
{
status = 0;
isTouched = false;
- clearLoadLocks();
+ lockList.clear();
}
/**
@@ -246,35 +248,35 @@ class CacheBlk
}
/**
- * Track the fact that a load locked was issued to the block. If
- * multiple LLs get issued from the same context we could have
- * redundant records on the list, but that's OK, as they'll all
- * get blown away at the next store.
+ * Track the fact that a load locked was issued to the
+ * block. Invalidate any previous LL to the same address.
*/
void trackLoadLocked(PacketPtr pkt)
{
assert(pkt->isLLSC());
+ auto l = lockList.begin();
+ while (l != lockList.end()) {
+ if (l->intersects(pkt->req))
+ l = lockList.erase(l);
+ else
+ ++l;
+ }
+
lockList.emplace_front(pkt->req);
}
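As a sketch of the new trackLoadLocked behaviour, assuming a simplified record keyed only by context and range (Lock, trackLL, and the plain integer context are hypothetical stand-ins, not gem5's Packet/Request API):

    #include <cstdint>
    #include <iostream>
    #include <list>

    // Hypothetical stand-in for gem5's Lock record.
    struct Lock {
        int ctx; uint64_t low, high;
        bool intersects(uint64_t lo, uint64_t hi) const
        { return lo <= high && hi >= low; }
    };

    std::list<Lock> lockList;

    // mirrors trackLoadLocked: drop every intersecting record, then add the LL
    void trackLL(int ctx, uint64_t lo, uint64_t hi)
    {
        auto l = lockList.begin();
        while (l != lockList.end()) {
            if (l->intersects(lo, hi))
                l = lockList.erase(l);    // erase returns the next iterator
            else
                ++l;
        }
        lockList.push_front({ctx, lo, hi});
    }

    int main()
    {
        trackLL(0, 0x100, 0x107);              // ctx 0 locks [0x100, 0x107]
        trackLL(1, 0x200, 0x207);              // disjoint range: both coexist
        std::cout << lockList.size() << '\n';  // 2
        trackLL(1, 0x104, 0x10b);              // overlaps ctx 0's lock
        std::cout << lockList.size() << '\n';  // still 2: ctx 0's record replaced
        return 0;
    }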
/**
- * Clear the list of valid load locks. Should be called whenever
- * block is written to or invalidated.
+ * Clear any load locks that intersect the request and are from
+ * a different context.
*/
- void clearLoadLocks(RequestPtr req = nullptr)
+ void clearLoadLocks(RequestPtr req)
{
- if (!req) {
- // No request, invalidate all locks to this line
- lockList.clear();
- } else {
- // Only invalidate locks that overlap with this request
- auto lock_itr = lockList.begin();
- while (lock_itr != lockList.end()) {
- if (lock_itr->overlapping(req)) {
- lock_itr = lockList.erase(lock_itr);
- } else {
- ++lock_itr;
- }
+ auto l = lockList.begin();
+ while (l != lockList.end()) {
+ if (l->intersects(req) && l->contextId != req->contextId()) {
+ l = lockList.erase(l);
+ } else {
+ ++l;
}
}
}
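Similarly, a small sketch of the new clearLoadLocks semantics, again with hypothetical stand-ins for the gem5 types:

    #include <cstdint>
    #include <iostream>
    #include <list>

    // Same hypothetical Lock record as in the sketch above.
    struct Lock {
        int ctx; uint64_t low, high;
        bool intersects(uint64_t lo, uint64_t hi) const
        { return lo <= high && hi >= low; }
    };

    std::list<Lock> lockList;

    // mirrors the new clearLoadLocks: only intersecting locks held by a
    // *different* context are dropped; the writer's own records survive
    void clearLocks(int ctx, uint64_t lo, uint64_t hi)
    {
        auto l = lockList.begin();
        while (l != lockList.end()) {
            if (l->intersects(lo, hi) && l->ctx != ctx)
                l = lockList.erase(l);
            else
                ++l;
        }
    }

    int main()
    {
        lockList.push_front({0, 0x100, 0x107});    // ctx 0's LL
        lockList.push_front({1, 0x100, 0x107});    // ctx 1's LL, same range
        clearLocks(1, 0x100, 0x107);               // a normal store by ctx 1
        std::cout << lockList.size() << ' '        // 1: only ctx 1's own
                  << lockList.front().ctx << '\n'; // lock is left
        return 0;
    }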
@@ -344,22 +346,27 @@ class CacheBlk
// load locked.
bool success = false;
- for (const auto& l : lockList) {
- if (l.matchesContext(req)) {
- // it's a store conditional, and as far as the memory
- // system can tell, the requesting context's lock is
- // still valid.
+ auto l = lockList.begin();
+ while (l != lockList.end() && !success) {
+ if (l->matches(pkt->req)) {
+ // it's a store conditional, and as far as the
+ // memory system can tell, the requesting
+ // context's lock is still valid.
success = true;
- break;
+ l = lockList.erase(l);
+ } else {
+ ++l;
}
}
req->setExtraData(success ? 1 : 0);
+ // clear any intersecting locks from other contexts (our LL
+ // should already have cleared them)
clearLoadLocks(req);
return success;
} else {
- // for *all* stores (conditional or otherwise) we have to
- // clear the list of load-locks as they're all invalid now.
+ // a normal write: clear any intersecting locks from other
+ // contexts; thus, for a private cache we never clear locks
+ // on normal writes
clearLoadLocks(req);
return true;
}
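Putting the pieces together, a hedged end-to-end sketch of the store-conditional path in checkWrite(), using the erase-returns-next-iterator idiom from the corrected hunk above (hypothetical stand-ins again; the real code works on PacketPtr/RequestPtr and reports the outcome via req->setExtraData()):

    #include <cstdint>
    #include <iostream>
    #include <list>

    // Hypothetical stand-ins for the gem5 types used by checkWrite().
    struct Lock {
        int ctx; uint64_t low, high;
        bool matches(int c, uint64_t lo, uint64_t hi) const
        { return c == ctx && lo >= low && hi <= high; }
    };

    std::list<Lock> lockList;

    // mirrors the SC branch: succeed iff a still-valid lock from the same
    // context fully covers the store, and consume that lock on success
    bool storeConditional(int ctx, uint64_t lo, uint64_t hi)
    {
        bool success = false;
        auto l = lockList.begin();
        while (l != lockList.end() && !success) {
            if (l->matches(ctx, lo, hi)) {
                success = true;
                l = lockList.erase(l);
            } else {
                ++l;
            }
        }
        return success;   // the real code stores this via setExtraData()
    }

    int main()
    {
        lockList.push_front({0, 0x100, 0x13f});          // ctx 0 holds a lock
        std::cout << storeConditional(1, 0x100, 0x107);  // 0: wrong context
        std::cout << storeConditional(0, 0x100, 0x107);  // 1: lock consumed
        std::cout << storeConditional(0, 0x100, 0x107);  // 0: nothing left
        std::cout << '\n';
        return 0;
    }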