path: root/src/mem/cache/blk.hh
author    Mitch Hayenga <mitch.hayenga+gem5@gmail.com>    2013-01-08 08:54:07 -0500
committer Mitch Hayenga <mitch.hayenga+gem5@gmail.com>    2013-01-08 08:54:07 -0500
commit    c7dbd5e7686bbb065dfe2a74b92f4d9463ddfa80 (patch)
tree      c7032e991c9e5d15e1d06f4695be9538e33ba3de /src/mem/cache/blk.hh
parent    dc4a0aa2fa1bc3767785b552159ab0ebe5baa72e (diff)
download  gem5-c7dbd5e7686bbb065dfe2a74b92f4d9463ddfa80.tar.xz
mem: Make LL/SC locks fine grained
The current implementation in gem5 just keeps a list of locks per cache line, with no record of which bytes each lock covers. Due to this, a store to a non-overlapping portion of the cache line can cause an LL/SC pair to fail. This patch simply adds an address range to the lock structure, so that the lock is only invalidated if the store overlaps the lock range.
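The new check is a closed-interval intersection on byte addresses: a store invalidates a lock only if the two byte ranges share at least one byte. A minimal standalone sketch of that test (not part of the patch; the Addr typedef, the LockRange struct and the concrete addresses are illustrative only):

    #include <cstdint>
    #include <iostream>

    typedef uint64_t Addr;

    struct LockRange {
        Addr lowAddr;   // first byte covered by the load-locked access
        Addr highAddr;  // last byte covered (inclusive)

        // A store invalidates the lock only if the byte ranges intersect.
        bool overlapping(Addr store_low, unsigned store_size) const
        {
            Addr store_high = store_low + store_size - 1;
            return (store_low <= highAddr) && (store_high >= lowAddr);
        }
    };

    int main()
    {
        // An LL/SC pair holds a lock on bytes 0x100..0x107 of a 64-byte line.
        LockRange lock = {0x100, 0x107};

        // A store to a different word of the same line no longer breaks the pair.
        std::cout << lock.overlapping(0x110, 8) << "\n";  // prints 0: disjoint
        // A store that touches any locked byte still does.
        std::cout << lock.overlapping(0x104, 4) << "\n";  // prints 1: overlap
        return 0;
    }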
Diffstat (limited to 'src/mem/cache/blk.hh')
-rw-r--r--  src/mem/cache/blk.hh  41
1 file changed, 36 insertions, 5 deletions
diff --git a/src/mem/cache/blk.hh b/src/mem/cache/blk.hh
index 3557bc489..80216ff89 100644
--- a/src/mem/cache/blk.hh
+++ b/src/mem/cache/blk.hh
@@ -127,15 +127,30 @@ class CacheBlk
class Lock {
public:
int contextId; // locking context
+ Addr lowAddr; // low address of lock range
+ Addr highAddr; // high address of lock range
// check for matching execution context
bool matchesContext(Request *req)
{
- return (contextId == req->contextId());
+ Addr req_low = req->getPaddr();
+ Addr req_high = req_low + req->getSize() - 1;
+ return (contextId == req->contextId()) &&
+ (req_low >= lowAddr) && (req_high <= highAddr);
+ }
+
+ bool overlapping(Request *req)
+ {
+ Addr req_low = req->getPaddr();
+ Addr req_high = req_low + req->getSize() - 1;
+
+ return (req_low <= highAddr) && (req_high >= lowAddr);
}
Lock(Request *req)
- : contextId(req->contextId())
+ : contextId(req->contextId()),
+ lowAddr(req->getPaddr()),
+ highAddr(lowAddr + req->getSize() - 1)
{
}
};
@@ -255,7 +270,23 @@ class CacheBlk
* Clear the list of valid load locks. Should be called whenever
* block is written to or invalidated.
*/
- void clearLoadLocks() { lockList.clear(); }
+ void clearLoadLocks(Request *req = NULL)
+ {
+ if (!req) {
// No request, invalidate all locks on this line
+ lockList.clear();
+ } else {
+ // Only invalidate locks that overlap with this request
+ std::list<Lock>::iterator lock_itr = lockList.begin();
+ while (lock_itr != lockList.end()) {
+ if (lock_itr->overlapping(req)) {
+ lock_itr = lockList.erase(lock_itr);
+ } else {
+ ++lock_itr;
+ }
+ }
+ }
+ }
/**
* Handle interaction of load-locked operations and stores.
@@ -283,12 +314,12 @@ class CacheBlk
}
req->setExtraData(success ? 1 : 0);
- clearLoadLocks();
+ clearLoadLocks(req);
return success;
} else {
// for *all* stores (conditional or otherwise) we have to
// clear the list of load-locks as they're all invalid now.
- clearLoadLocks();
+ clearLoadLocks(req);
return true;
}
}
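A note on the reworked clearLoadLocks: erasing inside the loop relies on std::list::erase returning an iterator to the element following the removed one, which keeps the traversal valid while overlapping locks are dropped. The default NULL argument preserves the old behaviour of clearing every lock on the line when the block is written or invalidated without an associated request.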