summaryrefslogtreecommitdiff
path: root/src/cpu/simple
diff options
context:
space:
mode:
author: Steve Reinhardt <steve.reinhardt@amd.com> 2015-03-23 16:14:20 -0700
committer: Steve Reinhardt <steve.reinhardt@amd.com> 2015-03-23 16:14:20 -0700
commit: 6677b9122adb3d519843e187f72c0f0268dd732e (patch)
tree: 63604e61f6b40acac2c7b332e6f89b8cb4d4f206 /src/cpu/simple
parent: c55749d9989cf472cebc0bc93ccfa5edff0f67a8 (diff)
download: gem5-6677b9122adb3d519843e187f72c0f0268dd732e.tar.xz
mem: rename Locked/LOCKED to LockedRMW/LOCKED_RMW
Makes x86-style locked operations even more distinct from LLSC operations. Using "locked" by itself should be obviously ambiguous now.
Diffstat (limited to 'src/cpu/simple')
-rw-r--r-- src/cpu/simple/atomic.cc | 4
1 file changed, 2 insertions, 2 deletions
diff --git a/src/cpu/simple/atomic.cc b/src/cpu/simple/atomic.cc
index 007621feb..64280bda0 100644
--- a/src/cpu/simple/atomic.cc
+++ b/src/cpu/simple/atomic.cc
@@ -373,7 +373,7 @@ AtomicSimpleCPU::readMem(Addr addr, uint8_t * data,
//If we don't need to access a second cache line, stop now.
if (secondAddr <= addr)
{
- if (req->isLocked() && fault == NoFault) {
+ if (req->isLockedRMW() && fault == NoFault) {
assert(!locked);
locked = true;
}
@@ -480,7 +480,7 @@ AtomicSimpleCPU::writeMem(uint8_t *data, unsigned size,
//stop now.
if (fault != NoFault || secondAddr <= addr)
{
- if (req->isLocked() && fault == NoFault) {
+ if (req->isLockedRMW() && fault == NoFault) {
assert(locked);
locked = false;
}