author    Steve Reinhardt <steve.reinhardt@amd.com>  2015-03-23 16:14:20 -0700
committer Steve Reinhardt <steve.reinhardt@amd.com>  2015-03-23 16:14:20 -0700
commit    6677b9122adb3d519843e187f72c0f0268dd732e
tree      63604e61f6b40acac2c7b332e6f89b8cb4d4f206
parent    c55749d9989cf472cebc0bc93ccfa5edff0f67a8
mem: rename Locked/LOCKED to LockedRMW/LOCKED_RMW
Makes x86-style locked operations even more distinct from LLSC operations. Using "locked" by itself should now be obviously ambiguous.
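To see what the rename buys, here is a minimal standalone sketch (a simplified stand-in using the flag values from the src/mem/request.hh hunk below, not gem5's real Request class): with distinct names, neither kind of "locked" access can be mistaken for the other at a call site.

    #include <cstdint>

    // Simplified stand-in for gem5's Request flag scheme; the flag values
    // match the request.hh hunk below, but this is illustration only.
    class Request {
      public:
        typedef uint32_t FlagsType;
        static const FlagsType LOCKED_RMW = 0x00100000; // x86 LOCK-prefixed RMW pair
        static const FlagsType LLSC       = 0x00200000; // load-linked/store-conditional

        explicit Request(FlagsType flags) : _flags(flags) {}

        // After the rename, each accessor names exactly one kind of "locked".
        bool isLockedRMW() const { return (_flags & LOCKED_RMW) != 0; }
        bool isLLSC() const { return (_flags & LLSC) != 0; }

      private:
        FlagsType _flags;
    };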
Diffstat (limited to 'src')
 src/arch/x86/isa/microops/ldstop.isa | 4 ++--
 src/cpu/simple/atomic.cc             | 4 ++--
 src/mem/request.hh                   | 4 ++--
 src/mem/ruby/system/Sequencer.cc     | 2 +-
 4 files changed, 7 insertions(+), 7 deletions(-)
diff --git a/src/arch/x86/isa/microops/ldstop.isa b/src/arch/x86/isa/microops/ldstop.isa
index c26e9932d..bccec36fe 100644
--- a/src/arch/x86/isa/microops/ldstop.isa
+++ b/src/arch/x86/isa/microops/ldstop.isa
@@ -409,7 +409,7 @@ let {{
'(StoreCheck << FlagShift)')
defineMicroLoadOp('Ldstl', 'Data = merge(Data, Mem, dataSize);',
'Data = Mem & mask(dataSize * 8);',
- '(StoreCheck << FlagShift) | Request::LOCKED')
+ '(StoreCheck << FlagShift) | Request::LOCKED_RMW')
defineMicroLoadOp('Ldfp', code='FpData_uqw = Mem', big = False)
@@ -461,7 +461,7 @@ let {{
defineMicroStoreOp('St', 'Mem = pick(Data, 2, dataSize);')
defineMicroStoreOp('Stul', 'Mem = pick(Data, 2, dataSize);',
- mem_flags="Request::LOCKED")
+ mem_flags="Request::LOCKED_RMW")
defineMicroStoreOp('Stfp', code='Mem = FpData_uqw;')
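Per the request.hh comment further down, a locked access must be a locked load, some operation on the data, then a locked store; Ldstl and Stul above are the two bracketing micro-ops that carry Request::LOCKED_RMW. A hedged sketch of that three-step shape (lockedAddSketch is hypothetical illustration, not gem5's microcode engine):

    #include <cstdint>

    // Illustrative decomposition of an x86 LOCK'ed add into the micro-op
    // sequence the ISA description above defines: Ldstl (locked load),
    // an ALU micro-op, Stul (locked store). In gem5 the two memory
    // micro-ops issue requests with Request::LOCKED_RMW set.
    uint64_t lockedAddSketch(uint64_t &mem, uint64_t operand) {
        uint64_t data = mem;   // Ldstl: load with LOCKED_RMW
        data += operand;       // ALU micro-op on the loaded data
        mem = data;            // Stul: store with LOCKED_RMW
        return data;
    }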
diff --git a/src/cpu/simple/atomic.cc b/src/cpu/simple/atomic.cc
index 007621feb..64280bda0 100644
--- a/src/cpu/simple/atomic.cc
+++ b/src/cpu/simple/atomic.cc
@@ -373,7 +373,7 @@ AtomicSimpleCPU::readMem(Addr addr, uint8_t * data,
//If we don't need to access a second cache line, stop now.
if (secondAddr <= addr)
{
- if (req->isLocked() && fault == NoFault) {
+ if (req->isLockedRMW() && fault == NoFault) {
assert(!locked);
locked = true;
}
@@ -480,7 +480,7 @@ AtomicSimpleCPU::writeMem(uint8_t *data, unsigned size,
//stop now.
if (fault != NoFault || secondAddr <= addr)
{
- if (req->isLocked() && fault == NoFault) {
+ if (req->isLockedRMW() && fault == NoFault) {
assert(locked);
locked = false;
}
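Taken together, the two hunks keep a simple bracketing invariant in AtomicSimpleCPU: the locked load of a LOCKED_RMW pair finds the `locked` flag clear and sets it, and the matching locked store finds it set and clears it. A stripped-down sketch of that invariant (LockedRMWTracker is a hypothetical standalone model, not the CPU class):

    #include <cassert>

    // Hypothetical model of the pairing the asserts above enforce: a
    // LOCKED_RMW load opens the region, the matching LOCKED_RMW store
    // closes it, and neither may appear unpaired or nested.
    struct LockedRMWTracker {
        bool locked = false;

        void onLockedRMWLoad() {
            assert(!locked);   // no locked load while one is outstanding
            locked = true;
        }
        void onLockedRMWStore() {
            assert(locked);    // store must follow its locked load
            locked = false;
        }
    };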
diff --git a/src/mem/request.hh b/src/mem/request.hh
index 25649a23a..82ede7e60 100644
--- a/src/mem/request.hh
+++ b/src/mem/request.hh
@@ -127,7 +127,7 @@ class Request
* made up of a locked load, some operation on the data, and then a locked
* store.
*/
- static const FlagsType LOCKED = 0x00100000;
+ static const FlagsType LOCKED_RMW = 0x00100000;
/** The request is a Load locked/store conditional. */
static const FlagsType LLSC = 0x00200000;
/** This request is for a memory swap. */
@@ -626,7 +626,7 @@ class Request
bool isPrefetch() const { return _flags.isSet(PREFETCH); }
bool isLLSC() const { return _flags.isSet(LLSC); }
bool isPriv() const { return _flags.isSet(PRIVILEGED); }
- bool isLocked() const { return _flags.isSet(LOCKED); }
+ bool isLockedRMW() const { return _flags.isSet(LOCKED_RMW); }
bool isSwap() const { return _flags.isSet(MEM_SWAP|MEM_SWAP_COND); }
bool isCondSwap() const { return _flags.isSet(MEM_SWAP_COND); }
bool isMmappedIpr() const { return _flags.isSet(MMAPPED_IPR); }
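A short usage sketch of the renamed accessor pair, mirroring the dispatch order the Ruby Sequencer hunk below uses (LLSC first, then LOCKED_RMW); classifyAccess is hypothetical, not a gem5 function:

    #include <cstdint>
    #include <cstdio>

    // Flag values from the request.hh hunk above; the two tests stand in
    // for pkt->req->isLLSC() and pkt->req->isLockedRMW().
    static const uint32_t LOCKED_RMW = 0x00100000;
    static const uint32_t LLSC       = 0x00200000;

    const char *classifyAccess(uint32_t flags) {
        if (flags & LLSC)
            return "load-linked/store-conditional";
        if (flags & LOCKED_RMW)
            return "x86-style locked read-modify-write";
        return "ordinary access";
    }

    int main() {
        std::printf("%s\n", classifyAccess(LOCKED_RMW)); // prints the RMW label
        return 0;
    }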
diff --git a/src/mem/ruby/system/Sequencer.cc b/src/mem/ruby/system/Sequencer.cc
index dbf350199..98649dcd5 100644
--- a/src/mem/ruby/system/Sequencer.cc
+++ b/src/mem/ruby/system/Sequencer.cc
@@ -604,7 +604,7 @@ Sequencer::makeRequest(PacketPtr pkt)
primary_type = RubyRequestType_Load_Linked;
}
secondary_type = RubyRequestType_ATOMIC;
- } else if (pkt->req->isLocked()) {
+ } else if (pkt->req->isLockedRMW()) {
//
// x86 locked instructions are translated to store cache coherence
// requests because these requests should always be treated as read