Diffstat (limited to 'src')
-rw-r--r--  src/arch/alpha/isa/decoder.isa            |  8
-rw-r--r--  src/arch/alpha/isa/pal.isa                |  2
-rw-r--r--  src/arch/mips/isa/decoder.isa             |  4
-rw-r--r--  src/cpu/checker/cpu.cc                    |  4
-rw-r--r--  src/cpu/inorder/resources/cache_unit.cc   |  6
-rw-r--r--  src/cpu/o3/lsq_unit.hh                    |  4
-rw-r--r--  src/cpu/o3/lsq_unit_impl.hh               |  2
-rw-r--r--  src/cpu/ozone/back_end_impl.hh            |  2
-rw-r--r--  src/cpu/ozone/inorder_back_end.hh         |  2
-rw-r--r--  src/cpu/ozone/lsq_unit_impl.hh            |  6
-rw-r--r--  src/cpu/ozone/lw_lsq.hh                   |  4
-rw-r--r--  src/cpu/ozone/lw_lsq_impl.hh              |  8
-rw-r--r--  src/cpu/simple/atomic.cc                  |  6
-rw-r--r--  src/cpu/simple/timing.cc                  | 12
-rw-r--r--  src/mem/cache/blk.hh                      |  4
-rw-r--r--  src/mem/cache/cache_impl.hh               |  4
-rw-r--r--  src/mem/packet.cc                         |  6
-rw-r--r--  src/mem/packet.hh                         |  6
-rw-r--r--  src/mem/physical.cc                       | 12
-rw-r--r--  src/mem/physical.hh                       |  6
-rw-r--r--  src/mem/request.hh                        |  4
21 files changed, 56 insertions, 56 deletions
diff --git a/src/arch/alpha/isa/decoder.isa b/src/arch/alpha/isa/decoder.isa
index 0b2a31410..278ce31e8 100644
--- a/src/arch/alpha/isa/decoder.isa
+++ b/src/arch/alpha/isa/decoder.isa
@@ -45,8 +45,8 @@ decode OPCODE default Unknown::unknown() {
     0x0c: ldwu({{ Ra.uq = Mem.uw; }});
     0x0b: ldq_u({{ Ra = Mem.uq; }}, ea_code = {{ EA = (Rb + disp) & ~7; }});
     0x23: ldt({{ Fa = Mem.df; }});
-    0x2a: ldl_l({{ Ra.sl = Mem.sl; }}, mem_flags = LOCKED);
-    0x2b: ldq_l({{ Ra.uq = Mem.uq; }}, mem_flags = LOCKED);
+    0x2a: ldl_l({{ Ra.sl = Mem.sl; }}, mem_flags = LLSC);
+    0x2b: ldq_l({{ Ra.uq = Mem.uq; }}, mem_flags = LLSC);
 #ifdef USE_COPY
     0x20: MiscPrefetch::copy_load({{ EA = Ra; }},
                                   {{ fault = xc->copySrcTranslate(EA); }},
@@ -87,7 +87,7 @@ decode OPCODE default Unknown::unknown() {
             if (tmp == 1) {
                 xc->setStCondFailures(0);
             }
-        }}, mem_flags = LOCKED, inst_flags = IsStoreConditional);
+        }}, mem_flags = LLSC, inst_flags = IsStoreConditional);
         0x2f: stq_c({{ Mem.uq = Ra; }},
         {{
             uint64_t tmp = write_result;
@@ -105,7 +105,7 @@ decode OPCODE default Unknown::unknown() {
                 // only.
                 xc->setStCondFailures(0);
             }
-        }}, mem_flags = LOCKED, inst_flags = IsStoreConditional);
+        }}, mem_flags = LLSC, inst_flags = IsStoreConditional);
     }

     format IntegerOperate {
diff --git a/src/arch/alpha/isa/pal.isa b/src/arch/alpha/isa/pal.isa
index 3d3b81600..0931c1aec 100644
--- a/src/arch/alpha/isa/pal.isa
+++ b/src/arch/alpha/isa/pal.isa
@@ -178,7 +178,7 @@ output decoder {{
             if (HW_LDST_PHYS) memAccessFlags.set(Request::PHYSICAL);
             if (HW_LDST_ALT) memAccessFlags.set(Request::ALTMODE);
             if (HW_LDST_VPTE) memAccessFlags.set(Request::VPTE);
-            if (HW_LDST_LOCK) memAccessFlags.set(Request::LOCKED);
+            if (HW_LDST_LOCK) memAccessFlags.set(Request::LLSC);
         }

         std::string
diff --git a/src/arch/mips/isa/decoder.isa b/src/arch/mips/isa/decoder.isa
index 68a63a458..a463093ec 100644
--- a/src/arch/mips/isa/decoder.isa
+++ b/src/arch/mips/isa/decoder.isa
@@ -2089,7 +2089,7 @@ decode OPCODE_HI default Unknown::unknown() {
     0x6: decode OPCODE_LO {
         format LoadMemory {
-            0x0: ll({{ Rt.uw = Mem.uw; }}, mem_flags=LOCKED);
+            0x0: ll({{ Rt.uw = Mem.uw; }}, mem_flags=LLSC);
             0x1: lwc1({{ Ft.uw = Mem.uw; }});
             0x5: ldc1({{ Ft.ud = Mem.ud; }});
         }
@@ -2103,7 +2103,7 @@ decode OPCODE_HI default Unknown::unknown() {
         0x0: StoreCond::sc({{ Mem.uw = Rt.uw;}},
                            {{ uint64_t tmp = write_result;
                               Rt.uw = (tmp == 0 || tmp == 1) ? tmp : Rt.uw;
-                           }}, mem_flags=LOCKED, inst_flags = IsStoreConditional);
+                           }}, mem_flags=LLSC, inst_flags = IsStoreConditional);

         format StoreMemory {
             0x1: swc1({{ Mem.uw = Ft.uw;}});
diff --git a/src/cpu/checker/cpu.cc b/src/cpu/checker/cpu.cc
index 1c36ad22d..4305f7ab0 100644
--- a/src/cpu/checker/cpu.cc
+++ b/src/cpu/checker/cpu.cc
@@ -240,8 +240,8 @@ CheckerCPU::write(T data, Addr addr, unsigned flags, uint64_t *res)
     // verify this data.
     if (unverifiedReq && !(unverifiedReq->isUncacheable()) &&
-        (!(unverifiedReq->isLocked()) ||
-         ((unverifiedReq->isLocked()) &&
+        (!(unverifiedReq->isLlsc()) ||
+         ((unverifiedReq->isLlsc()) &&
          unverifiedReq->getExtraData() == 1))) {
         T inst_data;
 /*
diff --git a/src/cpu/inorder/resources/cache_unit.cc b/src/cpu/inorder/resources/cache_unit.cc
index 57bcb10ef..ceaaf3532 100644
--- a/src/cpu/inorder/resources/cache_unit.cc
+++ b/src/cpu/inorder/resources/cache_unit.cc
@@ -355,7 +355,7 @@ CacheUnit::doDataAccess(DynInstPtr inst)

     Request *memReq = cache_req->dataPkt->req;

-    if (cache_req->dataPkt->isWrite() && memReq->isLocked()) {
+    if (cache_req->dataPkt->isWrite() && memReq->isLlsc()) {
         assert(cache_req->inst->isStoreConditional());
         DPRINTF(InOrderCachePort, "Evaluating Store Conditional access\n");
         do_access = TheISA::handleLockedWrite(cpu, memReq);
@@ -395,7 +395,7 @@ CacheUnit::doDataAccess(DynInstPtr inst)
             cacheStatus = cacheWaitResponse;
             cacheBlocked = false;
         }
-    } else if (!do_access && memReq->isLocked()){
+    } else if (!do_access && memReq->isLlsc()){
         // Store-Conditional instructions complete even if they "failed"
         assert(cache_req->inst->isStoreConditional());
         cache_req->setCompleted(true);
@@ -471,7 +471,7 @@ CacheUnit::processCacheCompletion(PacketPtr pkt)
         if (inst->isLoad()) {
             assert(cache_pkt->isRead());

-            if (cache_pkt->req->isLocked()) {
+            if (cache_pkt->req->isLlsc()) {
                 DPRINTF(InOrderCachePort,
                         "[tid:%u]: Handling Load-Linked for [sn:%u]\n",
                         tid, inst->seqNum);
diff --git a/src/cpu/o3/lsq_unit.hh b/src/cpu/o3/lsq_unit.hh
index 5323e3a47..9f0f38f06 100644
--- a/src/cpu/o3/lsq_unit.hh
+++ b/src/cpu/o3/lsq_unit.hh
@@ -514,7 +514,7 @@ LSQUnit<Impl>::read(Request *req, T &data, int load_idx)
             "storeHead: %i addr: %#x\n",
             load_idx, store_idx, storeHead, req->getPaddr());

-    if (req->isLocked()) {
+    if (req->isLlsc()) {
         // Disable recording the result temporarily. Writing to misc
         // regs normally updates the result, but this is not the
         // desired behavior when handling store conditionals.
@@ -647,7 +647,7 @@ LSQUnit<Impl>::read(Request *req, T &data, int load_idx)
         if (!lsq->cacheBlocked()) {
             PacketPtr data_pkt =
                 new Packet(req,
-                           (req->isLocked() ?
+                           (req->isLlsc() ?
                             MemCmd::LoadLockedReq : MemCmd::ReadReq),
                            Packet::Broadcast);
             data_pkt->dataStatic(load_inst->memData);
diff --git a/src/cpu/o3/lsq_unit_impl.hh b/src/cpu/o3/lsq_unit_impl.hh
index 85662d496..f5753a4ef 100644
--- a/src/cpu/o3/lsq_unit_impl.hh
+++ b/src/cpu/o3/lsq_unit_impl.hh
@@ -652,7 +652,7 @@ LSQUnit<Impl>::writebackStores()

         MemCmd command =
             req->isSwap() ? MemCmd::SwapReq :
-            (req->isLocked() ? MemCmd::StoreCondReq : MemCmd::WriteReq);
+            (req->isLlsc() ? MemCmd::StoreCondReq : MemCmd::WriteReq);
         PacketPtr data_pkt = new Packet(req, command, Packet::Broadcast);
         data_pkt->dataStatic(inst->memData);
diff --git a/src/cpu/ozone/back_end_impl.hh b/src/cpu/ozone/back_end_impl.hh
index 415407c52..ef3b0f182 100644
--- a/src/cpu/ozone/back_end_impl.hh
+++ b/src/cpu/ozone/back_end_impl.hh
@@ -1256,7 +1256,7 @@ BackEnd<Impl>::executeInsts()
 //                    ++iewExecStoreInsts;

-                    if (!(inst->req->isLocked())) {
+                    if (!(inst->req->isLlsc())) {
                         inst->setExecuted();

                         instToCommit(inst);
diff --git a/src/cpu/ozone/inorder_back_end.hh b/src/cpu/ozone/inorder_back_end.hh
index e930144be..b30b37a22 100644
--- a/src/cpu/ozone/inorder_back_end.hh
+++ b/src/cpu/ozone/inorder_back_end.hh
@@ -381,7 +381,7 @@ InorderBackEnd<Impl>::write(MemReqPtr &req, T &data, int store_idx)
         }
     }
 /*
-    if (req->isLocked()) {
+    if (req->isLlsc()) {
         if (req->isUncacheable()) {
             // Don't update result register (see stq_c in isa_desc)
             req->result = 2;
diff --git a/src/cpu/ozone/lsq_unit_impl.hh b/src/cpu/ozone/lsq_unit_impl.hh
index c24410520..7e7bbdb01 100644
--- a/src/cpu/ozone/lsq_unit_impl.hh
+++ b/src/cpu/ozone/lsq_unit_impl.hh
@@ -577,7 +577,7 @@ OzoneLSQ<Impl>::writebackStores()
             MemAccessResult result = dcacheInterface->access(req);

             //@todo temp fix for LL/SC (works fine for 1 CPU)
-            if (req->isLocked()) {
+            if (req->isLlsc()) {
                 req->result=1;
                 panic("LL/SC! oh no no support!!!");
             }
@@ -596,7 +596,7 @@ OzoneLSQ<Impl>::writebackStores()
                 Event *wb = NULL;
 /*
                 typename IEW::LdWritebackEvent *wb = NULL;
-                if (req->isLocked()) {
+                if (req->isLlsc()) {
                     // Stx_C does not generate a system port transaction.
                     req->result=0;
                     wb = new typename IEW::LdWritebackEvent(storeQueue[storeWBIdx].inst,
@@ -630,7 +630,7 @@ OzoneLSQ<Impl>::writebackStores()
 //            DPRINTF(Activity, "Active st accessing mem hit [sn:%lli]\n",
 //                    storeQueue[storeWBIdx].inst->seqNum);

-                if (req->isLocked()) {
+                if (req->isLlsc()) {
                     // Stx_C does not generate a system port transaction.
                     req->result=1;
                     typename BackEnd::LdWritebackEvent *wb =
diff --git a/src/cpu/ozone/lw_lsq.hh b/src/cpu/ozone/lw_lsq.hh
index 4f8101bc0..3ad8d1d64 100644
--- a/src/cpu/ozone/lw_lsq.hh
+++ b/src/cpu/ozone/lw_lsq.hh
@@ -635,7 +635,7 @@ OzoneLWLSQ<Impl>::read(RequestPtr req, T &data, int load_idx)

         PacketPtr data_pkt =
             new Packet(req,
-                       (req->isLocked() ?
+                       (req->isLlsc() ?
                         MemCmd::LoadLockedReq : Packet::ReadReq),
                        Packet::Broadcast);
         data_pkt->dataStatic(inst->memData);
@@ -662,7 +662,7 @@ OzoneLWLSQ<Impl>::read(RequestPtr req, T &data, int load_idx)
         return NoFault;
     }

-    if (req->isLocked()) {
+    if (req->isLlsc()) {
         cpu->lockFlag = true;
     }
diff --git a/src/cpu/ozone/lw_lsq_impl.hh b/src/cpu/ozone/lw_lsq_impl.hh
index 00e52e039..3943dab2d 100644
--- a/src/cpu/ozone/lw_lsq_impl.hh
+++ b/src/cpu/ozone/lw_lsq_impl.hh
@@ -589,7 +589,7 @@ OzoneLWLSQ<Impl>::writebackStores()

         MemCmd command =
             req->isSwap() ? MemCmd::SwapReq :
-            (req->isLocked() ? MemCmd::WriteReq : MemCmd::StoreCondReq);
+            (req->isLlsc() ? MemCmd::WriteReq : MemCmd::StoreCondReq);
         PacketPtr data_pkt = new Packet(req, command, Packet::Broadcast);
         data_pkt->dataStatic(inst->memData);
@@ -606,7 +606,7 @@ OzoneLWLSQ<Impl>::writebackStores()
                 inst->seqNum);

         // @todo: Remove this SC hack once the memory system handles it.
-        if (req->isLocked()) {
+        if (req->isLlsc()) {
             if (req->isUncacheable()) {
                 req->setExtraData(2);
             } else {
@@ -664,7 +664,7 @@ OzoneLWLSQ<Impl>::writebackStores()
             if (result != MA_HIT && dcacheInterface->doEvents()) {
                 store_event->miss = true;
                 typename BackEnd::LdWritebackEvent *wb = NULL;
-                if (req->isLocked()) {
+                if (req->isLlsc()) {
                     wb = new typename BackEnd::LdWritebackEvent(inst, be);
                     store_event->wbEvent = wb;
@@ -691,7 +691,7 @@ OzoneLWLSQ<Impl>::writebackStores()
 //            DPRINTF(Activity, "Active st accessing mem hit [sn:%lli]\n",
 //                    inst->seqNum);

-                if (req->isLocked()) {
+                if (req->isLlsc()) {
                     // Stx_C does not generate a system port
                     // transaction in the 21264, but that might be
                     // hard to accomplish in this model.
diff --git a/src/cpu/simple/atomic.cc b/src/cpu/simple/atomic.cc
index 3ce0ba172..045b0160f 100644
--- a/src/cpu/simple/atomic.cc
+++ b/src/cpu/simple/atomic.cc
@@ -322,7 +322,7 @@ AtomicSimpleCPU::read(Addr addr, T &data, unsigned flags)
         // Now do the access.
         if (fault == NoFault) {
             Packet pkt = Packet(req,
-                    req->isLocked() ? MemCmd::LoadLockedReq : MemCmd::ReadReq,
+                    req->isLlsc() ? MemCmd::LoadLockedReq : MemCmd::ReadReq,
                     Packet::Broadcast);
             pkt.dataStatic(dataPtr);
@@ -338,7 +338,7 @@ AtomicSimpleCPU::read(Addr addr, T &data, unsigned flags)

             assert(!pkt.isError());

-            if (req->isLocked()) {
+            if (req->isLlsc()) {
                 TheISA::handleLockedRead(thread, req);
             }
         }
@@ -462,7 +462,7 @@ AtomicSimpleCPU::write(T data, Addr addr, unsigned flags, uint64_t *res)
             MemCmd cmd = MemCmd::WriteReq; // default
             bool do_access = true;  // flag to suppress cache access

-            if (req->isLocked()) {
+            if (req->isLlsc()) {
                 cmd = MemCmd::StoreCondReq;
                 do_access = TheISA::handleLockedWrite(thread, req);
             } else if (req->isSwap()) {
diff --git a/src/cpu/simple/timing.cc b/src/cpu/simple/timing.cc
index 64c4108a7..905acb6d4 100644
--- a/src/cpu/simple/timing.cc
+++ b/src/cpu/simple/timing.cc
@@ -290,7 +290,7 @@ TimingSimpleCPU::sendData(Fault fault, RequestPtr req,
     } else {
         bool do_access = true;  // flag to suppress cache access

-        if (req->isLocked()) {
+        if (req->isLlsc()) {
             do_access = TheISA::handleLockedWrite(thread, req);
         } else if (req->isCondSwap()) {
             assert(res);
@@ -384,11 +384,11 @@ TimingSimpleCPU::buildPacket(PacketPtr &pkt, RequestPtr req, bool read)
     MemCmd cmd;
     if (read) {
         cmd = MemCmd::ReadReq;
-        if (req->isLocked())
+        if (req->isLlsc())
             cmd = MemCmd::LoadLockedReq;
     } else {
         cmd = MemCmd::WriteReq;
-        if (req->isLocked()) {
+        if (req->isLlsc()) {
             cmd = MemCmd::StoreCondReq;
         } else if (req->isSwap()) {
             cmd = MemCmd::SwapReq;
@@ -452,7 +452,7 @@ TimingSimpleCPU::read(Addr addr, T &data, unsigned flags)
         _status = DTBWaitResponse;
         if (split_addr > addr) {
             RequestPtr req1, req2;
-            assert(!req->isLocked() && !req->isSwap());
+            assert(!req->isLlsc() && !req->isSwap());
             req->splitOnVaddr(split_addr, req1, req2);
             typedef SplitDataTranslation::WholeTranslationState WholeState;
@@ -571,7 +571,7 @@ TimingSimpleCPU::write(T data, Addr addr, unsigned flags, uint64_t *res)
         _status = DTBWaitResponse;
         if (split_addr > addr) {
             RequestPtr req1, req2;
-            assert(!req->isLocked() && !req->isSwap());
+            assert(!req->isLlsc() && !req->isSwap());
             req->splitOnVaddr(split_addr, req1, req2);
             typedef SplitDataTranslation::WholeTranslationState WholeState;
@@ -904,7 +904,7 @@ TimingSimpleCPU::completeDataAccess(PacketPtr pkt)

     // the locked flag may be cleared on the response packet, so check
     // pkt->req and not pkt to see if it was a load-locked
-    if (pkt->isRead() && pkt->req->isLocked()) {
+    if (pkt->isRead() && pkt->req->isLlsc()) {
         TheISA::handleLockedRead(thread, pkt->req);
     }
diff --git a/src/mem/cache/blk.hh b/src/mem/cache/blk.hh
index fe65672d6..acb117f6c 100644
--- a/src/mem/cache/blk.hh
+++ b/src/mem/cache/blk.hh
@@ -218,7 +218,7 @@ class CacheBlk
      */
     void trackLoadLocked(PacketPtr pkt)
     {
-        assert(pkt->isLocked());
+        assert(pkt->isLlsc());
         lockList.push_front(Lock(pkt->req));
     }

@@ -236,7 +236,7 @@ class CacheBlk
     bool checkWrite(PacketPtr pkt)
     {
         Request *req = pkt->req;
-        if (pkt->isLocked()) {
+        if (pkt->isLlsc()) {
             // it's a store conditional... have to check for matching
             // load locked.
             bool success = false;
diff --git a/src/mem/cache/cache_impl.hh b/src/mem/cache/cache_impl.hh
index a78fd3637..f98d6ac34 100644
--- a/src/mem/cache/cache_impl.hh
+++ b/src/mem/cache/cache_impl.hh
@@ -180,7 +180,7 @@ Cache<TagStore>::satisfyCpuSideRequest(PacketPtr pkt, BlkType *blk)
             pkt->writeDataToBlock(blk->data, blkSize);
         }
     } else if (pkt->isRead()) {
-        if (pkt->isLocked()) {
+        if (pkt->isLlsc()) {
             blk->trackLoadLocked(pkt);
         }
         pkt->setDataFromBlock(blk->data, blkSize);
@@ -317,7 +317,7 @@ Cache<TagStore>::access(PacketPtr pkt, BlkType *&blk,
         incMissCount(pkt);

-        if (blk == NULL && pkt->isLocked() && pkt->isWrite()) {
+        if (blk == NULL && pkt->isLlsc() && pkt->isWrite()) {
             // complete miss on store conditional... just give up now
             pkt->req->setExtraData(0);
             return true;
diff --git a/src/mem/packet.cc b/src/mem/packet.cc
index 38b8879e5..2f84e4414 100644
--- a/src/mem/packet.cc
+++ b/src/mem/packet.cc
@@ -105,14 +105,14 @@ MemCmd::commandInfo[] =
       InvalidCmd, "ReadExResp" },
     /* LoadLockedReq: note that we use plain ReadResp as response, so that
      * we can also use ReadRespWithInvalidate when needed */
-    { SET4(IsRead, IsLocked, IsRequest, NeedsResponse),
+    { SET4(IsRead, IsLlsc, IsRequest, NeedsResponse),
       ReadResp, "LoadLockedReq" },
     /* StoreCondReq */
-    { SET6(IsWrite, NeedsExclusive, IsLocked,
+    { SET6(IsWrite, NeedsExclusive, IsLlsc,
            IsRequest, NeedsResponse, HasData),
       StoreCondResp, "StoreCondReq" },
     /* StoreCondResp */
-    { SET4(IsWrite, NeedsExclusive, IsLocked, IsResponse),
+    { SET4(IsWrite, NeedsExclusive, IsLlsc, IsResponse),
       InvalidCmd, "StoreCondResp" },
     /* SwapReq -- for Swap ldstub type operations */
     { SET6(IsRead, IsWrite, NeedsExclusive, IsRequest, HasData, NeedsResponse),
diff --git a/src/mem/packet.hh b/src/mem/packet.hh
index 41f599fa0..965482c02 100644
--- a/src/mem/packet.hh
+++ b/src/mem/packet.hh
@@ -120,7 +120,7 @@ class MemCmd
         NeedsResponse,  //!< Requester needs response from target
         IsSWPrefetch,
         IsHWPrefetch,
-        IsLocked,       //!< Alpha/MIPS LL or SC access
+        IsLlsc,         //!< Alpha/MIPS LL or SC access
         HasData,        //!< There is an associated payload
         IsError,        //!< Error response
         IsPrint,        //!< Print state matching address (for debugging)
@@ -166,7 +166,7 @@ class MemCmd
     bool isInvalidate() const   { return testCmdAttrib(IsInvalidate); }
     bool hasData() const        { return testCmdAttrib(HasData); }
     bool isReadWrite() const    { return isRead() && isWrite(); }
-    bool isLocked() const       { return testCmdAttrib(IsLocked); }
+    bool isLlsc() const         { return testCmdAttrib(IsLlsc); }
     bool isError() const        { return testCmdAttrib(IsError); }
     bool isPrint() const        { return testCmdAttrib(IsPrint); }
@@ -401,7 +401,7 @@ class Packet : public FastAlloc, public Printable
     bool isInvalidate() const    { return cmd.isInvalidate(); }
     bool hasData() const         { return cmd.hasData(); }
     bool isReadWrite() const     { return cmd.isReadWrite(); }
-    bool isLocked() const        { return cmd.isLocked(); }
+    bool isLlsc() const          { return cmd.isLlsc(); }
     bool isError() const         { return cmd.isError(); }
     bool isPrint() const         { return cmd.isPrint(); }
diff --git a/src/mem/physical.cc b/src/mem/physical.cc
index 16ff3de6d..86ecb506f 100644
--- a/src/mem/physical.cc
+++ b/src/mem/physical.cc
@@ -125,7 +125,7 @@ PhysicalMemory::calculateLatency(PacketPtr pkt)


 // Add load-locked to tracking list. Should only be called if the
-// operation is a load and the LOCKED flag is set.
+// operation is a load and the LLSC flag is set.
 void
 PhysicalMemory::trackLoadLocked(PacketPtr pkt)
 {
@@ -162,12 +162,12 @@ PhysicalMemory::checkLockedAddrList(PacketPtr pkt)
 {
     Request *req = pkt->req;
     Addr paddr = LockedAddr::mask(req->getPaddr());
-    bool isLocked = pkt->isLocked();
+    bool isLlsc = pkt->isLlsc();

     // Initialize return value. Non-conditional stores always
     // succeed. Assume conditional stores will fail until proven
     // otherwise.
-    bool success = !isLocked;
+    bool success = !isLlsc;

     // Iterate over list. Note that there could be multiple matching
     // records, as more than one context could have done a load locked
@@ -179,7 +179,7 @@ PhysicalMemory::checkLockedAddrList(PacketPtr pkt)

         if (i->addr == paddr) {
             // we have a matching address
-            if (isLocked && i->matchesContext(req)) {
+            if (isLlsc && i->matchesContext(req)) {
                 // it's a store conditional, and as far as the memory
                 // system can tell, the requesting context's lock is
                 // still valid.
@@ -199,7 +199,7 @@ PhysicalMemory::checkLockedAddrList(PacketPtr pkt)
         }
     }

-    if (isLocked) {
+    if (isLlsc) {
         req->setExtraData(success ? 1 : 0);
     }

@@ -284,7 +284,7 @@ PhysicalMemory::doAtomicAccess(PacketPtr pkt)
         TRACE_PACKET("Read/Write");
     } else if (pkt->isRead()) {
         assert(!pkt->isWrite());
-        if (pkt->isLocked()) {
+        if (pkt->isLlsc()) {
             trackLoadLocked(pkt);
         }
         if (pmemAddr)
diff --git a/src/mem/physical.hh b/src/mem/physical.hh
index d18138ecd..2a3bea7a5 100644
--- a/src/mem/physical.hh
+++ b/src/mem/physical.hh
@@ -129,11 +129,11 @@ class PhysicalMemory : public MemObject
         Request *req = pkt->req;

         if (lockedAddrList.empty()) {
             // no locked addrs: nothing to check, store_conditional fails
-            bool isLocked = pkt->isLocked();
-            if (isLocked) {
+            bool isLlsc = pkt->isLlsc();
+            if (isLlsc) {
                 req->setExtraData(0);
             }
-            return !isLocked; // only do write if not an sc
+            return !isLlsc; // only do write if not an sc
         } else {
             // iterate over list...
             return checkLockedAddrList(pkt);
diff --git a/src/mem/request.hh b/src/mem/request.hh
index ee62ce771..9e8208260 100644
--- a/src/mem/request.hh
+++ b/src/mem/request.hh
@@ -62,7 +62,7 @@ class Request : public FastAlloc
     /** ASI information for this request if it exists. */
     static const FlagsType ASI_BITS = 0x000000FF;
     /** The request is a Load locked/store conditional. */
-    static const FlagsType LOCKED = 0x00000100;
+    static const FlagsType LLSC = 0x00000100;
     /** The virtual address is also the physical address. */
     static const FlagsType PHYSICAL = 0x00000200;
     /** The request is an ALPHA VPTE pal access (hw_ld). */
@@ -448,7 +448,7 @@ class Request : public FastAlloc
     /** Accessor Function to Check Cacheability. */
     bool isUncacheable() const { return flags.isSet(UNCACHEABLE); }
     bool isInstRead() const { return flags.isSet(INST_READ); }
-    bool isLocked() const { return flags.isSet(LOCKED); }
+    bool isLlsc() const { return flags.isSet(LLSC); }
     bool isSwap() const { return flags.isSet(MEM_SWAP|MEM_SWAP_COND); }
     bool isCondSwap() const { return flags.isSet(MEM_SWAP_COND); }