author | Ali Saidi <Ali.Saidi@ARM.com> | 2014-01-24 15:29:30 -0600
---|---|---
committer | Ali Saidi <Ali.Saidi@ARM.com> | 2014-01-24 15:29:30 -0600
commit | 6bed6e0352a68723ea55017b3e09a8c279af11ec (patch) |
tree | f7fb2a163ea470144a424bf21a7dd578754546af |
parent | d3444c6603afe38b00036292a854f52069b90a80 (diff) |
download | gem5-6bed6e0352a68723ea55017b3e09a8c279af11ec.tar.xz |
cpu: Add CPU support for generating wake-up events when LLSC addresses are snooped.
This patch adds support for generating wake-up events in the CPU when an address
that is currently in the exclusive state is hit by a snoop. This mechanism is required
for ARMv8 multi-processor support.
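To make the mechanism concrete, here is a minimal, self-contained sketch (not gem5 code; `Context`, `handleSnoop`, and the field names are simplified stand-ins for gem5's ThreadContext, miscreg state, and port snoop callbacks) of the idea the patch implements: when a snoop hits the cache block guarded by the exclusive monitor, the monitor is cleared and a wake-up event is generated for a waiting CPU.

```cpp
// Standalone sketch only -- the types below are simplified stand-ins,
// not gem5's actual classes.
#include <cstdint>
#include <iostream>

using Addr = std::uint64_t;

struct Context {
    Addr lockAddr = 0;      // address armed by a load-exclusive (LL)
    bool lockFlag = false;  // exclusive-monitor state
    bool asleep = false;    // e.g. suspended waiting for an event (WFE)
};

// Analogue of the per-ISA handleLockedSnoopHit() hook: a snoop hit the
// monitored line, so clear the monitor and wake the context if it waits.
void handleLockedSnoopHit(Context &ctx) {
    ctx.lockFlag = false;
    if (ctx.asleep) {
        ctx.asleep = false;
        std::cout << "wake-up event generated\n";
    }
}

// Analogue of the snoop paths (checkSnoop()/recvTimingSnoopReq()):
// compare snooped and monitored addresses at cache-block granularity.
void handleSnoop(Context &ctx, Addr snoopAddr, Addr cacheBlockMask) {
    if (ctx.lockFlag &&
        (ctx.lockAddr & cacheBlockMask) == (snoopAddr & cacheBlockMask))
        handleLockedSnoopHit(ctx);
}

int main() {
    Context ctx;
    Addr cacheBlockMask = ~(Addr(64) - 1);  // 64-byte cache lines

    ctx.lockAddr = 0x1040;   // LL armed the monitor on this line
    ctx.lockFlag = true;
    ctx.asleep = true;       // CPU is waiting for an event

    handleSnoop(ctx, 0x1048, cacheBlockMask);      // snoop to the same line
    std::cout << "monitor set: " << ctx.lockFlag << "\n";  // prints 0
}
```

In the patch itself this hook is the new per-ISA `handleLockedSnoopHit()`, invoked from the O3 LSQ's `checkSnoop()` and from the simple CPUs' snoop callbacks shown in the diff below.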
-rw-r--r-- | src/arch/alpha/locked_mem.hh | 7
-rw-r--r-- | src/arch/arm/locked_mem.hh | 20
-rw-r--r-- | src/arch/mips/locked_mem.hh | 8
-rw-r--r-- | src/arch/power/locked_mem.hh | 8
-rw-r--r-- | src/arch/sparc/locked_mem.hh | 8
-rw-r--r-- | src/arch/x86/locked_mem.hh | 8
-rw-r--r-- | src/cpu/base.hh | 3
-rw-r--r-- | src/cpu/base_dyn_inst.hh | 2
-rw-r--r-- | src/cpu/inorder/resources/cache_unit.cc | 14
-rw-r--r-- | src/cpu/o3/lsq_unit_impl.hh | 32
-rw-r--r-- | src/cpu/simple/atomic.cc | 34
-rw-r--r-- | src/cpu/simple/atomic.hh | 33
-rw-r--r-- | src/cpu/simple/timing.cc | 12
-rw-r--r-- | src/cpu/simple/timing.hh | 12 |
14 files changed, 167 insertions, 34 deletions
```diff
diff --git a/src/arch/alpha/locked_mem.hh b/src/arch/alpha/locked_mem.hh
index e62ed1654..253b94be4 100644
--- a/src/arch/alpha/locked_mem.hh
+++ b/src/arch/alpha/locked_mem.hh
@@ -93,10 +93,15 @@ handleLockedRead(XC *xc, Request *req)
     xc->setMiscReg(MISCREG_LOCKFLAG, true);
 }
 
+template <class XC>
+inline void
+handleLockedSnoopHit(XC *xc)
+{
+}
 
 template <class XC>
 inline bool
-handleLockedWrite(XC *xc, Request *req)
+handleLockedWrite(XC *xc, Request *req, Addr cacheBlockMask)
 {
     if (req->isUncacheable()) {
         // Funky Turbolaser mailbox access...don't update
diff --git a/src/arch/arm/locked_mem.hh b/src/arch/arm/locked_mem.hh
index 37973ff98..f2601f00c 100644
--- a/src/arch/arm/locked_mem.hh
+++ b/src/arch/arm/locked_mem.hh
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012 ARM Limited
+ * Copyright (c) 2012-2013 ARM Limited
  * All rights reserved
  *
  * The license below extends only to copyright in the software and shall
@@ -66,9 +66,7 @@ handleLockedSnoop(XC *xc, PacketPtr pkt, Addr cacheBlockMask)
         return;
 
     Addr locked_addr = xc->readMiscReg(MISCREG_LOCKADDR) & cacheBlockMask;
-    Addr snoop_addr = pkt->getAddr();
-
-    assert((cacheBlockMask & snoop_addr) == snoop_addr);
+    Addr snoop_addr = pkt->getAddr() & cacheBlockMask;
 
     if (locked_addr == snoop_addr)
         xc->setMiscReg(MISCREG_LOCKFLAG, false);
@@ -76,16 +74,22 @@ handleLockedSnoop(XC *xc, PacketPtr pkt, Addr cacheBlockMask)
 
 template <class XC>
 inline void
+handleLockedSnoopHit(XC *xc)
+{
+}
+
+template <class XC>
+inline void
 handleLockedRead(XC *xc, Request *req)
 {
-    xc->setMiscReg(MISCREG_LOCKADDR, req->getPaddr() & ~0xf);
+    xc->setMiscReg(MISCREG_LOCKADDR, req->getPaddr());
     xc->setMiscReg(MISCREG_LOCKFLAG, true);
 }
 
 template <class XC>
 inline bool
-handleLockedWrite(XC *xc, Request *req)
+handleLockedWrite(XC *xc, Request *req, Addr cacheBlockMask)
 {
     if (req->isSwap())
         return true;
@@ -93,8 +97,8 @@ handleLockedWrite(XC *xc, Request *req)
     // Verify that the lock flag is still set and the address
     // is correct
     bool lock_flag = xc->readMiscReg(MISCREG_LOCKFLAG);
-    Addr lock_addr = xc->readMiscReg(MISCREG_LOCKADDR);
-    if (!lock_flag || (req->getPaddr() & ~0xf) != lock_addr) {
+    Addr lock_addr = xc->readMiscReg(MISCREG_LOCKADDR) & cacheBlockMask;
+    if (!lock_flag || (req->getPaddr() & cacheBlockMask) != lock_addr) {
         // Lock flag not set or addr mismatch in CPU;
         // don't even bother sending to memory system
         req->setExtraData(0);
diff --git a/src/arch/mips/locked_mem.hh b/src/arch/mips/locked_mem.hh
index b4003fea9..5b0f8a1b8 100644
--- a/src/arch/mips/locked_mem.hh
+++ b/src/arch/mips/locked_mem.hh
@@ -87,8 +87,14 @@ handleLockedRead(XC *xc, Request *req)
 }
 
 template <class XC>
+inline void
+handleLockedSnoopHit(XC *xc)
+{
+}
+
+template <class XC>
 inline bool
-handleLockedWrite(XC *xc, Request *req)
+handleLockedWrite(XC *xc, Request *req, Addr cacheBlockMask)
 {
     if (req->isUncacheable()) {
         // Funky Turbolaser mailbox access...don't update
diff --git a/src/arch/power/locked_mem.hh b/src/arch/power/locked_mem.hh
index f3d042d5c..d962f9aff 100644
--- a/src/arch/power/locked_mem.hh
+++ b/src/arch/power/locked_mem.hh
@@ -60,8 +60,14 @@ handleLockedRead(XC *xc, Request *req)
 }
 
 template <class XC>
+inline void
+handleLockedSnoopHit(XC *xc)
+{
+}
+
+template <class XC>
 inline bool
-handleLockedWrite(XC *xc, Request *req)
+handleLockedWrite(XC *xc, Request *req, Addr cacheBlockMask)
 {
     return true;
 }
diff --git a/src/arch/sparc/locked_mem.hh b/src/arch/sparc/locked_mem.hh
index 8277ef487..b28179481 100644
--- a/src/arch/sparc/locked_mem.hh
+++ b/src/arch/sparc/locked_mem.hh
@@ -54,10 +54,16 @@ handleLockedRead(XC *xc, Request *req)
 {
 }
 
+template <class XC>
+inline void
+handleLockedSnoopHit(XC *xc)
+{
+}
+
 template <class XC>
 inline bool
-handleLockedWrite(XC *xc, Request *req)
+handleLockedWrite(XC *xc, Request *req, Addr cacheBlockMask)
 {
     return true;
 }
diff --git a/src/arch/x86/locked_mem.hh b/src/arch/x86/locked_mem.hh
index c2a8395aa..51cfb2ea3 100644
--- a/src/arch/x86/locked_mem.hh
+++ b/src/arch/x86/locked_mem.hh
@@ -56,10 +56,16 @@ namespace X86ISA
 
     template <class XC>
     inline bool
-    handleLockedWrite(XC *xc, Request *req)
+    handleLockedWrite(XC *xc, Request *req, Addr cacheBlockMask)
     {
         return true;
     }
+
+    template <class XC>
+    inline void
+    handleLockedSnoopHit(XC *xc)
+    {
+    }
 }
 
 #endif // __ARCH_X86_LOCKEDMEM_HH__
diff --git a/src/cpu/base.hh b/src/cpu/base.hh
index 540c72833..515f6a5a2 100644
--- a/src/cpu/base.hh
+++ b/src/cpu/base.hh
@@ -261,6 +261,9 @@ class BaseCPU : public MemObject
     /// Given a thread num get tho thread context for it
     virtual ThreadContext *getContext(int tn) { return threadContexts[tn]; }
 
+    /// Get the number of thread contexts available
+    unsigned numContexts() { return threadContexts.size(); }
+
   public:
     typedef BaseCPUParams Params;
     const Params *params() const
diff --git a/src/cpu/base_dyn_inst.hh b/src/cpu/base_dyn_inst.hh
index f12a89bbd..3ce7de75d 100644
--- a/src/cpu/base_dyn_inst.hh
+++ b/src/cpu/base_dyn_inst.hh
@@ -164,6 +164,8 @@ class BaseDynInst : public RefCounted
     /** Pointer to the Impl's CPU object. */
     ImplCPU *cpu;
 
+    BaseCPU *getCpuPtr() { return cpu; }
+
     /** Pointer to the thread state. */
     ImplState *thread;
diff --git a/src/cpu/inorder/resources/cache_unit.cc b/src/cpu/inorder/resources/cache_unit.cc
index 9a46641ac..c71678a91 100644
--- a/src/cpu/inorder/resources/cache_unit.cc
+++ b/src/cpu/inorder/resources/cache_unit.cc
@@ -1,4 +1,16 @@
 /*
+ * Copyright (c) 2013 ARM Limited
+ * All rights reserved
+ *
+ * The license below extends only to copyright in the software and shall
+ * not be construed as granting a license to any other intellectual
+ * property including but not limited to intellectual property relating
+ * to a hardware implementation of the functionality of the software
+ * licensed hereunder. You may use the software subject to the license
+ * terms below provided that you ensure that this notice is replicated
+ * unmodified and in its entirety in all distributions of the software,
+ * modified or unmodified, in source code or in binary form.
+ *
  * Copyright (c) 2007 MIPS Technologies, Inc.
  * All rights reserved.
  *
@@ -863,7 +875,7 @@ CacheUnit::doCacheAccess(DynInstPtr inst, uint64_t *write_res,
         if (mem_req->isLLSC()) {
             assert(cache_req->inst->isStoreConditional());
             DPRINTF(InOrderCachePort, "Evaluating Store Conditional access\n");
-            do_access = TheISA::handleLockedWrite(inst.get(), mem_req);
+            do_access = TheISA::handleLockedWrite(inst.get(), mem_req, cacheBlkSize);
         }
     }
diff --git a/src/cpu/o3/lsq_unit_impl.hh b/src/cpu/o3/lsq_unit_impl.hh
index 277fe48d2..7ec59e38d 100644
--- a/src/cpu/o3/lsq_unit_impl.hh
+++ b/src/cpu/o3/lsq_unit_impl.hh
@@ -1,6 +1,6 @@
 /*
- * Copyright (c) 2010-2012 ARM Limited
+ * Copyright (c) 2010-2013 ARM Limited
  * All rights reserved
 *
 * The license below extends only to copyright in the software and shall
@@ -433,12 +433,13 @@ void
 LSQUnit<Impl>::checkSnoop(PacketPtr pkt)
 {
     int load_idx = loadHead;
+    DPRINTF(LSQUnit, "Got snoop for address %#x\n", pkt->getAddr());
 
     // Unlock the cpu-local monitor when the CPU sees a snoop to a locked
     // address. The CPU can speculatively execute a LL operation after a pending
     // SC operation in the pipeline and that can make the cache monitor the CPU
     // is connected to valid while it really shouldn't be.
-    for (int x = 0; x < cpu->numActiveThreads(); x++) {
+    for (int x = 0; x < cpu->numContexts(); x++) {
         ThreadContext *tc = cpu->getContext(x);
         bool no_squash = cpu->thread[x]->noSquashFromTC;
         cpu->thread[x]->noSquashFromTC = true;
@@ -446,13 +447,23 @@ LSQUnit<Impl>::checkSnoop(PacketPtr pkt)
         cpu->thread[x]->noSquashFromTC = no_squash;
     }
 
+    Addr invalidate_addr = pkt->getAddr() & cacheBlockMask;
+
+    DynInstPtr ld_inst = loadQueue[load_idx];
+    if (ld_inst) {
+        Addr load_addr = ld_inst->physEffAddr & cacheBlockMask;
+        // Check that this snoop didn't just invalidate our lock flag
+        if (ld_inst->effAddrValid() && load_addr == invalidate_addr &&
+            ld_inst->memReqFlags & Request::LLSC)
+            TheISA::handleLockedSnoopHit(ld_inst.get());
+    }
+
     // If this is the only load in the LSQ we don't care
     if (load_idx == loadTail)
         return;
+
     incrLdIdx(load_idx);
 
-    DPRINTF(LSQUnit, "Got snoop for address %#x\n", pkt->getAddr());
-    Addr invalidate_addr = pkt->getAddr() & cacheBlockMask;
     while (load_idx != loadTail) {
         DynInstPtr ld_inst = loadQueue[load_idx];
@@ -468,11 +479,20 @@ LSQUnit<Impl>::checkSnoop(PacketPtr pkt)
         if (load_addr == invalidate_addr) {
             if (ld_inst->possibleLoadViolation()) {
                 DPRINTF(LSQUnit, "Conflicting load at addr %#x [sn:%lli]\n",
-                        ld_inst->physEffAddr, pkt->getAddr(), ld_inst->seqNum);
+                        pkt->getAddr(), ld_inst->seqNum);
 
                 // Mark the load for re-execution
                 ld_inst->fault = new ReExec;
             } else {
+                DPRINTF(LSQUnit, "HitExternal Snoop for addr %#x [sn:%lli]\n",
+                        pkt->getAddr(), ld_inst->seqNum);
+
+                // Make sure that we don't lose a snoop hitting a LOCKED
+                // address since the LOCK* flags don't get updated until
+                // commit.
+                if (ld_inst->memReqFlags & Request::LLSC)
+                    TheISA::handleLockedSnoopHit(ld_inst.get());
+
                 // If a older load checks this and it's true
                 // then we might have missed the snoop
                 // in which case we need to invalidate to be sure
@@ -849,7 +869,7 @@ LSQUnit<Impl>::writebackStores()
             // misc regs normally updates the result, but this is not
             // the desired behavior when handling store conditionals.
             inst->recordResult(false);
-            bool success = TheISA::handleLockedWrite(inst.get(), req);
+            bool success = TheISA::handleLockedWrite(inst.get(), req, cacheBlockMask);
             inst->recordResult(true);
 
             if (!success) {
diff --git a/src/cpu/simple/atomic.cc b/src/cpu/simple/atomic.cc
index 617e845a5..b1efbc5ce 100644
--- a/src/cpu/simple/atomic.cc
+++ b/src/cpu/simple/atomic.cc
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012 ARM Limited
+ * Copyright (c) 2012-2013 ARM Limited
  * All rights reserved.
 *
 * The license below extends only to copyright in the software and shall
@@ -278,6 +278,36 @@ AtomicSimpleCPU::suspendContext(ThreadID thread_num)
 }
 
 
+Tick
+AtomicSimpleCPU::AtomicCPUDPort::recvAtomicSnoop(PacketPtr pkt)
+{
+    DPRINTF(SimpleCPU, "received snoop pkt for addr:%#x %s\n", pkt->getAddr(),
+            pkt->cmdString());
+
+    // if snoop invalidates, release any associated locks
+    if (pkt->isInvalidate()) {
+        DPRINTF(SimpleCPU, "received invalidation for addr:%#x\n",
+                pkt->getAddr());
+        TheISA::handleLockedSnoop(cpu->thread, pkt, cacheBlockMask);
+    }
+
+    return 0;
+}
+
+void
+AtomicSimpleCPU::AtomicCPUDPort::recvFunctionalSnoop(PacketPtr pkt)
+{
+    DPRINTF(SimpleCPU, "received snoop pkt for addr:%#x %s\n", pkt->getAddr(),
+            pkt->cmdString());
+
+    // if snoop invalidates, release any associated locks
+    if (pkt->isInvalidate()) {
+        DPRINTF(SimpleCPU, "received invalidation for addr:%#x\n",
+                pkt->getAddr());
+        TheISA::handleLockedSnoop(cpu->thread, pkt, cacheBlockMask);
+    }
+}
+
 Fault
 AtomicSimpleCPU::readMem(Addr addr, uint8_t * data, unsigned size,
                          unsigned flags)
@@ -402,7 +432,7 @@ AtomicSimpleCPU::writeMem(uint8_t *data, unsigned size,
 
         if (req->isLLSC()) {
             cmd = MemCmd::StoreCondReq;
-            do_access = TheISA::handleLockedWrite(thread, req);
+            do_access = TheISA::handleLockedWrite(thread, req, dcachePort.cacheBlockMask);
         } else if (req->isSwap()) {
             cmd = MemCmd::SwapReq;
             if (req->isCondSwap()) {
diff --git a/src/cpu/simple/atomic.hh b/src/cpu/simple/atomic.hh
index 7366213f8..7426139e7 100644
--- a/src/cpu/simple/atomic.hh
+++ b/src/cpu/simple/atomic.hh
@@ -147,17 +147,12 @@ class AtomicSimpleCPU : public BaseSimpleCPU
 
       public:
-        AtomicCPUPort(const std::string &_name, BaseCPU* _cpu)
+        AtomicCPUPort(const std::string &_name, BaseSimpleCPU* _cpu)
             : MasterPort(_name, _cpu)
         { }
 
      protected:
-
-        virtual Tick recvAtomicSnoop(PacketPtr pkt)
-        {
-            // Snooping a coherence request, just return
-            return 0;
-        }
+        virtual Tick recvAtomicSnoop(PacketPtr pkt) { return 0; }
 
         bool recvTimingResp(PacketPtr pkt)
         {
@@ -172,8 +167,30 @@ class AtomicSimpleCPU : public BaseSimpleCPU
 
    };
 
+    class AtomicCPUDPort : public AtomicCPUPort
+    {
+
+      public:
+
+        AtomicCPUDPort(const std::string &_name, BaseSimpleCPU* _cpu)
+            : AtomicCPUPort(_name, _cpu), cpu(_cpu)
+        {
+            cacheBlockMask = ~(cpu->cacheLineSize() - 1);
+        }
+
+        bool isSnooping() const { return true; }
+
+        Addr cacheBlockMask;
+      protected:
+        BaseSimpleCPU *cpu;
+
+        virtual Tick recvAtomicSnoop(PacketPtr pkt);
+        virtual void recvFunctionalSnoop(PacketPtr pkt);
+    };
+
+
     AtomicCPUPort icachePort;
-    AtomicCPUPort dcachePort;
+    AtomicCPUDPort dcachePort;
 
     bool fastmem;
     Request ifetch_req;
diff --git a/src/cpu/simple/timing.cc b/src/cpu/simple/timing.cc
index 7996a6ddd..366164e36 100644
--- a/src/cpu/simple/timing.cc
+++ b/src/cpu/simple/timing.cc
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2010-2012 ARM Limited
+ * Copyright (c) 2010-2013 ARM Limited
  * All rights reserved
 *
 * The license below extends only to copyright in the software and shall
@@ -96,6 +96,7 @@ TimingSimpleCPU::TimingSimpleCPU(TimingSimpleCPUParams *p)
 }
 
+
 TimingSimpleCPU::~TimingSimpleCPU()
 {
 }
@@ -273,7 +274,7 @@ TimingSimpleCPU::sendData(RequestPtr req, uint8_t *data, uint64_t *res,
     bool do_access = true;  // flag to suppress cache access
 
     if (req->isLLSC()) {
-        do_access = TheISA::handleLockedWrite(thread, req);
+        do_access = TheISA::handleLockedWrite(thread, req, dcachePort.cacheBlockMask);
     } else if (req->isCondSwap()) {
         assert(res);
         req->setExtraData(*res);
@@ -813,6 +814,13 @@ TimingSimpleCPU::completeDataAccess(PacketPtr pkt)
     advanceInst(fault);
 }
 
+void
+TimingSimpleCPU::DcachePort::recvTimingSnoopReq(PacketPtr pkt)
+{
+    TheISA::handleLockedSnoop(cpu->thread, pkt, cacheBlockMask);
+}
+
+
 bool
 TimingSimpleCPU::DcachePort::recvTimingResp(PacketPtr pkt)
 {
diff --git a/src/cpu/simple/timing.hh b/src/cpu/simple/timing.hh
index 03264315e..4a5a20429 100644
--- a/src/cpu/simple/timing.hh
+++ b/src/cpu/simple/timing.hh
@@ -165,7 +165,7 @@ class TimingSimpleCPU : public BaseSimpleCPU
         /**
         * Snooping a coherence request, do nothing.
         */
-        virtual void recvTimingSnoopReq(PacketPtr pkt) { }
+        virtual void recvTimingSnoopReq(PacketPtr pkt) {}
 
         TimingSimpleCPU* cpu;
 
@@ -217,10 +217,18 @@ class TimingSimpleCPU : public BaseSimpleCPU
         DcachePort(TimingSimpleCPU *_cpu)
             : TimingCPUPort(_cpu->name() + ".dcache_port", _cpu),
              tickEvent(_cpu)
-        { }
+        {
+            cacheBlockMask = ~(cpu->cacheLineSize() - 1);
+        }
+        Addr cacheBlockMask;
 
      protected:
+        /** Snoop a coherence request, we need to check if this causes
+         * a wakeup event on a cpu that is monitoring an address
+         */
+        virtual void recvTimingSnoopReq(PacketPtr pkt);
+
         virtual bool recvTimingResp(PacketPtr pkt);
 
         virtual void recvRetry();
```