Diffstat (limited to 'src')
-rw-r--r-- | src/mem/cache/cache.hh                        |  6
-rw-r--r-- | src/mem/cache/cache_impl.hh                   | 62
-rw-r--r-- | src/mem/cache/coherence/coherence_protocol.cc |  3
3 files changed, 65 insertions, 6 deletions
diff --git a/src/mem/cache/cache.hh b/src/mem/cache/cache.hh
index 722ce216b..3e45c85d2 100644
--- a/src/mem/cache/cache.hh
+++ b/src/mem/cache/cache.hh
@@ -227,6 +227,12 @@ class Cache : public BaseCache
     BlkType* handleAccess(PacketPtr &pkt, int & lat,
                           PacketList & writebacks, bool update = true);
 
+
+    /**
+     * Handle doing the Compare and Swap function for SPARC.
+     */
+    void cmpAndSwap(BlkType *blk, PacketPtr &pkt);
+
     /**
      * Populates a cache block and handles all outstanding requests for the
      * satisfied fill request. This version takes an MSHR pointer and uses its
diff --git a/src/mem/cache/cache_impl.hh b/src/mem/cache/cache_impl.hh
index 5c6ab0950..9368e7648 100644
--- a/src/mem/cache/cache_impl.hh
+++ b/src/mem/cache/cache_impl.hh
@@ -122,12 +122,15 @@ Cache<TagStore,Coherence>::handleAccess(PacketPtr &pkt, int & lat,
     if (blk != NULL) {
 
         if (!update) {
+            if (pkt->isWrite()){
                 assert(offset < blkSize);
                 assert(pkt->getSize() <= blkSize);
                 assert(offset+pkt->getSize() <= blkSize);
                 std::memcpy(blk->data + offset, pkt->getPtr<uint8_t>(),
                        pkt->getSize());
+            } else if (pkt->isReadWrite()) {
+                cmpAndSwap(blk, pkt);
             } else if (!(pkt->flags & SATISFIED)) {
                 pkt->flags |= SATISFIED;
                 pkt->result = Packet::Success;
@@ -154,7 +157,8 @@ Cache<TagStore,Coherence>::handleAccess(PacketPtr &pkt, int & lat,
         }
     }
 
-    if ((pkt->isWrite() && blk->isWritable()) ||
+    if ((pkt->isReadWrite() && blk->isWritable()) ||
+        (pkt->isWrite() && blk->isWritable()) ||
         (pkt->isRead() && blk->isValid())) {
 
         // We are satisfying the request
@@ -180,13 +184,15 @@ Cache<TagStore,Coherence>::handleAccess(PacketPtr &pkt, int & lat,
                 std::memcpy(blk->data + offset, pkt->getPtr<uint8_t>(),
                             pkt->getSize());
             }
+        } else if (pkt->isReadWrite()) {
+            cmpAndSwap(blk, pkt);
         } else {
             assert(pkt->isRead());
             if (pkt->req->isLocked()) {
                 blk->trackLoadLocked(pkt->req);
             }
             std::memcpy(pkt->getPtr<uint8_t>(), blk->data + offset,
-                       pkt->getSize());
+                        pkt->getSize());
         }
 
         if (write_data ||
@@ -215,6 +221,44 @@ Cache<TagStore,Coherence>::handleAccess(PacketPtr &pkt, int & lat,
 }
 
 template<class TagStore, class Coherence>
+void
+Cache<TagStore,Coherence>::cmpAndSwap(BlkType *blk, PacketPtr &pkt){
+    uint64_t overwrite_val;
+    bool overwrite_mem;
+    uint64_t condition_val64;
+    uint32_t condition_val32;
+
+    int offset = tags->extractBlkOffset(pkt->getAddr());
+
+    assert(sizeof(uint64_t) >= pkt->getSize());
+
+    overwrite_mem = true;
+    // keep a copy of our possible write value, and copy what is at the
+    // memory address into the packet
+    std::memcpy(&overwrite_val, pkt->getPtr<uint8_t>(), pkt->getSize());
+    std::memcpy(pkt->getPtr<uint8_t>(), blk->data + offset,
+                pkt->getSize());
+
+    if (pkt->req->isCondSwap()) {
+        if (pkt->getSize() == sizeof(uint64_t)) {
+            condition_val64 = pkt->req->getExtraData();
+            overwrite_mem = !std::memcmp(&condition_val64, blk->data + offset,
+                                         sizeof(uint64_t));
+        } else if (pkt->getSize() == sizeof(uint32_t)) {
+            condition_val32 = (uint32_t)pkt->req->getExtraData();
+            overwrite_mem = !std::memcmp(&condition_val32, blk->data + offset,
+                                         sizeof(uint32_t));
+        } else
+            panic("Invalid size for conditional read/write\n");
+    }
+
+    if (overwrite_mem)
+        std::memcpy(blk->data + offset,
+                    &overwrite_val, pkt->getSize());
+
+}
+
+template<class TagStore, class Coherence>
 typename Cache<TagStore,Coherence>::BlkType*
 Cache<TagStore,Coherence>::handleFill(BlkType *blk, PacketPtr &pkt,
                                       CacheBlk::State new_state,
@@ -244,8 +288,9 @@ Cache<TagStore,Coherence>::handleFill(BlkType *blk, PacketPtr &pkt,
             blk = NULL;
         }
 
-        if (blk && (target->isWrite() ? blk->isWritable() : blk->isValid())) {
-            assert(target->isWrite() || target->isRead());
+        if (blk && ((target->isWrite() || target->isReadWrite()) ?
+                    blk->isWritable() : blk->isValid())) {
+            assert(target->isWrite() || target->isReadWrite() || target->isRead());
             assert(target->getOffset(blkSize) + target->getSize() <= blkSize);
             if (target->isWrite()) {
                 if (blk->checkWrite(pkt->req)) {
@@ -253,6 +298,8 @@ Cache<TagStore,Coherence>::handleFill(BlkType *blk, PacketPtr &pkt,
                     std::memcpy(blk->data + target->getOffset(blkSize),
                                 target->getPtr<uint8_t>(), target->getSize());
                 }
+            } else if (target->isReadWrite()) {
+                cmpAndSwap(blk, pkt);
             } else {
                 if (pkt->req->isLocked()) {
                     blk->trackLoadLocked(pkt->req);
@@ -332,8 +379,9 @@ Cache<TagStore,Coherence>::handleFill(BlkType *blk, MSHR * mshr,
             continue;
         }
 
-        if (blk && (target->isWrite() ? blk->isWritable() : blk->isValid())) {
-            assert(target->isWrite() || target->isRead());
+        if (blk && ((target->isWrite() || target->isReadWrite()) ?
+                    blk->isWritable() : blk->isValid())) {
+            assert(target->isWrite() || target->isRead() || target->isReadWrite());
             assert(target->getOffset(blkSize) + target->getSize() <= blkSize);
             if (target->isWrite()) {
                 if (blk->checkWrite(pkt->req)) {
@@ -341,6 +389,8 @@ Cache<TagStore,Coherence>::handleFill(BlkType *blk, MSHR * mshr,
                     std::memcpy(blk->data + target->getOffset(blkSize),
                                 target->getPtr<uint8_t>(), target->getSize());
                 }
+            } else if (target->isReadWrite()) {
+                cmpAndSwap(blk, pkt);
             } else {
                 if (target->req->isLocked()) {
                     blk->trackLoadLocked(target->req);
diff --git a/src/mem/cache/coherence/coherence_protocol.cc b/src/mem/cache/coherence/coherence_protocol.cc
index e8520401d..33a8a4e63 100644
--- a/src/mem/cache/coherence/coherence_protocol.cc
+++ b/src/mem/cache/coherence/coherence_protocol.cc
@@ -295,9 +295,12 @@ CoherenceProtocol::CoherenceProtocol(const string &name,
     tt[Invalid][MC::ReadReq].onRequest(MC::ReadReq);
     // we only support write allocate right now
     tt[Invalid][MC::WriteReq].onRequest(MC::ReadExReq);
+    tt[Invalid][MC::SwapReq].onRequest(MC::ReadExReq);
     tt[Shared][MC::WriteReq].onRequest(writeToSharedCmd);
+    tt[Shared][MC::SwapReq].onRequest(writeToSharedCmd);
     if (hasOwned) {
         tt[Owned][MC::WriteReq].onRequest(writeToSharedCmd);
+        tt[Owned][MC::SwapReq].onRequest(writeToSharedCmd);
     }
 
     // Prefetching causes a read
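
For reference, the sketch below (not part of the patch and not gem5 code; cmpAndSwapSketch, blk_data, and the other names are illustrative) walks through the same semantics the new cmpAndSwap() implements: the packet's value is exchanged with the current block contents, and for a conditional swap the block is only overwritten when it currently matches the supplied condition value, mirroring SPARC CAS/SWAP behaviour.

// Standalone illustration of (conditional) compare-and-swap on a block's data.
#include <cassert>
#include <cstdint>
#include <cstring>
#include <iostream>

// Swap 'size' bytes between the packet data and blk_data + offset.  When
// 'conditional' is set, memory is only overwritten if it currently equals
// the condition value; the packet always gets the old memory contents back.
void
cmpAndSwapSketch(uint8_t *blk_data, int offset, uint8_t *pkt_data,
                 unsigned size, bool conditional, uint64_t condition_val)
{
    assert(size <= sizeof(uint64_t));

    // keep a copy of our possible write value, and copy what is at the
    // memory address into the packet
    uint64_t overwrite_val = 0;
    std::memcpy(&overwrite_val, pkt_data, size);
    std::memcpy(pkt_data, blk_data + offset, size);

    bool overwrite_mem = true;
    if (conditional) {
        if (size == sizeof(uint64_t)) {
            overwrite_mem = !std::memcmp(&condition_val, blk_data + offset,
                                         sizeof(uint64_t));
        } else if (size == sizeof(uint32_t)) {
            uint32_t condition_val32 = (uint32_t)condition_val;
            overwrite_mem = !std::memcmp(&condition_val32, blk_data + offset,
                                         sizeof(uint32_t));
        }
    }

    if (overwrite_mem)
        std::memcpy(blk_data + offset, &overwrite_val, size);
}

int
main()
{
    uint8_t block[64] = {};          // stand-in for blk->data
    uint32_t swap_val = 42;          // value the "packet" wants to store
    uint32_t condition = 0;          // expected current memory contents

    // Conditional swap succeeds: the block holds 0, which matches 'condition'.
    cmpAndSwapSketch(block, 0, (uint8_t *)&swap_val, sizeof(swap_val),
                     true, condition);

    uint32_t now;
    std::memcpy(&now, block, sizeof(now));
    std::cout << "memory now holds " << now        // 42
              << ", packet returned " << swap_val  // 0 (the old value)
              << std::endl;
    return 0;
}

As in the patch, the old memory value is always copied back into the packet, so the requesting CPU can see what memory held regardless of whether the conditional store actually took effect.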