path: root/src/mem/abstract_mem.cc
author    Matt Evans <matt.evans@arm.com>    2012-06-29 11:19:05 -0400
committer Matt Evans <matt.evans@arm.com>    2012-06-29 11:19:05 -0400
commit    579047c76d9c9baa0cb80f7e7603a2e9d3c50376 (patch)
tree      37e7de48951adfd5831d037fd097106fac723bd2 /src/mem/abstract_mem.cc
parent    3965ecc36b3d928cf8f6a66e50eed3c6de1a54c0 (diff)
Mem: Fix a livelock in the LLSC/locked memory access implementation.
Currently, when multiple CPUs perform a load-linked/store-conditional sequence, the loads all create a list of reservations, which is then scanned when the stores occur. A reservation matching the context and address of the store is sought, BUT all reservations matching the address are also erased at this point. The upshot is that a store-conditional removes every reservation on the address even when the store itself does not succeed.

A livelock was observed using 7-8 CPUs: a thread would erase the other threads' reservations, fail its own store-conditional, loop, and install its reservation again, only to have it blown away by another thread whose own store-conditional fails in turn. No forward progress was made, hanging the system.

The correct behaviour is to blow a reservation only when a store (conditional or not) actually /occurs/ to its address. One thread always wins: the one whose store-conditional is performed first.
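To make the failure mode concrete, here is a minimal, self-contained C++ model of the old reservation-list semantics. The Reservation struct and the loadLinked()/storeConditionalOld() helpers are illustrative stand-ins, not gem5 code. The interleaving in main() mirrors the observed hang: each context's failed store-conditional wipes the other context's fresh reservation, so no round can ever succeed.

    #include <cstdint>
    #include <iostream>
    #include <list>

    // Illustrative stand-in for gem5's LockedAddr records.
    struct Reservation { int contextId; uint64_t addr; };
    std::list<Reservation> lockedAddrList;

    // Load-linked: record a reservation for (contextId, addr).
    void loadLinked(int ctx, uint64_t addr) {
        lockedAddrList.push_back({ctx, addr});
    }

    // OLD semantics: scan for a record matching (ctx, addr), but erase
    // EVERY record on the address along the way -- even when the
    // store-conditional itself fails.
    bool storeConditionalOld(int ctx, uint64_t addr) {
        bool success = false;
        for (auto i = lockedAddrList.begin(); i != lockedAddrList.end();) {
            if (i->addr == addr) {
                if (i->contextId == ctx)
                    success = true;
                i = lockedAddrList.erase(i);  // erased regardless of success
            } else {
                ++i;
            }
        }
        return success;
    }

    int main() {
        // Interleaving where each context's SC lands between the other's
        // LL and SC: every SC fails, yet every SC wipes the other
        // context's fresh reservation.
        loadLinked(1, 0x1000);                         // T1: LL
        for (int round = 0; round < 3; ++round) {
            bool t2 = storeConditionalOld(2, 0x1000);  // T2: SC fails, erases T1's record
            loadLinked(2, 0x1000);                     // T2: retry LL
            bool t1 = storeConditionalOld(1, 0x1000);  // T1: SC fails, erases T2's record
            loadLinked(1, 0x1000);                     // T1: retry LL
            std::cout << "round " << round << ": t1=" << t1
                      << " t2=" << t2 << '\n';         // always 0 and 0
        }
    }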
Diffstat (limited to 'src/mem/abstract_mem.cc')
-rw-r--r--  src/mem/abstract_mem.cc | 61
1 file changed, 34 insertions(+), 27 deletions(-)
diff --git a/src/mem/abstract_mem.cc b/src/mem/abstract_mem.cc
index c84d6a50a..0742f3f8e 100644
--- a/src/mem/abstract_mem.cc
+++ b/src/mem/abstract_mem.cc
@@ -270,43 +270,50 @@ AbstractMemory::checkLockedAddrList(PacketPtr pkt)
     // Initialize return value.  Non-conditional stores always
     // succeed.  Assume conditional stores will fail until proven
     // otherwise.
-    bool success = !isLLSC;
+    bool allowStore = !isLLSC;
 
-    // Iterate over list.  Note that there could be multiple matching
-    // records, as more than one context could have done a load locked
-    // to this location.
+    // Iterate over list.  Note that there could be multiple matching records,
+    // as more than one context could have done a load locked to this location.
+    // Only remove records when we succeed in finding a record for (xc, addr);
+    // then, remove all records with this address.  Failed store-conditionals do
+    // not blow unrelated reservations.
     list<LockedAddr>::iterator i = lockedAddrList.begin();
-    while (i != lockedAddrList.end()) {
-
-        if (i->addr == paddr) {
-            // we have a matching address
-
-            if (isLLSC && i->matchesContext(req)) {
-                // it's a store conditional, and as far as the memory
-                // system can tell, the requesting context's lock is
-                // still valid.
+    if (isLLSC) {
+        while (i != lockedAddrList.end()) {
+            if (i->addr == paddr && i->matchesContext(req)) {
+                // it's a store conditional, and as far as the memory system can
+                // tell, the requesting context's lock is still valid.
                 DPRINTF(LLSC, "StCond success: context %d addr %#x\n",
                         req->contextId(), paddr);
-                success = true;
+                allowStore = true;
+                break;
             }
-
-            // Get rid of our record of this lock and advance to next
-            DPRINTF(LLSC, "Erasing lock record: context %d addr %#x\n",
-                    i->contextId, paddr);
-            i = lockedAddrList.erase(i);
-        }
-        else {
-            // no match: advance to next record
-            ++i;
+            // If we didn't find a match, keep searching!  Someone else may well
+            // have a reservation on this line here but we may find ours in just
+            // a little while.
+            i++;
         }
+        req->setExtraData(allowStore ? 1 : 0);
     }
-
-    if (isLLSC) {
-        req->setExtraData(success ? 1 : 0);
+    // LLSCs that succeeded AND non-LLSC stores both fall into here:
+    if (allowStore) {
+        // We write address paddr.  However, there may be several entries with a
+        // reservation on this address (for other contextIds) and they must all
+        // be removed.
+        i = lockedAddrList.begin();
+        while (i != lockedAddrList.end()) {
+            if (i->addr == paddr) {
+                DPRINTF(LLSC, "Erasing lock record: context %d addr %#x\n",
+                        i->contextId, paddr);
+                i = lockedAddrList.erase(i);
+            } else {
+                i++;
+            }
+        }
     }
-    return success;
+    return allowStore;
 }
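For contrast, here is the same toy model with the patched semantics, mirroring the structure of the new checkLockedAddrList() above (again an illustrative sketch, not the gem5 source): the search for a matching (context, address) record and the erasure of records are separate steps, and erasure happens only when a store actually goes ahead.

    #include <cstdint>
    #include <iostream>
    #include <list>

    struct Reservation { int contextId; uint64_t addr; };
    std::list<Reservation> lockedAddrList;

    void loadLinked(int ctx, uint64_t addr) {
        lockedAddrList.push_back({ctx, addr});
    }

    // NEW semantics: first look for our own (ctx, addr) reservation; only
    // if the store will actually occur do we erase every reservation on
    // that address.
    bool storeConditionalNew(int ctx, uint64_t addr) {
        bool allowStore = false;
        for (const auto &r : lockedAddrList) {
            if (r.addr == addr && r.contextId == ctx) {
                allowStore = true;   // our reservation is still valid
                break;
            }
        }
        if (allowStore) {
            // The store happens, so all reservations on this address die.
            for (auto i = lockedAddrList.begin(); i != lockedAddrList.end();) {
                if (i->addr == addr)
                    i = lockedAddrList.erase(i);
                else
                    ++i;
            }
        }
        return allowStore;
    }

    int main() {
        // Same adversarial interleaving as before: T2's failed SC now
        // leaves T1's reservation intact, so T1's SC succeeds and the
        // system makes forward progress.
        loadLinked(1, 0x1000);                     // T1: LL
        bool t2 = storeConditionalNew(2, 0x1000);  // T2: SC fails, erases nothing
        bool t1 = storeConditionalNew(1, 0x1000);  // T1: SC succeeds, clears list
        std::cout << "t1=" << t1 << " t2=" << t2 << '\n';  // t1=1 t2=0
    }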