author      Alec Roelke <ar4jc@virginia.edu>    2016-11-30 17:10:28 -0500
committer   Alec Roelke <ar4jc@virginia.edu>    2016-11-30 17:10:28 -0500
commit      ee0c261e10c17cb1ce0a1511bb1040318e6d17f9 (patch)
tree        b75cbf788a8c4f88dbd09ebb600253c39614b021 /src/arch/riscv
parent      84020a8aedd66740c105a06f55412942e9daac30 (diff)
download    gem5-ee0c261e10c17cb1ce0a1511bb1040318e6d17f9.tar.xz
riscv: [Patch 7/5] Corrected LRSC semantics
RISC-V makes use of load-reserved (LR) and store-conditional (SC) instructions to enable lock-free concurrent data manipulation, along with ACQUIRE and RELEASE semantics for memory ordering of LR, SC, and AMO instructions (the last of which do not follow LR/SC semantics).

This patch corrects patch 4, which added these instructions to the RISC-V implementation. It modifies locked_mem.hh and the implementations of lr.w, sc.w, lr.d, and sc.d to apply the proper gem5 flags and return the proper values.

An important difference between gem5's LLSC semantics and RISC-V's LR/SC semantics, beyond the name, is the success convention: gem5 uses 0 to indicate failure and 1 to indicate success, while RISC-V does the opposite. Strictly speaking, RISC-V uses 0 to indicate success and a nonzero error code to indicate failure, but currently only 1 is reserved as a failure code by the ISA reference.

This is the seventh patch in a series that originally consisted of five patches adding the RISC-V ISA to gem5. The original five patches added all of the instructions and support for more detailed CPU models; the sixth patch corrected the implementations of Linux constants and structs. An eighth patch will add regression tests for the instructions.

[Removed some commented-out code from locked_mem.hh.]

Signed-off-by: Alec Roelke
Signed-off-by: Jason Lowe-Power <jason@lowepower.com>
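As a reading aid (not part of the patch): a minimal, self-contained C++ sketch of the convention mismatch described above; the helper name is hypothetical. It shows the logical negation the patch adds to the StoreCond templates when converting gem5's result into the value written to Rd:

    #include <cassert>
    #include <cstdint>

    // gem5's LLSC convention: extra data 0 = SC failed, 1 = SC succeeded.
    // RISC-V's SC convention:  rd = 0 on success, nonzero on failure
    // (currently only 1 is reserved).  Converting between the two is a
    // logical negation.
    static inline std::uint64_t gem5ResultToRiscvRd(std::uint64_t gem5_result)
    {
        return !gem5_result;
    }

    int main()
    {
        assert(gem5ResultToRiscvRd(1) == 0); // gem5 success -> RISC-V success code
        assert(gem5ResultToRiscvRd(0) == 1); // gem5 failure -> RISC-V failure code
        return 0;
    }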
Diffstat (limited to 'src/arch/riscv')
-rw-r--r--  src/arch/riscv/isa/decoder.isa        4
-rw-r--r--  src/arch/riscv/isa/formats/mem.isa    7
-rw-r--r--  src/arch/riscv/locked_mem.hh        107
3 files changed, 56 insertions, 62 deletions
diff --git a/src/arch/riscv/isa/decoder.isa b/src/arch/riscv/isa/decoder.isa
index d98f94629..eac0652c0 100644
--- a/src/arch/riscv/isa/decoder.isa
+++ b/src/arch/riscv/isa/decoder.isa
@@ -218,12 +218,12 @@ decode OPCODE default Unknown::unknown() {
0x3: decode AMOFUNCT {
0x2: LoadReserved::lr_d({{
Rd_sd = Mem_sd;
- }}, aq=AQ, rl=RL);
+ }}, mem_flags=LLSC, aq=AQ, rl=RL);
0x3: StoreCond::sc_d({{
Mem = Rs2;
}}, {{
Rd = result;
- }}, aq=AQ, rl=RL);
+ }}, mem_flags=LLSC, inst_flags=IsStoreConditional, aq=AQ, rl=RL);
format AtomicMemOp {
0x0: amoadd_d({{Rt_sd = Mem_sd;}}, {{
Mem_sd = Rs2_sd + Rt_sd;
diff --git a/src/arch/riscv/isa/formats/mem.isa b/src/arch/riscv/isa/formats/mem.isa
index bea649c04..69a72dfa8 100644
--- a/src/arch/riscv/isa/formats/mem.isa
+++ b/src/arch/riscv/isa/formats/mem.isa
@@ -363,6 +363,9 @@ def template StoreCondExecute {{
if (fault == NoFault) {
fault = writeMemAtomic(xc, traceData, Mem, EA, memAccessFlags,
&result);
+ // RISC-V uses the opposite convention from gem5 for success flags,
+ // so we invert the result here.
+ result = !result;
}
if (fault == NoFault) {
@@ -385,7 +388,9 @@ def template StoreCondCompleteAcc {{
%(op_dest_decl)s;
- uint64_t result = pkt->req->getExtraData();
+ // RISC-V uses the opposite convention from gem5 for success flags,
+ // so we invert the result here.
+ uint64_t result = !pkt->req->getExtraData();
if (fault == NoFault) {
%(postacc_code)s;
diff --git a/src/arch/riscv/locked_mem.hh b/src/arch/riscv/locked_mem.hh
index a2e48b65c..4a809206c 100644
--- a/src/arch/riscv/locked_mem.hh
+++ b/src/arch/riscv/locked_mem.hh
@@ -48,6 +48,8 @@
#ifndef __ARCH_RISCV_LOCKED_MEM_HH__
#define __ARCH_RISCV_LOCKED_MEM_HH__
+#include <stack>
+
#include "arch/registers.hh"
#include "base/misc.hh"
#include "base/trace.hh"
@@ -60,80 +62,67 @@
*/
namespace RiscvISA
{
-static bool lock_flag = false;
-static Addr lock_addr = 0;
-
-template <class XC>
-inline void handleLockedSnoop(XC *xc, PacketPtr pkt, Addr cacheBlockMask)
-{
- if (!lock_flag)
- return;
- DPRINTF(LLSC, "Locked snoop on address %x.\n",
- pkt->getAddr()&cacheBlockMask);
+const int WARN_FAILURE = 10000;
- Addr snoop_addr = pkt->getAddr()&cacheBlockMask;
+// RISC-V allows multiple locks per hart, but each SC has to unlock the most
+// recent one, so we use a stack here.
+static std::stack<Addr> locked_addrs;
- if ((lock_addr&cacheBlockMask) == snoop_addr)
- lock_flag = false;
+template <class XC> inline void
+handleLockedSnoop(XC *xc, PacketPtr pkt, Addr cacheBlockMask)
+{
+ if (locked_addrs.empty())
+ return;
+ Addr snoop_addr = pkt->getAddr() & cacheBlockMask;
+ DPRINTF(LLSC, "Locked snoop on address %x.\n", snoop_addr);
+ if ((locked_addrs.top() & cacheBlockMask) == snoop_addr)
+ locked_addrs.pop();
}
-template <class XC>
-inline void handleLockedRead(XC *xc, Request *req)
+template <class XC> inline void
+handleLockedRead(XC *xc, Request *req)
{
- lock_addr = req->getPaddr()&~0xF;
- lock_flag = true;
- DPRINTF(LLSC, "[cid:%i]: "
- "Load-Link Flag Set & Load-Link Address set to %x.\n",
- req->contextId(), req->getPaddr()&~0xF);
+ locked_addrs.push(req->getPaddr() & ~0xF);
+ DPRINTF(LLSC, "[cid:%d]: Reserved address %x.\n",
+ req->contextId(), req->getPaddr() & ~0xF);
}
-template <class XC>
-inline void handleLockedSnoopHit(XC *xc)
+template <class XC> inline void
+handleLockedSnoopHit(XC *xc)
{}
-template <class XC>
-inline bool handleLockedWrite(XC *xc, Request *req, Addr cacheBlockMask)
+template <class XC> inline bool
+handleLockedWrite(XC *xc, Request *req, Addr cacheBlockMask)
{
- if (req->isUncacheable()) {
- // Funky Turbolaser mailbox access...don't update
- // result register (see stq_c in decoder.isa)
- req->setExtraData(2);
- } else {
- // standard store conditional
- if (!lock_flag || (req->getPaddr()&~0xF) != lock_addr) {
- // Lock flag not set or addr mismatch in CPU;
- // don't even bother sending to memory system
- req->setExtraData(0);
- lock_flag = false;
-
- // the rest of this code is not architectural;
- // it's just a debugging aid to help detect
- // livelock by warning on long sequences of failed
- // store conditionals
- int stCondFailures = xc->readStCondFailures();
- stCondFailures++;
- xc->setStCondFailures(stCondFailures);
- if (stCondFailures % 100000 == 0) {
- warn("%i:"" context %d:"
- " %d consecutive store conditional failures\n",
- curTick(), xc->contextId(), stCondFailures);
- }
+ // Normally RISC-V uses zero to indicate success and nonzero to indicate
+ // failure (right now only 1 is reserved), but in gem5 zero indicates
+ // failure and one indicates success, so here we conform to that (it should
+ // be switched in the instruction's implementation)
- if (!lock_flag){
- DPRINTF(LLSC, "[cid:%i]:"
- " Lock Flag Set, Store Conditional Failed.\n",
- req->contextId());
- } else if ((req->getPaddr() & ~0xf) != lock_addr) {
- DPRINTF(LLSC, "[cid:%i]: Load-Link Address Mismatch, "
- "Store Conditional Failed.\n", req->contextId());
- }
- // store conditional failed already, so don't issue it to mem
- return false;
+ DPRINTF(LLSC, "[cid:%d]: locked_addrs empty? %s.\n", req->contextId(),
+ locked_addrs.empty() ? "yes" : "no");
+ if (!locked_addrs.empty()) {
+ DPRINTF(LLSC, "[cid:%d]: addr = %x.\n", req->contextId(),
+ req->getPaddr() & ~0xF);
+ DPRINTF(LLSC, "[cid:%d]: last locked addr = %x.\n", req->contextId(),
+ locked_addrs.top());
+ }
+ if (locked_addrs.empty()
+ || locked_addrs.top() != ((req->getPaddr() & ~0xF))) {
+ req->setExtraData(0);
+ int stCondFailures = xc->readStCondFailures();
+ xc->setStCondFailures(++stCondFailures);
+ if (stCondFailures % WARN_FAILURE == 0) {
+ warn("%i: context %d: %d consecutive SC failures.\n",
+ curTick(), xc->contextId(), stCondFailures);
}
+ return false;
+ }
+ if (req->isUncacheable()) {
+ req->setExtraData(2);
}
-
return true;
}
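As a further reading aid (not part of the patch): a simplified, self-contained sketch of the stack-based reservation bookkeeping that locked_mem.hh now implements. The function names and standalone layout are hypothetical, and consuming the reservation on a successful SC is a simplification of this model; in gem5 the handlers above cooperate with the memory system and snoops instead:

    #include <cstdint>
    #include <iostream>
    #include <stack>

    using Addr = std::uint64_t;

    // RISC-V allows multiple outstanding LR reservations per hart, and each
    // SC pairs with the most recent one, hence a stack of reserved addresses.
    static std::stack<Addr> locked_addrs;

    // LR: reserve the 16-byte-aligned block containing paddr.
    void loadReserved(Addr paddr)
    {
        locked_addrs.push(paddr & ~Addr(0xF));
    }

    // Snoop: another agent touched memory, so drop the newest reservation
    // if it falls inside the snooped cache block.
    void snoopInvalidate(Addr snoop_paddr, Addr cacheBlockMask)
    {
        if (!locked_addrs.empty() &&
            (locked_addrs.top() & cacheBlockMask) ==
            (snoop_paddr & cacheBlockMask))
            locked_addrs.pop();
    }

    // SC: succeed only if the newest reservation matches this address.
    // Returns the RISC-V convention: 0 = success, 1 = failure.  (In this
    // simplified model a successful SC consumes the reservation.)
    std::uint64_t storeConditional(Addr paddr)
    {
        if (locked_addrs.empty() || locked_addrs.top() != (paddr & ~Addr(0xF)))
            return 1;   // no reservation, or address mismatch
        locked_addrs.pop();
        return 0;       // success
    }

    int main()
    {
        loadReserved(0x1000);
        std::cout << storeConditional(0x2000) << "\n"; // 1: address mismatch
        std::cout << storeConditional(0x1000) << "\n"; // 0: matches reservation
        std::cout << storeConditional(0x1000) << "\n"; // 1: reservation consumed
        return 0;
    }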