author      Alec Roelke <ar4jc@virginia.edu>        2017-03-21 12:58:25 -0400
committer   Alec Roelke <ar4jc@virginia.edu>        2017-04-05 20:21:59 +0000
commit      a8f1f9811c3fdb1cf59f6d37540ad40e4699561f (patch)
tree        2fea02557b6c8d72b1c6c5a411f97fd567c67b50 /src/arch/riscv/isa
parent      6b7d30688d44952fcbb98b3e0f2bfc5155f1f9a5 (diff)
riscv: fix Linux problems with LR and SC ops
Some of the functions in the Linux toolchain that allocate memory use paired
LR and SC instructions, which did not work properly in the RISC-V model. This
patch fixes the LR and SC implementations so that calling those functions no
longer results in an endless loop of failed SC instructions.

Change-Id: If27696323dd6229a0277818e3744fbdf7180fca7
Reviewed-on: https://gem5-review.googlesource.com/2340
Maintainer: Alec Roelke <ar4jc@virginia.edu>
Reviewed-by: Jason Lowe-Power <jason@lowepower.com>
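For context, the following is a minimal sketch (not code from the Linux
toolchain) of the kind of LR/SC retry loop that such allocation functions
compile down to: an atomic fetch-and-add built from lr.w/sc.w. If the SC
always reports failure, the retry branch is always taken and the loop never
terminates, which is the behavior this patch fixes. The function name and
operand constraints are illustrative and assume a GCC/Clang RISC-V target.

    #include <cstdint>

    std::int32_t
    atomic_fetch_add(volatile std::int32_t *addr, std::int32_t inc)
    {
        std::int32_t old, tmp, fail;
        __asm__ volatile(
            "1:\n"
            "  lr.w    %0, (%3)\n"      // load-reserved the current value
            "  add     %1, %0, %4\n"    // compute the updated value
            "  sc.w    %2, %1, (%3)\n"  // store-conditional; writes 0 on success
            "  bnez    %2, 1b\n"        // retry if the reservation was lost
            : "=&r"(old), "=&r"(tmp), "=&r"(fail)
            : "r"(addr), "r"(inc)
            : "memory");
        return old;
    }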
Diffstat (limited to 'src/arch/riscv/isa')
-rw-r--r--   src/arch/riscv/isa/decoder.isa        8
-rw-r--r--   src/arch/riscv/isa/formats/amo.isa    292
-rw-r--r--   src/arch/riscv/isa/formats/mem.isa    86
3 files changed, 295 insertions, 91 deletions
diff --git a/src/arch/riscv/isa/decoder.isa b/src/arch/riscv/isa/decoder.isa
index eac0652c0..2b23c1fe4 100644
--- a/src/arch/riscv/isa/decoder.isa
+++ b/src/arch/riscv/isa/decoder.isa
@@ -170,12 +170,12 @@ decode OPCODE default Unknown::unknown() {
0x2: decode AMOFUNCT {
0x2: LoadReserved::lr_w({{
Rd_sd = Mem_sw;
- }}, mem_flags=LLSC, aq=AQ, rl=RL);
+ }}, mem_flags=LLSC);
0x3: StoreCond::sc_w({{
Mem_uw = Rs2_uw;
}}, {{
Rd = result;
- }}, inst_flags=IsStoreConditional, mem_flags=LLSC, aq=AQ, rl=RL);
+ }}, inst_flags=IsStoreConditional, mem_flags=LLSC);
format AtomicMemOp {
0x0: amoadd_w({{Rt_sd = Mem_sw;}}, {{
Mem_sw = Rs2_sw + Rt_sd;
@@ -218,12 +218,12 @@ decode OPCODE default Unknown::unknown() {
0x3: decode AMOFUNCT {
0x2: LoadReserved::lr_d({{
Rd_sd = Mem_sd;
- }}, mem_flags=LLSC, aq=AQ, rl=RL);
+ }}, mem_flags=LLSC);
0x3: StoreCond::sc_d({{
Mem = Rs2;
}}, {{
Rd = result;
- }}, mem_flags=LLSC, inst_flags=IsStoreConditional, aq=AQ, rl=RL);
+ }}, mem_flags=LLSC, inst_flags=IsStoreConditional);
format AtomicMemOp {
0x0: amoadd_d({{Rt_sd = Mem_sd;}}, {{
Mem_sd = Rs2_sd + Rt_sd;
diff --git a/src/arch/riscv/isa/formats/amo.isa b/src/arch/riscv/isa/formats/amo.isa
index b837cc9c3..d60c4e0cd 100644
--- a/src/arch/riscv/isa/formats/amo.isa
+++ b/src/arch/riscv/isa/formats/amo.isa
@@ -34,6 +34,33 @@
// Atomic memory operation instructions
//
output header {{
+ class LoadReserved : public RiscvStaticInst
+ {
+ protected:
+ Request::Flags memAccessFlags;
+
+ LoadReserved(const char *mnem, ExtMachInst _machInst,
+ OpClass __opClass)
+ : RiscvStaticInst(mnem, _machInst, __opClass)
+ {}
+
+ std::string
+ generateDisassembly(Addr pc, const SymbolTable *symtab) const;
+ };
+
+ class StoreCond : public RiscvStaticInst
+ {
+ protected:
+ Request::Flags memAccessFlags;
+
+ StoreCond(const char* mnem, ExtMachInst _machInst, OpClass __opClass)
+ : RiscvStaticInst(mnem, _machInst, __opClass)
+ {}
+
+ std::string
+ generateDisassembly(Addr pc, const SymbolTable *symtab) const;
+ };
+
class AtomicMemOp : public RiscvMacroInst
{
protected:
@@ -65,11 +92,29 @@ output header {{
}};
output decoder {{
+ std::string LoadReserved::generateDisassembly(Addr pc,
+ const SymbolTable *symtab) const
+ {
+ std::stringstream ss;
+ ss << mnemonic << ' ' << regName(_destRegIdx[0]) << ", ("
+ << regName(_srcRegIdx[0]) << ')';
+ return ss.str();
+ }
+
+ std::string StoreCond::generateDisassembly(Addr pc,
+ const SymbolTable *symtab) const
+ {
+ std::stringstream ss;
+ ss << mnemonic << ' ' << regName(_destRegIdx[0]) << ", "
+ << regName(_srcRegIdx[1]) << ", ("
+ << regName(_srcRegIdx[0]) << ')';
+ return ss.str();
+ }
+
std::string AtomicMemOp::generateDisassembly(Addr pc,
const SymbolTable *symtab) const
{
std::stringstream ss;
- ss << csprintf("0x%08x", machInst) << ' ';
ss << mnemonic << ' ' << regName(_destRegIdx[0]) << ", "
<< regName(_srcRegIdx[1]) << ", ("
<< regName(_srcRegIdx[0]) << ')';
@@ -85,6 +130,22 @@ output decoder {{
}
}};
+def template LRSCDeclare {{
+ class %(class_name)s : public %(base_class)s
+ {
+ public:
+ %(class_name)s(ExtMachInst machInst);
+
+ %(BasicExecDeclare)s
+
+ %(EACompDeclare)s
+
+ %(InitiateAccDeclare)s
+
+ %(CompleteAccDeclare)s
+ };
+}};
+
def template AtomicMemOpDeclare {{
/**
* Static instruction class for an AtomicMemOp operation
@@ -129,6 +190,18 @@ def template AtomicMemOpDeclare {{
};
}};
+def template LRSCConstructor {{
+ %(class_name)s::%(class_name)s(ExtMachInst machInst):
+ %(base_class)s("%(mnemonic)s", machInst, %(op_class)s)
+ {
+ %(constructor)s;
+ if (AQ)
+ memAccessFlags = memAccessFlags | Request::ACQUIRE;
+ if (RL)
+ memAccessFlags = memAccessFlags | Request::RELEASE;
+ }
+}};
+
def template AtomicMemOpMacroConstructor {{
%(class_name)s::%(class_name)s(ExtMachInst machInst)
: %(base_class)s("%(mnemonic)s", machInst, %(op_class)s)
@@ -169,6 +242,67 @@ def template AtomicMemOpMacroDecode {{
return new %(class_name)s(machInst);
}};
+def template LoadReservedExecute {{
+ Fault
+ %(class_name)s::execute(CPU_EXEC_CONTEXT *xc,
+ Trace::InstRecord *traceData) const
+ {
+ Addr EA;
+ Fault fault = NoFault;
+
+ %(op_decl)s;
+ %(op_rd)s;
+ %(ea_code)s;
+
+ if (fault == NoFault) {
+ fault = readMemAtomic(xc, traceData, EA, Mem, memAccessFlags);
+ %(memacc_code)s;
+ }
+
+ if (fault == NoFault) {
+ %(op_wb)s;
+ }
+
+ return fault;
+ }
+}};
+
+def template StoreCondExecute {{
+ Fault %(class_name)s::execute(CPU_EXEC_CONTEXT *xc,
+ Trace::InstRecord *traceData) const
+ {
+ Addr EA;
+ Fault fault = NoFault;
+ uint64_t result;
+
+ %(op_decl)s;
+ %(op_rd)s;
+ %(ea_code)s;
+
+ if (fault == NoFault) {
+ %(memacc_code)s;
+ }
+
+ if (fault == NoFault) {
+ fault = writeMemAtomic(xc, traceData, Mem, EA, memAccessFlags,
+ &result);
+ // RISC-V has the opposite convention gem5 has for success flags,
+ // so we invert the result here.
+ result = !result;
+ }
+
+ if (fault == NoFault) {
+ %(postacc_code)s;
+ }
+
+ if (fault == NoFault) {
+ %(op_wb)s;
+ }
+
+ return fault;
+ }
+}};
+
def template AtomicMemOpLoadExecute {{
Fault %(class_name)s::%(class_name)sLoad::execute(CPU_EXEC_CONTEXT *xc,
Trace::InstRecord *traceData) const
@@ -224,6 +358,27 @@ def template AtomicMemOpStoreExecute {{
}
}};
+def template LRSCEACompExecute {{
+ Fault
+ %(class_name)s::eaComp(CPU_EXEC_CONTEXT *xc,
+ Trace::InstRecord *traceData) const
+ {
+ Addr EA;
+ Fault fault = NoFault;
+
+ %(op_decl)s;
+ %(op_rd)s;
+ %(ea_code)s;
+
+ if (fault == NoFault) {
+ %(op_wb)s;
+ xc->setEA(EA);
+ }
+
+ return fault;
+ }
+}};
+
def template AtomicMemOpLoadEACompExecute {{
Fault %(class_name)s::%(class_name)sLoad::eaComp(CPU_EXEC_CONTEXT *xc,
Trace::InstRecord *traceData) const
@@ -264,6 +419,55 @@ def template AtomicMemOpStoreEACompExecute {{
}
}};
+def template LoadReservedInitiateAcc {{
+ Fault
+ %(class_name)s::initiateAcc(CPU_EXEC_CONTEXT *xc,
+ Trace::InstRecord *traceData) const
+ {
+ Addr EA;
+ Fault fault = NoFault;
+
+ %(op_src_decl)s;
+ %(op_rd)s;
+ %(ea_code)s;
+
+ if (fault == NoFault) {
+ fault = initiateMemRead(xc, traceData, EA, Mem, memAccessFlags);
+ }
+
+ return fault;
+ }
+}};
+
+def template StoreCondInitiateAcc {{
+ Fault
+ %(class_name)s::initiateAcc(CPU_EXEC_CONTEXT *xc,
+ Trace::InstRecord *traceData) const
+ {
+ Addr EA;
+ Fault fault = NoFault;
+
+ %(op_decl)s;
+ %(op_rd)s;
+ %(ea_code)s;
+
+ if (fault == NoFault) {
+ %(memacc_code)s;
+ }
+
+ if (fault == NoFault) {
+ fault = writeMemTiming(xc, traceData, Mem, EA,
+ memAccessFlags, nullptr);
+ }
+
+ if (fault == NoFault) {
+ %(op_wb)s;
+ }
+
+ return fault;
+ }
+}};
+
def template AtomicMemOpLoadInitiateAcc {{
Fault %(class_name)s::%(class_name)sLoad::initiateAcc(CPU_EXEC_CONTEXT *xc,
Trace::InstRecord *traceData) const
@@ -311,6 +515,54 @@ def template AtomicMemOpStoreInitiateAcc {{
}
}};
+def template LoadReservedCompleteAcc {{
+ Fault
+ %(class_name)s::completeAcc(PacketPtr pkt, CPU_EXEC_CONTEXT *xc,
+ Trace::InstRecord *traceData) const
+ {
+ Fault fault = NoFault;
+
+ %(op_decl)s;
+ %(op_rd)s;
+
+ getMem(pkt, Mem, traceData);
+
+ if (fault == NoFault) {
+ %(memacc_code)s;
+ }
+
+ if (fault == NoFault) {
+ %(op_wb)s;
+ }
+
+ return fault;
+ }
+}};
+
+def template StoreCondCompleteAcc {{
+ Fault %(class_name)s::completeAcc(Packet *pkt, CPU_EXEC_CONTEXT *xc,
+ Trace::InstRecord *traceData) const
+ {
+ Fault fault = NoFault;
+
+ %(op_dest_decl)s;
+
+ // RISC-V has the opposite convention gem5 has for success flags,
+ // so we invert the result here.
+ uint64_t result = !pkt->req->getExtraData();
+
+ if (fault == NoFault) {
+ %(postacc_code)s;
+ }
+
+ if (fault == NoFault) {
+ %(op_wb)s;
+ }
+
+ return fault;
+ }
+}};
+
def template AtomicMemOpLoadCompleteAcc {{
Fault %(class_name)s::%(class_name)sLoad::completeAcc(PacketPtr pkt,
CPU_EXEC_CONTEXT *xc, Trace::InstRecord *traceData) const
@@ -342,6 +594,44 @@ def template AtomicMemOpStoreCompleteAcc {{
}
}};
+def format LoadReserved(memacc_code, postacc_code={{ }}, ea_code={{EA = Rs1;}},
+ mem_flags=[], inst_flags=[]) {{
+ mem_flags = makeList(mem_flags)
+ inst_flags = makeList(inst_flags)
+ iop = InstObjParams(name, Name, 'LoadReserved',
+ {'ea_code': ea_code, 'memacc_code': memacc_code,
+ 'postacc_code': postacc_code}, inst_flags)
+ iop.constructor += '\n\tmemAccessFlags = memAccessFlags | ' + \
+ '|'.join(['Request::%s' % flag for flag in mem_flags]) + ';'
+
+ header_output = LRSCDeclare.subst(iop)
+ decoder_output = LRSCConstructor.subst(iop)
+ decode_block = BasicDecode.subst(iop)
+ exec_output = LoadReservedExecute.subst(iop) \
+ + LRSCEACompExecute.subst(iop) \
+ + LoadReservedInitiateAcc.subst(iop) \
+ + LoadReservedCompleteAcc.subst(iop)
+}};
+
+def format StoreCond(memacc_code, postacc_code={{ }}, ea_code={{EA = Rs1;}},
+ mem_flags=[], inst_flags=[]) {{
+ mem_flags = makeList(mem_flags)
+ inst_flags = makeList(inst_flags)
+ iop = InstObjParams(name, Name, 'StoreCond',
+ {'ea_code': ea_code, 'memacc_code': memacc_code,
+ 'postacc_code': postacc_code}, inst_flags)
+ iop.constructor += '\n\tmemAccessFlags = memAccessFlags | ' + \
+ '|'.join(['Request::%s' % flag for flag in mem_flags]) + ';'
+
+ header_output = LRSCDeclare.subst(iop)
+ decoder_output = LRSCConstructor.subst(iop)
+ decode_block = BasicDecode.subst(iop)
+ exec_output = StoreCondExecute.subst(iop) \
+ + LRSCEACompExecute.subst(iop) \
+ + StoreCondInitiateAcc.subst(iop) \
+ + StoreCondCompleteAcc.subst(iop)
+}};
+
def format AtomicMemOp(load_code, store_code, ea_code, load_flags=[],
store_flags=[], inst_flags=[]) {{
macro_iop = InstObjParams(name, Name, 'AtomicMemOp', ea_code, inst_flags)
diff --git a/src/arch/riscv/isa/formats/mem.isa b/src/arch/riscv/isa/formats/mem.isa
index 69a72dfa8..2a00850a2 100644
--- a/src/arch/riscv/isa/formats/mem.isa
+++ b/src/arch/riscv/isa/formats/mem.isa
@@ -186,10 +186,6 @@ def LoadStoreBase(name, Name, ea_code, memacc_code, mem_flags, inst_flags,
# select templates
- # The InitiateAcc template is the same for StoreCond templates as the
- # corresponding Store template..
- StoreCondInitiateAcc = StoreInitiateAcc
-
fullExecTemplate = eval(exec_template_base + 'Execute')
initiateAccTemplate = eval(exec_template_base + 'InitiateAcc')
completeAccTemplate = eval(exec_template_base + 'CompleteAcc')
@@ -344,66 +340,6 @@ def template StoreCompleteAcc {{
}
}};
-def template StoreCondExecute {{
- Fault %(class_name)s::execute(CPU_EXEC_CONTEXT *xc,
- Trace::InstRecord *traceData) const
- {
- Addr EA;
- Fault fault = NoFault;
- uint64_t result;
-
- %(op_decl)s;
- %(op_rd)s;
- %(ea_code)s;
-
- if (fault == NoFault) {
- %(memacc_code)s;
- }
-
- if (fault == NoFault) {
- fault = writeMemAtomic(xc, traceData, Mem, EA, memAccessFlags,
- &result);
- // RISC-V has the opposite convention gem5 has for success flags,
- // so we invert the result here.
- result = !result;
- }
-
- if (fault == NoFault) {
- %(postacc_code)s;
- }
-
- if (fault == NoFault) {
- %(op_wb)s;
- }
-
- return fault;
- }
-}};
-
-def template StoreCondCompleteAcc {{
- Fault %(class_name)s::completeAcc(Packet *pkt, CPU_EXEC_CONTEXT *xc,
- Trace::InstRecord *traceData) const
- {
- Fault fault = NoFault;
-
- %(op_dest_decl)s;
-
- // RISC-V has the opposite convention gem5 has for success flags,
- // so we invert the result here.
- uint64_t result = !pkt->req->getExtraData();
-
- if (fault == NoFault) {
- %(postacc_code)s;
- }
-
- if (fault == NoFault) {
- %(op_wb)s;
- }
-
- return fault;
- }
-}};
-
def format Load(memacc_code, ea_code = {{EA = Rs1 + ldisp;}}, mem_flags=[],
inst_flags=[]) {{
(header_output, decoder_output, decode_block, exec_output) = \
@@ -417,25 +353,3 @@ def format Store(memacc_code, ea_code={{EA = Rs1 + sdisp;}}, mem_flags=[],
LoadStoreBase(name, Name, ea_code, memacc_code, mem_flags, inst_flags,
'Store', exec_template_base='Store')
}};
-
-def format StoreCond(memacc_code, postacc_code, ea_code={{EA = Rs1;}},
- mem_flags=[], inst_flags=[], aq=0, rl=0) {{
- if aq:
- mem_flags = makeList(mem_flags) + ["ACQUIRE"]
- if rl:
- mem_flags = makeList(mem_flags) + ["RELEASE"]
- (header_output, decoder_output, decode_block, exec_output) = LoadStoreBase(
- name, Name, ea_code, memacc_code, mem_flags, inst_flags, 'Store',
- postacc_code, exec_template_base='StoreCond')
-}};
-
-def format LoadReserved(memacc_code, ea_code={{EA = Rs1;}}, mem_flags=[],
- inst_flags=[], aq=0, rl=0) {{
- if aq:
- mem_flags = makeList(mem_flags) + ["ACQUIRE"]
- if rl:
- mem_flags = makeList(mem_flags) + ["RELEASE"]
- (header_output, decoder_output, decode_block, exec_output) = LoadStoreBase(
- name, Name, ea_code, memacc_code, mem_flags, inst_flags, 'Load',
- exec_template_base='Load')
-}};