summaryrefslogtreecommitdiff
path: root/src/arch/riscv
diff options
context:
space:
mode:
authorAlec Roelke <ar4jc@virginia.edu>2017-06-14 17:33:29 -0400
committerAlec Roelke <ar4jc@virginia.edu>2017-07-11 03:45:14 +0000
commit7e6a35374a944b67868d92ce85b427ea9103ca53 (patch)
tree0fe3c97c11967468b2c66ce0edbc656d3c485a61 /src/arch/riscv
parent63d4005a29dea37e0219444a3de2cdb25289fdfb (diff)
downloadgem5-7e6a35374a944b67868d92ce85b427ea9103ca53.tar.xz
arch-riscv: Add support for compressed extension RV64C
This patch adds compatibility with the 64-bit compressed extension to the RISC-V ISA, RV64C. Current versions of the toolchain may use compressed instructions in glibc by default, which can only be overridden by recompiling the entire toolchain (simply adding "-march=rv64g" or "-march=rv64imafd" when compiling a binary is not sufficient to use uncompressed instructions in glibc functions in the binary). [Update disassembly generation for new RegId type.] [Rebase onto master.] Change-Id: Ifd5a5ea746704ce7e1b111442c3eb84c509a98b4 Reviewed-on: https://gem5-review.googlesource.com/3860 Reviewed-by: Alec Roelke <ar4jc@virginia.edu> Maintainer: Alec Roelke <ar4jc@virginia.edu>
Diffstat (limited to 'src/arch/riscv')
-rw-r--r--src/arch/riscv/decoder.cc61
-rw-r--r--src/arch/riscv/decoder.hh64
-rw-r--r--src/arch/riscv/isa/bitfields.isa26
-rw-r--r--src/arch/riscv/isa/decoder.isa2611
-rw-r--r--src/arch/riscv/isa/formats/compressed.isa102
-rw-r--r--src/arch/riscv/isa/formats/formats.isa5
-rw-r--r--src/arch/riscv/isa/formats/mem.isa41
-rw-r--r--src/arch/riscv/isa/includes.isa2
-rw-r--r--src/arch/riscv/isa/operands.isa12
-rw-r--r--src/arch/riscv/types.hh32
10 files changed, 1730 insertions, 1226 deletions
diff --git a/src/arch/riscv/decoder.cc b/src/arch/riscv/decoder.cc
index acda6d04f..36504f4f8 100644
--- a/src/arch/riscv/decoder.cc
+++ b/src/arch/riscv/decoder.cc
@@ -1,5 +1,6 @@
/*
* Copyright (c) 2012 Google
+ * Copyright (c) 2017 The University of Virginia
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -26,13 +27,71 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* Authors: Gabe Black
+ * Alec Roelke
*/
#include "arch/riscv/decoder.hh"
+#include "arch/riscv/types.hh"
+#include "debug/Decode.hh"
namespace RiscvISA
{
-GenericISA::BasicDecodeCache Decoder::defaultCache;
+void
+Decoder::moreBytes(const PCState &pc, Addr fetchPC, MachInst inst)
+{
+ DPRINTF(Decode, "Getting bytes 0x%08x from address %#x\n",
+ inst, pc.pc());
+
+ bool aligned = pc.pc() % sizeof(MachInst) == 0;
+ if (mid) {
+ assert(!aligned);
+ emi |= (inst & 0xFFFF) << 16;
+ instDone = true;
+ } else {
+ MachInst instChunk = aligned ? inst & 0xFFFF :
+ (inst & 0xFFFF0000) >> 16;
+ if (aligned) {
+ emi = (inst & 0x3) < 0x3 ? instChunk : inst;
+ instDone = true;
+ } else {
+ emi = instChunk;
+ instDone = (instChunk & 0x3) < 0x3;
+ }
+ }
+ mid = !instDone;
+}
+
+StaticInstPtr
+Decoder::decode(ExtMachInst mach_inst, Addr addr)
+{
+ DPRINTF(Decode, "Decoding instruction 0x%08x at address %#x\n",
+ mach_inst, addr);
+ if (instMap.find(mach_inst) != instMap.end())
+ return instMap[mach_inst];
+ else {
+ StaticInstPtr si = decodeInst(mach_inst);
+ instMap[mach_inst] = si;
+ return si;
+ }
+}
+
+StaticInstPtr
+Decoder::decode(RiscvISA::PCState &nextPC)
+{
+ if (!instDone)
+ return nullptr;
+ instDone = false;
+
+ if ((emi & 0x3) < 0x3) {
+ nextPC.compressed(true);
+ nextPC.npc(nextPC.pc() + sizeof(MachInst)/2);
+ } else {
+ nextPC.compressed(false);
+ nextPC.npc(nextPC.pc() + sizeof(MachInst));
+ }
+
+ return decode(emi, nextPC.instAddr());
+}
}
diff --git a/src/arch/riscv/decoder.hh b/src/arch/riscv/decoder.hh
index b1d91d610..ef644fa13 100644
--- a/src/arch/riscv/decoder.hh
+++ b/src/arch/riscv/decoder.hh
@@ -1,5 +1,6 @@
/*
* Copyright (c) 2012 Google
+ * Copyright (c) 2017 The University of Virginia
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -26,16 +27,19 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* Authors: Gabe Black
+ * Alec Roelke
*/
#ifndef __ARCH_RISCV_DECODER_HH__
#define __ARCH_RISCV_DECODER_HH__
#include "arch/generic/decode_cache.hh"
+#include "arch/riscv/isa_traits.hh"
#include "arch/riscv/types.hh"
#include "base/misc.hh"
#include "base/types.hh"
#include "cpu/static_inst.hh"
+#include "debug/Decode.hh"
namespace RiscvISA
{
@@ -43,73 +47,39 @@ namespace RiscvISA
class ISA;
class Decoder
{
+ private:
+ DecodeCache::InstMap instMap;
+ bool mid;
+
protected:
//The extended machine instruction being generated
ExtMachInst emi;
bool instDone;
public:
- Decoder(ISA* isa = nullptr) : instDone(false)
+ Decoder(ISA* isa=nullptr)
+ : mid(false), emi(NoopMachInst), instDone(false)
{}
- void
- process()
- {
- }
-
- void
- reset()
- {
- instDone = false;
- }
+ void process() {}
+ void reset() { instDone = false; }
//Use this to give data to the decoder. This should be used
//when there is control flow.
- void
- moreBytes(const PCState &pc, Addr fetchPC, MachInst inst)
- {
- emi = inst;
- instDone = true;
- }
-
- bool
- needMoreBytes()
- {
- return true;
- }
-
- bool
- instReady()
- {
- return instDone;
- }
+ void moreBytes(const PCState &pc, Addr fetchPC, MachInst inst);
+ bool needMoreBytes() { return true; }
+ bool instReady() { return instDone; }
void takeOverFrom(Decoder *old) {}
- protected:
- /// A cache of decoded instruction objects.
- static GenericISA::BasicDecodeCache defaultCache;
-
- public:
StaticInstPtr decodeInst(ExtMachInst mach_inst);
/// Decode a machine instruction.
/// @param mach_inst The binary instruction to decode.
/// @retval A pointer to the corresponding StaticInst object.
- StaticInstPtr
- decode(ExtMachInst mach_inst, Addr addr)
- {
- return defaultCache.decode(this, mach_inst, addr);
- }
+ StaticInstPtr decode(ExtMachInst mach_inst, Addr addr);
- StaticInstPtr
- decode(RiscvISA::PCState &nextPC)
- {
- if (!instDone)
- return nullptr;
- instDone = false;
- return decode(emi, nextPC.instAddr());
- }
+ StaticInstPtr decode(RiscvISA::PCState &nextPC);
};
} // namespace RiscvISA
diff --git a/src/arch/riscv/isa/bitfields.isa b/src/arch/riscv/isa/bitfields.isa
index 23099a5e8..8372ed973 100644
--- a/src/arch/riscv/isa/bitfields.isa
+++ b/src/arch/riscv/isa/bitfields.isa
@@ -35,8 +35,8 @@
// Bitfield definitions.
//
-def bitfield OPCODE <6:0>;
-def bitfield NONOPCODE <31:7>;
+def bitfield QUADRANT <1:0>;
+def bitfield OPCODE <6:2>;
// R-Type
def bitfield ALL <31:0>;
@@ -69,7 +69,7 @@ def bitfield BIMM12BITS10TO5 <30:25>;
// UJ-Type
def bitfield UJIMMBITS10TO1 <30:21>;
-def bitfield UJIMMBIT11 <20>;
+def bitfield UJIMMBIT11 <20>;
def bitfield UJIMMBITS19TO12 <19:12>;
// System
@@ -90,3 +90,23 @@ def bitfield FUNCT2 <26:25>;
def bitfield AMOFUNCT <31:27>;
def bitfield AQ <26>;
def bitfield RL <25>;
+
+// Compressed
+def bitfield COPCODE <15:13>;
+def bitfield CFUNCT1 <12>;
+def bitfield CFUNCT2HIGH <11:10>;
+def bitfield CFUNCT2LOW <6:5>;
+def bitfield RC1 <11:7>;
+def bitfield RC2 <6:2>;
+def bitfield RP1 <9:7>;
+def bitfield RP2 <4:2>;
+def bitfield FC1 <11:7>;
+def bitfield FC2 <6:2>;
+def bitfield FP2 <4:2>;
+def bitfield CJUMPIMM <12:2>;
+def bitfield CIMM8 <12:5>;
+def bitfield CIMM6 <12:7>;
+def bitfield CIMM5 <6:2>;
+def bitfield CIMM3 <12:10>;
+def bitfield CIMM2 <6:5>;
+def bitfield CIMM1 <12>;
diff --git a/src/arch/riscv/isa/decoder.isa b/src/arch/riscv/isa/decoder.isa
index 8056d9615..4f4ef7636 100644
--- a/src/arch/riscv/isa/decoder.isa
+++ b/src/arch/riscv/isa/decoder.isa
@@ -1,7 +1,7 @@
// -*- mode:c++ -*-
// Copyright (c) 2015 RISC-V Foundation
-// Copyright (c) 2016 The University of Virginia
+// Copyright (c) 2017 The University of Virginia
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
@@ -34,1351 +34,1654 @@
// The RISC-V ISA decoder
//
-decode OPCODE default Unknown::unknown() {
- 0x03: decode FUNCT3 {
- format Load {
- 0x0: lb({{
- Rd_sd = Mem_sb;
- }});
- 0x1: lh({{
- Rd_sd = Mem_sh;
- }});
- 0x2: lw({{
- Rd_sd = Mem_sw;
- }});
- 0x3: ld({{
- Rd_sd = Mem_sd;
- }});
- 0x4: lbu({{
- Rd = Mem_ub;
+decode QUADRANT default Unknown::unknown() {
+ 0x0: decode COPCODE {
+ 0x0: CUIOp::c_addi4spn({{
+ imm = CIMM8<1:1> << 2 |
+ CIMM8<0:0> << 3 |
+ CIMM8<7:6> << 4 |
+ CIMM8<5:2> << 6;
+ }}, {{
+ assert(imm != 0);
+ Rp2 = sp + imm;
+ }});
+ format CompressedLoad {
+ 0x1: c_fld({{
+ ldisp = CIMM3 << 3 | CIMM2 << 6;
+ }}, {{
+ Fp2_bits = Mem;
+ }}, {{
+ EA = Rp1 + ldisp;
}});
- 0x5: lhu({{
- Rd = Mem_uh;
+ 0x2: c_lw({{
+ ldisp = CIMM2<1:1> << 2 |
+ CIMM3 << 3 |
+ CIMM2<0:0> << 6;
+ }}, {{
+ Rp2_sd = Mem_sw;
+ }}, {{
+ EA = Rp1 + ldisp;
}});
- 0x6: lwu({{
- Rd = Mem_uw;
+ 0x3: c_ld({{
+ ldisp = CIMM3 << 3 | CIMM2 << 6;
+ }}, {{
+ Rp2_sd = Mem_sd;
+ }}, {{
+ EA = Rp1 + ldisp;
}});
}
- }
-
- 0x07: decode FUNCT3 {
- format Load {
- 0x2: flw({{
- Fd_bits = (uint64_t)Mem_uw;
+ format CompressedStore {
+ 0x5: c_fsd({{
+ sdisp = CIMM3 << 3 | CIMM2 << 6;
+ }}, {{
+ Mem = Fp2_bits;
+ }}, {{
+ EA = Rp1 + sdisp;
}});
- 0x3: fld({{
- Fd_bits = Mem;
+ 0x6: c_sw({{
+ sdisp = CIMM2<1:1> << 2 |
+ CIMM3 << 3 |
+ CIMM2<0:0> << 6;
+ }}, {{
+ Mem_uw = Rp2_uw;
+ }}, ea_code={{
+ EA = Rp1 + sdisp;
+ }});
+ 0x7: c_sd({{
+ sdisp = CIMM3 << 3 | CIMM2 << 6;
+ }}, {{
+ Mem_ud = Rp2_ud;
+ }}, {{
+ EA = Rp1 + sdisp;
}});
}
}
-
- 0x0f: decode FUNCT3 {
- format IOp {
- 0x0: fence({{
- }}, IsNonSpeculative, IsMemBarrier, No_OpClass);
- 0x1: fence_i({{
- }}, IsNonSpeculative, IsSerializeAfter, No_OpClass);
- }
- }
-
- 0x13: decode FUNCT3 {
- format IOp {
- 0x0: addi({{
- Rd_sd = Rs1_sd + imm;
- }});
- 0x1: slli({{
- Rd = Rs1 << SHAMT6;
- }});
- 0x2: slti({{
- Rd = (Rs1_sd < imm) ? 1 : 0;
+ 0x1: decode COPCODE {
+ format CIOp {
+ 0x0: c_addi({{
+ imm = CIMM5;
+ if (CIMM1 > 0)
+ imm |= ~((uint64_t)0x1F);
+ }}, {{
+ assert((RC1 == 0) == (imm == 0));
+ Rc1_sd = Rc1_sd + imm;
}});
- 0x3: sltiu({{
- Rd = (Rs1 < (uint64_t)imm) ? 1 : 0;
+ 0x1: c_addiw({{
+ imm = CIMM5;
+ if (CIMM1 > 0)
+ imm |= ~((uint64_t)0x1F);
+ }}, {{
+ assert(RC1 != 0);
+ Rc1_sd = (int32_t)Rc1_sd + imm;
}});
- 0x4: xori({{
- Rd = Rs1 ^ (uint64_t)imm;
+ 0x2: c_li({{
+ imm = CIMM5;
+ if (CIMM1 > 0)
+ imm |= ~((uint64_t)0x1F);
+ }}, {{
+ assert(RC1 != 0);
+ Rc1_sd = imm;
}});
- 0x5: decode SRTYPE {
- 0x0: srli({{
- Rd = Rs1 >> SHAMT6;
+ 0x3: decode RC1 {
+ 0x2: c_addi16sp({{
+ imm = CIMM5<4:4> << 4 |
+ CIMM5<0:0> << 5 |
+ CIMM5<3:3> << 6 |
+ CIMM5<2:1> << 7;
+ if (CIMM1 > 0)
+ imm |= ~((int64_t)0x1FF);
+ }}, {{
+ assert(imm != 0);
+ sp_sd = sp_sd + imm;
}});
- 0x1: srai({{
- Rd_sd = Rs1_sd >> SHAMT6;
+ default: c_lui({{
+ imm = CIMM5 << 12;
+ if (CIMM1 > 0)
+ imm |= ~((uint64_t)0x1FFFF);
+ }}, {{
+ assert(RC1 != 0 && RC1 != 2);
+ assert(imm != 0);
+ Rc1_sd = imm;
}});
}
- 0x6: ori({{
- Rd = Rs1 | (uint64_t)imm;
- }});
- 0x7: andi({{
- Rd = Rs1 & (uint64_t)imm;
- }});
}
- }
-
- 0x17: UOp::auipc({{
- Rd = PC + imm;
- }});
-
- 0x1b: decode FUNCT3 {
- format IOp {
- 0x0: addiw({{
- Rd_sd = (int32_t)Rs1 + (int32_t)imm;
- }});
- 0x1: slliw({{
- Rd_sd = Rs1_sw << SHAMT5;
- }});
- 0x5: decode SRTYPE {
- 0x0: srliw({{
- Rd = Rs1_uw >> SHAMT5;
+ 0x4: decode CFUNCT2HIGH {
+ format CUIOp {
+ 0x0: c_srli({{
+ imm = CIMM5 | (CIMM1 << 5);
+ assert(imm != 0);
+ }}, {{
+ Rp1 = Rp1 >> imm;
}});
- 0x1: sraiw({{
- Rd_sd = Rs1_sw >> SHAMT5;
+ 0x1: c_srai({{
+ imm = CIMM5 | (CIMM1 << 5);
+ assert(imm != 0);
+ }}, {{
+ Rp1_sd = Rp1_sd >> imm;
}});
+ 0x2: c_andi({{
+ imm = CIMM5;
+ if (CIMM1 > 0)
+ imm |= ~((uint64_t)0x1F);
+ }}, {{
+ Rp1 = Rp1 & imm;
+ }});
+ }
+ format ROp {
+ 0x3: decode CFUNCT1 {
+ 0x0: decode CFUNCT2LOW {
+ 0x0: c_sub({{
+ Rp1 = Rp1 - Rp2;
+ }});
+ 0x1: c_xor({{
+ Rp1 = Rp1 ^ Rp2;
+ }});
+ 0x2: c_or({{
+ Rp1 = Rp1 | Rp2;
+ }});
+ 0x3: c_and({{
+ Rp1 = Rp1 & Rp2;
+ }});
+ }
+ 0x1: decode CFUNCT2LOW {
+ 0x0: c_subw({{
+ Rp1_sd = (int32_t)Rp1_sd - Rp2_sw;
+ }});
+ 0x1: c_addw({{
+ Rp1_sd = (int32_t)Rp1_sd + Rp2_sw;
+ }});
+ }
+ }
}
}
- }
+ 0x5: JOp::c_j({{
+ int64_t offset = CJUMPIMM<3:1> << 1 |
+ CJUMPIMM<9:9> << 4 |
+ CJUMPIMM<0:0> << 5 |
+ CJUMPIMM<5:5> << 6 |
+ CJUMPIMM<4:4> << 7 |
+ CJUMPIMM<8:7> << 8 |
+ CJUMPIMM<6:6> << 10;
+ if (CJUMPIMM<10:10> > 0)
+ offset |= ~((int64_t)0x7FF);
+ NPC = PC + offset;
+ }}, IsIndirectControl, IsUncondControl, IsCall);
+ format BOp {
+ 0x6: c_beqz({{
+ int64_t offset = CIMM5<2:1> << 1 |
+ CIMM3<1:0> << 3 |
+ CIMM5<0:0> << 5 |
+ CIMM5<4:3> << 6;
+ if (CIMM3<2:2> > 0)
+ offset |= ~((int64_t)0xFF);
- 0x23: decode FUNCT3 {
- format Store {
- 0x0: sb({{
- Mem_ub = Rs2_ub;
- }});
- 0x1: sh({{
- Mem_uh = Rs2_uh;
- }});
- 0x2: sw({{
- Mem_uw = Rs2_uw;
- }});
- 0x3: sd({{
- Mem_ud = Rs2_ud;
- }});
+ if (Rp1 == 0)
+ NPC = PC + offset;
+ else
+ NPC = NPC;
+ }}, IsDirectControl, IsCondControl);
+ 0x7: c_bnez({{
+ int64_t offset = CIMM5<2:1> << 1 |
+ CIMM3<1:0> << 3 |
+ CIMM5<0:0> << 5 |
+ CIMM5<4:3> << 6;
+ if (CIMM3<2:2> > 0)
+ offset |= ~((int64_t)0xFF);
+
+ if (Rp1 != 0)
+ NPC = PC + offset;
+ else
+ NPC = NPC;
+ }}, IsDirectControl, IsCondControl);
}
}
-
- 0x27: decode FUNCT3 {
- format Store {
- 0x2: fsw({{
- Mem_uw = (uint32_t)Fs2_bits;
+ 0x2: decode COPCODE {
+ 0x0: CUIOp::c_slli({{
+ imm = CIMM5 | (CIMM1 << 5);
+ assert(imm != 0);
+ }}, {{
+ assert(RC1 != 0);
+ Rc1 = Rc1 << imm;
+ }});
+ format CompressedLoad {
+ 0x1: c_fldsp({{
+ ldisp = CIMM5<4:3> << 3 |
+ CIMM1 << 5 |
+ CIMM5<2:0> << 6;
+ }}, {{
+ Fc1_bits = Mem;
+ }}, {{
+ EA = sp + ldisp;
}});
- 0x3: fsd({{
- Mem_ud = Fs2_bits;
+ 0x2: c_lwsp({{
+ ldisp = CIMM5<4:2> << 2 |
+ CIMM1 << 5 |
+ CIMM5<1:0> << 6;
+ }}, {{
+ assert(RC1 != 0);
+ Rc1_sd = Mem_sw;
+ }}, {{
+ EA = sp + ldisp;
}});
- }
- }
-
- 0x2f: decode FUNCT3 {
- 0x2: decode AMOFUNCT {
- 0x2: LoadReserved::lr_w({{
- Rd_sd = Mem_sw;
- }}, mem_flags=LLSC);
- 0x3: StoreCond::sc_w({{
- Mem_uw = Rs2_uw;
+ 0x3: c_ldsp({{
+ ldisp = CIMM5<4:3> << 3 |
+ CIMM1 << 5 |
+ CIMM5<2:0> << 6;
+ }}, {{
+ assert(RC1 != 0);
+ Rc1_sd = Mem_sd;
}}, {{
- Rd = result;
- }}, inst_flags=IsStoreConditional, mem_flags=LLSC);
- format AtomicMemOp {
- 0x0: amoadd_w({{Rt_sd = Mem_sw;}}, {{
- Mem_sw = Rs2_sw + Rt_sd;
- Rd_sd = Rt_sd;
- }}, {{EA = Rs1;}});
- 0x1: amoswap_w({{Rt_sd = Mem_sw;}}, {{
- Mem_sw = Rs2_uw;
- Rd_sd = Rt_sd;
- }}, {{EA = Rs1;}});
- 0x4: amoxor_w({{Rt_sd = Mem_sw;}}, {{
- Mem_sw = Rs2_uw^Rt_sd;
- Rd_sd = Rt_sd;
- }}, {{EA = Rs1;}});
- 0x8: amoor_w({{Rt_sd = Mem_sw;}}, {{
- Mem_sw = Rs2_uw | Rt_sd;
- Rd_sd = Rt_sd;
- }}, {{EA = Rs1;}});
- 0xc: amoand_w({{Rt_sd = Mem_sw;}}, {{
- Mem_sw = Rs2_uw&Rt_sd;
- Rd_sd = Rt_sd;
- }}, {{EA = Rs1;}});
- 0x10: amomin_w({{Rt_sd = Mem_sw;}}, {{
- Mem_sw = std::min<int32_t>(Rs2_sw, Rt_sd);
- Rd_sd = Rt_sd;
- }}, {{EA = Rs1;}});
- 0x14: amomax_w({{Rt_sd = Mem_sw;}}, {{
- Mem_sw = std::max<int32_t>(Rs2_sw, Rt_sd);
- Rd_sd = Rt_sd;
- }}, {{EA = Rs1;}});
- 0x18: amominu_w({{Rt_sd = Mem_sw;}}, {{
- Mem_sw = std::min<uint32_t>(Rs2_uw, Rt_sd);
- Rd_sd = Rt_sd;
- }}, {{EA = Rs1;}});
- 0x1c: amomaxu_w({{Rt_sd = Mem_sw;}}, {{
- Mem_sw = std::max<uint32_t>(Rs2_uw, Rt_sd);
- Rd_sd = Rt_sd;
- }}, {{EA = Rs1;}});
+ EA = sp + ldisp;
+ }});
+ }
+ 0x4: decode CFUNCT1 {
+ 0x0: decode RC2 {
+ 0x0: Jump::c_jr({{
+ assert(RC1 != 0);
+ NPC = Rc1;
+ }}, IsIndirectControl, IsUncondControl, IsCall);
+ default: CROp::c_mv({{
+ assert(RC1 != 0);
+ Rc1 = Rc2;
+ }});
+ }
+ 0x1: decode RC1 {
+ 0x0: SystemOp::c_ebreak({{
+ assert(RC2 == 0);
+ fault = make_shared<BreakpointFault>();
+ }}, IsSerializeAfter, IsNonSpeculative, No_OpClass);
+ default: decode RC2 {
+ 0x0: Jump::c_jalr({{
+ assert(RC1 != 0);
+ ra = NPC;
+ NPC = Rc1;
+ }}, IsIndirectControl, IsUncondControl, IsCall);
+ default: ROp::c_add({{
+ Rc1_sd = Rc1_sd + Rc2_sd;
+ }});
+ }
}
}
- 0x3: decode AMOFUNCT {
- 0x2: LoadReserved::lr_d({{
- Rd_sd = Mem_sd;
- }}, mem_flags=LLSC);
- 0x3: StoreCond::sc_d({{
- Mem = Rs2;
+ format CompressedStore {
+ 0x5: c_fsdsp({{
+ sdisp = CIMM6<5:3> << 3 |
+ CIMM6<2:0> << 6;
}}, {{
- Rd = result;
- }}, mem_flags=LLSC, inst_flags=IsStoreConditional);
- format AtomicMemOp {
- 0x0: amoadd_d({{Rt_sd = Mem_sd;}}, {{
- Mem_sd = Rs2_sd + Rt_sd;
- Rd_sd = Rt_sd;
- }}, {{EA = Rs1;}});
- 0x1: amoswap_d({{Rt = Mem;}}, {{
- Mem = Rs2;
- Rd = Rt;
- }}, {{EA = Rs1;}});
- 0x4: amoxor_d({{Rt = Mem;}}, {{
- Mem = Rs2^Rt;
- Rd = Rt;
- }}, {{EA = Rs1;}});
- 0x8: amoor_d({{Rt = Mem;}}, {{
- Mem = Rs2 | Rt;
- Rd = Rt;
- }}, {{EA = Rs1;}});
- 0xc: amoand_d({{Rt = Mem;}}, {{
- Mem = Rs2&Rt;
- Rd = Rt;
- }}, {{EA = Rs1;}});
- 0x10: amomin_d({{Rt_sd = Mem_sd;}}, {{
- Mem_sd = std::min(Rs2_sd, Rt_sd);
- Rd_sd = Rt_sd;
- }}, {{EA = Rs1;}});
- 0x14: amomax_d({{Rt_sd = Mem_sd;}}, {{
- Mem_sd = std::max(Rs2_sd, Rt_sd);
- Rd_sd = Rt_sd;
- }}, {{EA = Rs1;}});
- 0x18: amominu_d({{Rt = Mem;}}, {{
- Mem = std::min(Rs2, Rt);
- Rd = Rt;
- }}, {{EA = Rs1;}});
- 0x1c: amomaxu_d({{Rt = Mem;}}, {{
- Mem = std::max(Rs2, Rt);
- Rd = Rt;
- }}, {{EA = Rs1;}});
- }
+ Mem_ud = Fc2_bits;
+ }}, {{
+ EA = sp + sdisp;
+ }});
+ 0x6: c_swsp({{
+ sdisp = CIMM6<5:2> << 2 |
+ CIMM6<1:0> << 6;
+ }}, {{
+ Mem_uw = Rc2_uw;
+ }}, {{
+ EA = sp + sdisp;
+ }});
+ 0x7: c_sdsp({{
+ sdisp = CIMM6<5:3> << 3 |
+ CIMM6<2:0> << 6;
+ }}, {{
+ Mem = Rc2;
+ }}, {{
+ EA = sp + sdisp;
+ }});
}
}
- 0x33: decode FUNCT3 {
- format ROp {
- 0x0: decode FUNCT7 {
- 0x0: add({{
- Rd = Rs1_sd + Rs2_sd;
+ 0x3: decode OPCODE {
+ 0x00: decode FUNCT3 {
+ format Load {
+ 0x0: lb({{
+ Rd_sd = Mem_sb;
}});
- 0x1: mul({{
- Rd = Rs1_sd*Rs2_sd;
- }}, IntMultOp);
- 0x20: sub({{
- Rd = Rs1_sd - Rs2_sd;
+ 0x1: lh({{
+ Rd_sd = Mem_sh;
}});
- }
- 0x1: decode FUNCT7 {
- 0x0: sll({{
- Rd = Rs1 << Rs2<5:0>;
+ 0x2: lw({{
+ Rd_sd = Mem_sw;
}});
- 0x1: mulh({{
- bool negate = (Rs1_sd < 0) != (Rs2_sd < 0);
-
- uint64_t Rs1_lo = (uint32_t)std::abs(Rs1_sd);
- uint64_t Rs1_hi = (uint64_t)std::abs(Rs1_sd) >> 32;
- uint64_t Rs2_lo = (uint32_t)std::abs(Rs2_sd);
- uint64_t Rs2_hi = (uint64_t)std::abs(Rs2_sd) >> 32;
-
- uint64_t hi = Rs1_hi*Rs2_hi;
- uint64_t mid1 = Rs1_hi*Rs2_lo;
- uint64_t mid2 = Rs1_lo*Rs2_hi;
- uint64_t lo = Rs2_lo*Rs1_lo;
- uint64_t carry = ((uint64_t)(uint32_t)mid1
- + (uint64_t)(uint32_t)mid2 + (lo >> 32)) >> 32;
-
- uint64_t res = hi + (mid1 >> 32) + (mid2 >> 32) + carry;
- Rd = negate ? ~res + (Rs1_sd*Rs2_sd == 0 ? 1 : 0) : res;
- }}, IntMultOp);
- }
- 0x2: decode FUNCT7 {
- 0x0: slt({{
- Rd = (Rs1_sd < Rs2_sd) ? 1 : 0;
+ 0x3: ld({{
+ Rd_sd = Mem_sd;
}});
- 0x1: mulhsu({{
- bool negate = Rs1_sd < 0;
- uint64_t Rs1_lo = (uint32_t)std::abs(Rs1_sd);
- uint64_t Rs1_hi = (uint64_t)std::abs(Rs1_sd) >> 32;
- uint64_t Rs2_lo = (uint32_t)Rs2;
- uint64_t Rs2_hi = Rs2 >> 32;
-
- uint64_t hi = Rs1_hi*Rs2_hi;
- uint64_t mid1 = Rs1_hi*Rs2_lo;
- uint64_t mid2 = Rs1_lo*Rs2_hi;
- uint64_t lo = Rs1_lo*Rs2_lo;
- uint64_t carry = ((uint64_t)(uint32_t)mid1
- + (uint64_t)(uint32_t)mid2 + (lo >> 32)) >> 32;
-
- uint64_t res = hi + (mid1 >> 32) + (mid2 >> 32) + carry;
- Rd = negate ? ~res + (Rs1_sd*Rs2 == 0 ? 1 : 0) : res;
- }}, IntMultOp);
- }
- 0x3: decode FUNCT7 {
- 0x0: sltu({{
- Rd = (Rs1 < Rs2) ? 1 : 0;
+ 0x4: lbu({{
+ Rd = Mem_ub;
}});
- 0x1: mulhu({{
- uint64_t Rs1_lo = (uint32_t)Rs1;
- uint64_t Rs1_hi = Rs1 >> 32;
- uint64_t Rs2_lo = (uint32_t)Rs2;
- uint64_t Rs2_hi = Rs2 >> 32;
-
- uint64_t hi = Rs1_hi*Rs2_hi;
- uint64_t mid1 = Rs1_hi*Rs2_lo;
- uint64_t mid2 = Rs1_lo*Rs2_hi;
- uint64_t lo = Rs1_lo*Rs2_lo;
- uint64_t carry = ((uint64_t)(uint32_t)mid1
- + (uint64_t)(uint32_t)mid2 + (lo >> 32)) >> 32;
-
- Rd = hi + (mid1 >> 32) + (mid2 >> 32) + carry;
- }}, IntMultOp);
- }
- 0x4: decode FUNCT7 {
- 0x0: xor({{
- Rd = Rs1 ^ Rs2;
+ 0x5: lhu({{
+ Rd = Mem_uh;
}});
- 0x1: div({{
- if (Rs2_sd == 0) {
- Rd_sd = -1;
- } else if (Rs1_sd == std::numeric_limits<int64_t>::min()
- && Rs2_sd == -1) {
- Rd_sd = std::numeric_limits<int64_t>::min();
- } else {
- Rd_sd = Rs1_sd/Rs2_sd;
- }
- }}, IntDivOp);
- }
- 0x5: decode FUNCT7 {
- 0x0: srl({{
- Rd = Rs1 >> Rs2<5:0>;
- }});
- 0x1: divu({{
- if (Rs2 == 0) {
- Rd = std::numeric_limits<uint64_t>::max();
- } else {
- Rd = Rs1/Rs2;
- }
- }}, IntDivOp);
- 0x20: sra({{
- Rd_sd = Rs1_sd >> Rs2<5:0>;
+ 0x6: lwu({{
+ Rd = Mem_uw;
}});
}
- 0x6: decode FUNCT7 {
- 0x0: or({{
- Rd = Rs1 | Rs2;
+ }
+
+ 0x01: decode FUNCT3 {
+ format Load {
+ 0x2: flw({{
+ Fd_bits = (uint64_t)Mem_uw;
}});
- 0x1: rem({{
- if (Rs2_sd == 0) {
- Rd = Rs1_sd;
- } else if (Rs1_sd == std::numeric_limits<int64_t>::min()
- && Rs2_sd == -1) {
- Rd = 0;
- } else {
- Rd = Rs1_sd%Rs2_sd;
- }
- }}, IntDivOp);
- }
- 0x7: decode FUNCT7 {
- 0x0: and({{
- Rd = Rs1 & Rs2;
+ 0x3: fld({{
+ Fd_bits = Mem;
}});
- 0x1: remu({{
- if (Rs2 == 0) {
- Rd = Rs1;
- } else {
- Rd = Rs1%Rs2;
- }
- }}, IntDivOp);
}
}
- }
- 0x37: UOp::lui({{
- Rd = (uint64_t)imm;
- }});
+ 0x03: decode FUNCT3 {
+ format IOp {
+ 0x0: fence({{
+ }}, IsNonSpeculative, IsMemBarrier, No_OpClass);
+ 0x1: fence_i({{
+ }}, IsNonSpeculative, IsSerializeAfter, No_OpClass);
+ }
+ }
- 0x3b: decode FUNCT3 {
- format ROp {
- 0x0: decode FUNCT7 {
- 0x0: addw({{
- Rd_sd = Rs1_sw + Rs2_sw;
+ 0x04: decode FUNCT3 {
+ format IOp {
+ 0x0: addi({{
+ Rd_sd = Rs1_sd + imm;
}});
- 0x1: mulw({{
- Rd_sd = (int32_t)(Rs1_sw*Rs2_sw);
- }}, IntMultOp);
- 0x20: subw({{
- Rd_sd = Rs1_sw - Rs2_sw;
+ 0x1: slli({{
+ Rd = Rs1 << SHAMT6;
}});
- }
- 0x1: sllw({{
- Rd_sd = Rs1_sw << Rs2<4:0>;
- }});
- 0x4: divw({{
- if (Rs2_sw == 0) {
- Rd_sd = -1;
- } else if (Rs1_sw == std::numeric_limits<int32_t>::min()
- && Rs2_sw == -1) {
- Rd_sd = std::numeric_limits<int32_t>::min();
- } else {
- Rd_sd = Rs1_sw/Rs2_sw;
+ 0x2: slti({{
+ Rd = (Rs1_sd < imm) ? 1 : 0;
+ }});
+ 0x3: sltiu({{
+ Rd = (Rs1 < (uint64_t)imm) ? 1 : 0;
+ }});
+ 0x4: xori({{
+ Rd = Rs1 ^ (uint64_t)imm;
+ }});
+ 0x5: decode SRTYPE {
+ 0x0: srli({{
+ Rd = Rs1 >> SHAMT6;
+ }});
+ 0x1: srai({{
+ Rd_sd = Rs1_sd >> SHAMT6;
+ }});
}
- }}, IntDivOp);
- 0x5: decode FUNCT7 {
- 0x0: srlw({{
- Rd_uw = Rs1_uw >> Rs2<4:0>;
+ 0x6: ori({{
+ Rd = Rs1 | (uint64_t)imm;
}});
- 0x1: divuw({{
- if (Rs2_uw == 0) {
- Rd_sd = std::numeric_limits<IntReg>::max();
- } else {
- Rd_sd = (int32_t)(Rs1_uw/Rs2_uw);
- }
- }}, IntDivOp);
- 0x20: sraw({{
- Rd_sd = Rs1_sw >> Rs2<4:0>;
+ 0x7: andi({{
+ Rd = Rs1 & (uint64_t)imm;
}});
}
- 0x6: remw({{
- if (Rs2_sw == 0) {
- Rd_sd = Rs1_sw;
- } else if (Rs1_sw == std::numeric_limits<int32_t>::min()
- && Rs2_sw == -1) {
- Rd_sd = 0;
- } else {
- Rd_sd = Rs1_sw%Rs2_sw;
- }
- }}, IntDivOp);
- 0x7: remuw({{
- if (Rs2_uw == 0) {
- Rd_sd = (int32_t)Rs1_uw;
- } else {
- Rd_sd = (int32_t)(Rs1_uw%Rs2_uw);
- }
- }}, IntDivOp);
}
- }
- format FPROp {
- 0x43: decode FUNCT2 {
- 0x0: fmadd_s({{
- uint32_t temp;
- float fs1 = reinterpret_cast<float&>(temp = Fs1_bits);
- float fs2 = reinterpret_cast<float&>(temp = Fs2_bits);
- float fs3 = reinterpret_cast<float&>(temp = Fs3_bits);
- float fd;
+ 0x05: UOp::auipc({{
+ Rd = PC + imm;
+ }});
- if (std::isnan(fs1) || std::isnan(fs2) || std::isnan(fs3)) {
- if (issignalingnan(fs1) || issignalingnan(fs2)
- || issignalingnan(fs3)) {
- FFLAGS |= FloatInvalid;
- }
- fd = std::numeric_limits<float>::quiet_NaN();
- } else if (std::isinf(fs1) || std::isinf(fs2) ||
- std::isinf(fs3)) {
- if (std::signbit(fs1) == std::signbit(fs2)
- && !std::isinf(fs3)) {
- fd = std::numeric_limits<float>::infinity();
- } else if (std::signbit(fs1) != std::signbit(fs2)
- && !std::isinf(fs3)) {
- fd = -std::numeric_limits<float>::infinity();
- } else { // Fs3_sf is infinity
- fd = fs3;
- }
- } else {
- fd = fs1*fs2 + fs3;
- }
- Fd_bits = (uint64_t)reinterpret_cast<uint32_t&>(fd);
- }}, FloatMultOp);
- 0x1: fmadd_d({{
- if (std::isnan(Fs1) || std::isnan(Fs2) || std::isnan(Fs3)) {
- if (issignalingnan(Fs1) || issignalingnan(Fs2)
- || issignalingnan(Fs3)) {
- FFLAGS |= FloatInvalid;
- }
- Fd = std::numeric_limits<double>::quiet_NaN();
- } else if (std::isinf(Fs1) || std::isinf(Fs2) ||
- std::isinf(Fs3)) {
- if (std::signbit(Fs1) == std::signbit(Fs2)
- && !std::isinf(Fs3)) {
- Fd = std::numeric_limits<double>::infinity();
- } else if (std::signbit(Fs1) != std::signbit(Fs2)
- && !std::isinf(Fs3)) {
- Fd = -std::numeric_limits<double>::infinity();
- } else {
- Fd = Fs3;
- }
- } else {
- Fd = Fs1*Fs2 + Fs3;
+ 0x06: decode FUNCT3 {
+ format IOp {
+ 0x0: addiw({{
+ Rd_sd = (int32_t)Rs1 + (int32_t)imm;
+ }});
+ 0x1: slliw({{
+ Rd_sd = Rs1_sw << SHAMT5;
+ }});
+ 0x5: decode SRTYPE {
+ 0x0: srliw({{
+ Rd = Rs1_uw >> SHAMT5;
+ }});
+ 0x1: sraiw({{
+ Rd_sd = Rs1_sw >> SHAMT5;
+ }});
}
- }}, FloatMultOp);
+ }
}
- 0x47: decode FUNCT2 {
- 0x0: fmsub_s({{
- uint32_t temp;
- float fs1 = reinterpret_cast<float&>(temp = Fs1_bits);
- float fs2 = reinterpret_cast<float&>(temp = Fs2_bits);
- float fs3 = reinterpret_cast<float&>(temp = Fs3_bits);
- float fd;
- if (std::isnan(fs1) || std::isnan(fs2) || std::isnan(fs3)) {
- if (issignalingnan(fs1) || issignalingnan(fs2)
- || issignalingnan(fs3)) {
- FFLAGS |= FloatInvalid;
- }
- fd = std::numeric_limits<float>::quiet_NaN();
- } else if (std::isinf(fs1) || std::isinf(fs2) ||
- std::isinf(fs3)) {
- if (std::signbit(fs1) == std::signbit(fs2)
- && !std::isinf(fs3)) {
- fd = std::numeric_limits<float>::infinity();
- } else if (std::signbit(fs1) != std::signbit(fs2)
- && !std::isinf(fs3)) {
- fd = -std::numeric_limits<float>::infinity();
- } else { // Fs3_sf is infinity
- fd = -fs3;
- }
- } else {
- fd = fs1*fs2 - fs3;
- }
- Fd_bits = (uint64_t)reinterpret_cast<uint32_t&>(fd);
- }}, FloatMultOp);
- 0x1: fmsub_d({{
- if (std::isnan(Fs1) || std::isnan(Fs2) || std::isnan(Fs3)) {
- if (issignalingnan(Fs1) || issignalingnan(Fs2)
- || issignalingnan(Fs3)) {
- FFLAGS |= FloatInvalid;
- }
- Fd = std::numeric_limits<double>::quiet_NaN();
- } else if (std::isinf(Fs1) || std::isinf(Fs2) ||
- std::isinf(Fs3)) {
- if (std::signbit(Fs1) == std::signbit(Fs2)
- && !std::isinf(Fs3)) {
- Fd = std::numeric_limits<double>::infinity();
- } else if (std::signbit(Fs1) != std::signbit(Fs2)
- && !std::isinf(Fs3)) {
- Fd = -std::numeric_limits<double>::infinity();
- } else {
- Fd = -Fs3;
- }
- } else {
- Fd = Fs1*Fs2 - Fs3;
- }
- }}, FloatMultOp);
+ 0x08: decode FUNCT3 {
+ format Store {
+ 0x0: sb({{
+ Mem_ub = Rs2_ub;
+ }});
+ 0x1: sh({{
+ Mem_uh = Rs2_uh;
+ }});
+ 0x2: sw({{
+ Mem_uw = Rs2_uw;
+ }});
+ 0x3: sd({{
+ Mem_ud = Rs2_ud;
+ }});
+ }
}
- 0x4b: decode FUNCT2 {
- 0x0: fnmsub_s({{
- uint32_t temp;
- float fs1 = reinterpret_cast<float&>(temp = Fs1_bits);
- float fs2 = reinterpret_cast<float&>(temp = Fs2_bits);
- float fs3 = reinterpret_cast<float&>(temp = Fs3_bits);
- float fd;
- if (std::isnan(fs1) || std::isnan(fs2) || std::isnan(fs3)) {
- if (issignalingnan(fs1) || issignalingnan(fs2)
- || issignalingnan(fs3)) {
- FFLAGS |= FloatInvalid;
- }
- fd = std::numeric_limits<float>::quiet_NaN();
- } else if (std::isinf(fs1) || std::isinf(fs2) ||
- std::isinf(fs3)) {
- if (std::signbit(fs1) == std::signbit(fs2)
- && !std::isinf(fs3)) {
- fd = -std::numeric_limits<float>::infinity();
- } else if (std::signbit(fs1) != std::signbit(fs2)
- && !std::isinf(fs3)) {
- fd = std::numeric_limits<float>::infinity();
- } else { // Fs3_sf is infinity
- fd = fs3;
- }
- } else {
- fd = -(fs1*fs2 - fs3);
- }
- Fd_bits = (uint64_t)reinterpret_cast<uint32_t&>(fd);
- }}, FloatMultOp);
- 0x1: fnmsub_d({{
- if (std::isnan(Fs1) || std::isnan(Fs2) || std::isnan(Fs3)) {
- if (issignalingnan(Fs1) || issignalingnan(Fs2)
- || issignalingnan(Fs3)) {
- FFLAGS |= FloatInvalid;
- }
- Fd = std::numeric_limits<double>::quiet_NaN();
- } else if (std::isinf(Fs1) || std::isinf(Fs2)
- || std::isinf(Fs3)) {
- if (std::signbit(Fs1) == std::signbit(Fs2)
- && !std::isinf(Fs3)) {
- Fd = -std::numeric_limits<double>::infinity();
- } else if (std::signbit(Fs1) != std::signbit(Fs2)
- && !std::isinf(Fs3)) {
- Fd = std::numeric_limits<double>::infinity();
- } else {
- Fd = Fs3;
- }
- } else {
- Fd = -(Fs1*Fs2 - Fs3);
- }
- }}, FloatMultOp);
+ 0x09: decode FUNCT3 {
+ format Store {
+ 0x2: fsw({{
+ Mem_uw = (uint32_t)Fs2_bits;
+ }});
+ 0x3: fsd({{
+ Mem_ud = Fs2_bits;
+ }});
+ }
}
- 0x4f: decode FUNCT2 {
- 0x0: fnmadd_s({{
- uint32_t temp;
- float fs1 = reinterpret_cast<float&>(temp = Fs1_bits);
- float fs2 = reinterpret_cast<float&>(temp = Fs2_bits);
- float fs3 = reinterpret_cast<float&>(temp = Fs3_bits);
- float fd;
- if (std::isnan(fs1) || std::isnan(fs2) || std::isnan(fs3)) {
- if (issignalingnan(fs1) || issignalingnan(fs2)
- || issignalingnan(fs3)) {
- FFLAGS |= FloatInvalid;
- }
- fd = std::numeric_limits<float>::quiet_NaN();
- } else if (std::isinf(fs1) || std::isinf(fs2) ||
- std::isinf(fs3)) {
- if (std::signbit(fs1) == std::signbit(fs2)
- && !std::isinf(fs3)) {
- fd = -std::numeric_limits<float>::infinity();
- } else if (std::signbit(fs1) != std::signbit(fs2)
- && !std::isinf(fs3)) {
- fd = std::numeric_limits<float>::infinity();
- } else { // Fs3_sf is infinity
- fd = -fs3;
- }
- } else {
- fd = -(fs1*fs2 + fs3);
+ 0x0b: decode FUNCT3 {
+ 0x2: decode AMOFUNCT {
+ 0x2: LoadReserved::lr_w({{
+ Rd_sd = Mem_sw;
+ }}, mem_flags=LLSC);
+ 0x3: StoreCond::sc_w({{
+ Mem_uw = Rs2_uw;
+ }}, {{
+ Rd = result;
+ }}, inst_flags=IsStoreConditional, mem_flags=LLSC);
+ format AtomicMemOp {
+ 0x0: amoadd_w({{Rt_sd = Mem_sw;}}, {{
+ Mem_sw = Rs2_sw + Rt_sd;
+ Rd_sd = Rt_sd;
+ }}, {{EA = Rs1;}});
+ 0x1: amoswap_w({{Rt_sd = Mem_sw;}}, {{
+ Mem_sw = Rs2_uw;
+ Rd_sd = Rt_sd;
+ }}, {{EA = Rs1;}});
+ 0x4: amoxor_w({{Rt_sd = Mem_sw;}}, {{
+ Mem_sw = Rs2_uw^Rt_sd;
+ Rd_sd = Rt_sd;
+ }}, {{EA = Rs1;}});
+ 0x8: amoor_w({{Rt_sd = Mem_sw;}}, {{
+ Mem_sw = Rs2_uw | Rt_sd;
+ Rd_sd = Rt_sd;
+ }}, {{EA = Rs1;}});
+ 0xc: amoand_w({{Rt_sd = Mem_sw;}}, {{
+ Mem_sw = Rs2_uw&Rt_sd;
+ Rd_sd = Rt_sd;
+ }}, {{EA = Rs1;}});
+ 0x10: amomin_w({{Rt_sd = Mem_sw;}}, {{
+ Mem_sw = min<int32_t>(Rs2_sw, Rt_sd);
+ Rd_sd = Rt_sd;
+ }}, {{EA = Rs1;}});
+ 0x14: amomax_w({{Rt_sd = Mem_sw;}}, {{
+ Mem_sw = max<int32_t>(Rs2_sw, Rt_sd);
+ Rd_sd = Rt_sd;
+ }}, {{EA = Rs1;}});
+ 0x18: amominu_w({{Rt_sd = Mem_sw;}}, {{
+ Mem_sw = min<uint32_t>(Rs2_uw, Rt_sd);
+ Rd_sd = Rt_sd;
+ }}, {{EA = Rs1;}});
+ 0x1c: amomaxu_w({{Rt_sd = Mem_sw;}}, {{
+ Mem_sw = max<uint32_t>(Rs2_uw, Rt_sd);
+ Rd_sd = Rt_sd;
+ }}, {{EA = Rs1;}});
}
- Fd_bits = (uint64_t)reinterpret_cast<uint32_t&>(fd);
- }}, FloatMultOp);
- 0x1: fnmadd_d({{
- if (std::isnan(Fs1) || std::isnan(Fs2) || std::isnan(Fs3)) {
- if (issignalingnan(Fs1) || issignalingnan(Fs2)
- || issignalingnan(Fs3)) {
- FFLAGS |= FloatInvalid;
- }
- Fd = std::numeric_limits<double>::quiet_NaN();
- } else if (std::isinf(Fs1) || std::isinf(Fs2) ||
- std::isinf(Fs3)) {
- if (std::signbit(Fs1) == std::signbit(Fs2)
- && !std::isinf(Fs3)) {
- Fd = -std::numeric_limits<double>::infinity();
- } else if (std::signbit(Fs1) != std::signbit(Fs2)
- && !std::isinf(Fs3)) {
- Fd = std::numeric_limits<double>::infinity();
- } else {
- Fd = -Fs3;
- }
- } else {
- Fd = -(Fs1*Fs2 + Fs3);
+ }
+ 0x3: decode AMOFUNCT {
+ 0x2: LoadReserved::lr_d({{
+ Rd_sd = Mem_sd;
+ }}, mem_flags=LLSC);
+ 0x3: StoreCond::sc_d({{
+ Mem = Rs2;
+ }}, {{
+ Rd = result;
+ }}, mem_flags=LLSC, inst_flags=IsStoreConditional);
+ format AtomicMemOp {
+ 0x0: amoadd_d({{Rt_sd = Mem_sd;}}, {{
+ Mem_sd = Rs2_sd + Rt_sd;
+ Rd_sd = Rt_sd;
+ }}, {{EA = Rs1;}});
+ 0x1: amoswap_d({{Rt = Mem;}}, {{
+ Mem = Rs2;
+ Rd = Rt;
+ }}, {{EA = Rs1;}});
+ 0x4: amoxor_d({{Rt = Mem;}}, {{
+ Mem = Rs2^Rt;
+ Rd = Rt;
+ }}, {{EA = Rs1;}});
+ 0x8: amoor_d({{Rt = Mem;}}, {{
+ Mem = Rs2 | Rt;
+ Rd = Rt;
+ }}, {{EA = Rs1;}});
+ 0xc: amoand_d({{Rt = Mem;}}, {{
+ Mem = Rs2&Rt;
+ Rd = Rt;
+ }}, {{EA = Rs1;}});
+ 0x10: amomin_d({{Rt_sd = Mem_sd;}}, {{
+ Mem_sd = min(Rs2_sd, Rt_sd);
+ Rd_sd = Rt_sd;
+ }}, {{EA = Rs1;}});
+ 0x14: amomax_d({{Rt_sd = Mem_sd;}}, {{
+ Mem_sd = max(Rs2_sd, Rt_sd);
+ Rd_sd = Rt_sd;
+ }}, {{EA = Rs1;}});
+ 0x18: amominu_d({{Rt = Mem;}}, {{
+ Mem = min(Rs2, Rt);
+ Rd = Rt;
+ }}, {{EA = Rs1;}});
+ 0x1c: amomaxu_d({{Rt = Mem;}}, {{
+ Mem = max(Rs2, Rt);
+ Rd = Rt;
+ }}, {{EA = Rs1;}});
}
- }}, FloatMultOp);
+ }
}
+ 0x0c: decode FUNCT3 {
+ format ROp {
+ 0x0: decode FUNCT7 {
+ 0x0: add({{
+ Rd = Rs1_sd + Rs2_sd;
+ }});
+ 0x1: mul({{
+ Rd = Rs1_sd*Rs2_sd;
+ }}, IntMultOp);
+ 0x20: sub({{
+ Rd = Rs1_sd - Rs2_sd;
+ }});
+ }
+ 0x1: decode FUNCT7 {
+ 0x0: sll({{
+ Rd = Rs1 << Rs2<5:0>;
+ }});
+ 0x1: mulh({{
+ bool negate = (Rs1_sd < 0) != (Rs2_sd < 0);
- 0x53: decode FUNCT7 {
- 0x0: fadd_s({{
- uint32_t temp;
- float fs1 = reinterpret_cast<float&>(temp = Fs1_bits);
- float fs2 = reinterpret_cast<float&>(temp = Fs2_bits);
- float fd;
+ uint64_t Rs1_lo = (uint32_t)abs(Rs1_sd);
+ uint64_t Rs1_hi = (uint64_t)abs(Rs1_sd) >> 32;
+ uint64_t Rs2_lo = (uint32_t)abs(Rs2_sd);
+ uint64_t Rs2_hi = (uint64_t)abs(Rs2_sd) >> 32;
- if (std::isnan(fs1) || std::isnan(fs2)) {
- if (issignalingnan(fs1) || issignalingnan(fs2)) {
- FFLAGS |= FloatInvalid;
- }
- fd = std::numeric_limits<float>::quiet_NaN();
- } else {
- fd = fs1 + fs2;
+ uint64_t hi = Rs1_hi*Rs2_hi;
+ uint64_t mid1 = Rs1_hi*Rs2_lo;
+ uint64_t mid2 = Rs1_lo*Rs2_hi;
+ uint64_t lo = Rs2_lo*Rs1_lo;
+ uint64_t carry = ((uint64_t)(uint32_t)mid1
+ + (uint64_t)(uint32_t)mid2 + (lo >> 32)) >> 32;
+
+ uint64_t res = hi +
+ (mid1 >> 32) +
+ (mid2 >> 32) +
+ carry;
+ Rd = negate ? ~res + (Rs1_sd*Rs2_sd == 0 ? 1 : 0)
+ : res;
+ }}, IntMultOp);
}
- Fd_bits = (uint64_t)reinterpret_cast<uint32_t&>(fd);
- }}, FloatAddOp);
- 0x1: fadd_d({{
- if (std::isnan(Fs1) || std::isnan(Fs2)) {
- if (issignalingnan(Fs1) || issignalingnan(Fs2)) {
- FFLAGS |= FloatInvalid;
- }
- Fd = std::numeric_limits<double>::quiet_NaN();
- } else {
- Fd = Fs1 + Fs2;
+ 0x2: decode FUNCT7 {
+ 0x0: slt({{
+ Rd = (Rs1_sd < Rs2_sd) ? 1 : 0;
+ }});
+ 0x1: mulhsu({{
+ bool negate = Rs1_sd < 0;
+ uint64_t Rs1_lo = (uint32_t)abs(Rs1_sd);
+ uint64_t Rs1_hi = (uint64_t)abs(Rs1_sd) >> 32;
+ uint64_t Rs2_lo = (uint32_t)Rs2;
+ uint64_t Rs2_hi = Rs2 >> 32;
+
+ uint64_t hi = Rs1_hi*Rs2_hi;
+ uint64_t mid1 = Rs1_hi*Rs2_lo;
+ uint64_t mid2 = Rs1_lo*Rs2_hi;
+ uint64_t lo = Rs1_lo*Rs2_lo;
+ uint64_t carry = ((uint64_t)(uint32_t)mid1
+ + (uint64_t)(uint32_t)mid2 + (lo >> 32)) >> 32;
+
+ uint64_t res = hi +
+ (mid1 >> 32) +
+ (mid2 >> 32) +
+ carry;
+ Rd = negate ? ~res + (Rs1_sd*Rs2 == 0 ? 1 : 0) : res;
+ }}, IntMultOp);
}
- }}, FloatAddOp);
- 0x4: fsub_s({{
- uint32_t temp;
- float fs1 = reinterpret_cast<float&>(temp = Fs1_bits);
- float fs2 = reinterpret_cast<float&>(temp = Fs2_bits);
- float fd;
+ 0x3: decode FUNCT7 {
+ 0x0: sltu({{
+ Rd = (Rs1 < Rs2) ? 1 : 0;
+ }});
+ 0x1: mulhu({{
+ uint64_t Rs1_lo = (uint32_t)Rs1;
+ uint64_t Rs1_hi = Rs1 >> 32;
+ uint64_t Rs2_lo = (uint32_t)Rs2;
+ uint64_t Rs2_hi = Rs2 >> 32;
- if (std::isnan(fs1) || std::isnan(fs2)) {
- if (issignalingnan(fs1) || issignalingnan(fs2)) {
- FFLAGS |= FloatInvalid;
- }
- fd = std::numeric_limits<float>::quiet_NaN();
- } else {
- fd = fs1 - fs2;
+ uint64_t hi = Rs1_hi*Rs2_hi;
+ uint64_t mid1 = Rs1_hi*Rs2_lo;
+ uint64_t mid2 = Rs1_lo*Rs2_hi;
+ uint64_t lo = Rs1_lo*Rs2_lo;
+ uint64_t carry = ((uint64_t)(uint32_t)mid1
+ + (uint64_t)(uint32_t)mid2 + (lo >> 32)) >> 32;
+
+ Rd = hi + (mid1 >> 32) + (mid2 >> 32) + carry;
+ }}, IntMultOp);
}
- Fd_bits = (uint64_t)reinterpret_cast<uint32_t&>(fd);
- }}, FloatAddOp);
- 0x5: fsub_d({{
- if (std::isnan(Fs1) || std::isnan(Fs2)) {
- if (issignalingnan(Fs1) || issignalingnan(Fs2)) {
- FFLAGS |= FloatInvalid;
- }
- Fd = std::numeric_limits<double>::quiet_NaN();
- } else {
- Fd = Fs1 - Fs2;
+ 0x4: decode FUNCT7 {
+ 0x0: xor({{
+ Rd = Rs1 ^ Rs2;
+ }});
+ 0x1: div({{
+ if (Rs2_sd == 0) {
+ Rd_sd = -1;
+ } else if (Rs1_sd == numeric_limits<int64_t>::min()
+ && Rs2_sd == -1) {
+ Rd_sd = numeric_limits<int64_t>::min();
+ } else {
+ Rd_sd = Rs1_sd/Rs2_sd;
+ }
+ }}, IntDivOp);
}
- }}, FloatAddOp);
- 0x8: fmul_s({{
- uint32_t temp;
- float fs1 = reinterpret_cast<float&>(temp = Fs1_bits);
- float fs2 = reinterpret_cast<float&>(temp = Fs2_bits);
- float fd;
-
- if (std::isnan(fs1) || std::isnan(fs2)) {
- if (issignalingnan(fs1) || issignalingnan(fs2)) {
- FFLAGS |= FloatInvalid;
- }
- fd = std::numeric_limits<float>::quiet_NaN();
- } else {
- fd = fs1*fs2;
+ 0x5: decode FUNCT7 {
+ 0x0: srl({{
+ Rd = Rs1 >> Rs2<5:0>;
+ }});
+ 0x1: divu({{
+ if (Rs2 == 0) {
+ Rd = numeric_limits<uint64_t>::max();
+ } else {
+ Rd = Rs1/Rs2;
+ }
+ }}, IntDivOp);
+ 0x20: sra({{
+ Rd_sd = Rs1_sd >> Rs2<5:0>;
+ }});
}
- Fd_bits = (uint64_t)reinterpret_cast<uint32_t&>(fd);
- }}, FloatMultOp);
- 0x9: fmul_d({{
- if (std::isnan(Fs1) || std::isnan(Fs2)) {
- if (issignalingnan(Fs1) || issignalingnan(Fs2)) {
- FFLAGS |= FloatInvalid;
- }
- Fd = std::numeric_limits<double>::quiet_NaN();
- } else {
- Fd = Fs1*Fs2;
+ 0x6: decode FUNCT7 {
+ 0x0: or({{
+ Rd = Rs1 | Rs2;
+ }});
+ 0x1: rem({{
+ if (Rs2_sd == 0) {
+ Rd = Rs1_sd;
+ } else if (Rs1_sd == numeric_limits<int64_t>::min()
+ && Rs2_sd == -1) {
+ Rd = 0;
+ } else {
+ Rd = Rs1_sd%Rs2_sd;
+ }
+ }}, IntDivOp);
+ }
+ 0x7: decode FUNCT7 {
+ 0x0: and({{
+ Rd = Rs1 & Rs2;
+ }});
+ 0x1: remu({{
+ if (Rs2 == 0) {
+ Rd = Rs1;
+ } else {
+ Rd = Rs1%Rs2;
+ }
+ }}, IntDivOp);
}
- }}, FloatMultOp);
- 0xc: fdiv_s({{
- uint32_t temp;
- float fs1 = reinterpret_cast<float&>(temp = Fs1_bits);
- float fs2 = reinterpret_cast<float&>(temp = Fs2_bits);
- float fd;
+ }
+ }
- if (std::isnan(fs1) || std::isnan(fs2)) {
- if (issignalingnan(fs1) || issignalingnan(fs2)) {
- FFLAGS |= FloatInvalid;
- }
- fd = std::numeric_limits<float>::quiet_NaN();
- } else {
- fd = fs1/fs2;
+ 0x0d: UOp::lui({{
+ Rd = (uint64_t)imm;
+ }});
+
+ 0x0e: decode FUNCT3 {
+ format ROp {
+ 0x0: decode FUNCT7 {
+ 0x0: addw({{
+ Rd_sd = Rs1_sw + Rs2_sw;
+ }});
+ 0x1: mulw({{
+ Rd_sd = (int32_t)(Rs1_sw*Rs2_sw);
+ }}, IntMultOp);
+ 0x20: subw({{
+ Rd_sd = Rs1_sw - Rs2_sw;
+ }});
}
- Fd_bits = (uint64_t)reinterpret_cast<uint32_t&>(fd);
- }}, FloatDivOp);
- 0xd: fdiv_d({{
- if (std::isnan(Fs1) || std::isnan(Fs2)) {
- if (issignalingnan(Fs1) || issignalingnan(Fs2)) {
- FFLAGS |= FloatInvalid;
+ 0x1: sllw({{
+ Rd_sd = Rs1_sw << Rs2<4:0>;
+ }});
+ 0x4: divw({{
+ if (Rs2_sw == 0) {
+ Rd_sd = -1;
+ } else if (Rs1_sw == numeric_limits<int32_t>::min()
+ && Rs2_sw == -1) {
+ Rd_sd = numeric_limits<int32_t>::min();
+ } else {
+ Rd_sd = Rs1_sw/Rs2_sw;
}
- Fd = std::numeric_limits<double>::quiet_NaN();
- } else {
- Fd = Fs1/Fs2;
+ }}, IntDivOp);
+ 0x5: decode FUNCT7 {
+ 0x0: srlw({{
+ Rd_uw = Rs1_uw >> Rs2<4:0>;
+ }});
+ 0x1: divuw({{
+ if (Rs2_uw == 0) {
+ Rd_sd = numeric_limits<IntReg>::max();
+ } else {
+ Rd_sd = (int32_t)(Rs1_uw/Rs2_uw);
+ }
+ }}, IntDivOp);
+ 0x20: sraw({{
+ Rd_sd = Rs1_sw >> Rs2<4:0>;
+ }});
}
- }}, FloatDivOp);
- 0x10: decode ROUND_MODE {
- 0x0: fsgnj_s({{
+ 0x6: remw({{
+ if (Rs2_sw == 0) {
+ Rd_sd = Rs1_sw;
+ } else if (Rs1_sw == numeric_limits<int32_t>::min()
+ && Rs2_sw == -1) {
+ Rd_sd = 0;
+ } else {
+ Rd_sd = Rs1_sw%Rs2_sw;
+ }
+ }}, IntDivOp);
+ 0x7: remuw({{
+ if (Rs2_uw == 0) {
+ Rd_sd = (int32_t)Rs1_uw;
+ } else {
+ Rd_sd = (int32_t)(Rs1_uw%Rs2_uw);
+ }
+ }}, IntDivOp);
+ }
+ }
+
+ format FPROp {
+ 0x10: decode FUNCT2 {
+ 0x0: fmadd_s({{
uint32_t temp;
float fs1 = reinterpret_cast<float&>(temp = Fs1_bits);
float fs2 = reinterpret_cast<float&>(temp = Fs2_bits);
+ float fs3 = reinterpret_cast<float&>(temp = Fs3_bits);
float fd;
- if (issignalingnan(fs1)) {
- fd = std::numeric_limits<float>::signaling_NaN();
- std::feclearexcept(FE_INVALID);
+ if (isnan(fs1) || isnan(fs2) || isnan(fs3)) {
+ if (issignalingnan(fs1) || issignalingnan(fs2)
+ || issignalingnan(fs3)) {
+ FFLAGS |= FloatInvalid;
+ }
+ fd = numeric_limits<float>::quiet_NaN();
+ } else if (isinf(fs1) || isinf(fs2) ||
+ isinf(fs3)) {
+ if (signbit(fs1) == signbit(fs2)
+ && !isinf(fs3)) {
+ fd = numeric_limits<float>::infinity();
+ } else if (signbit(fs1) != signbit(fs2)
+ && !isinf(fs3)) {
+ fd = -numeric_limits<float>::infinity();
+ } else { // Fs3_sf is infinity
+ fd = fs3;
+ }
} else {
- fd = std::copysign(fs1, fs2);
+ fd = fs1*fs2 + fs3;
}
Fd_bits = (uint64_t)reinterpret_cast<uint32_t&>(fd);
- }});
- 0x1: fsgnjn_s({{
+ }}, FloatMultOp);
+ 0x1: fmadd_d({{
+ if (isnan(Fs1) || isnan(Fs2) || isnan(Fs3)) {
+ if (issignalingnan(Fs1) || issignalingnan(Fs2)
+ || issignalingnan(Fs3)) {
+ FFLAGS |= FloatInvalid;
+ }
+ Fd = numeric_limits<double>::quiet_NaN();
+ } else if (isinf(Fs1) || isinf(Fs2) ||
+ isinf(Fs3)) {
+ if (signbit(Fs1) == signbit(Fs2)
+ && !isinf(Fs3)) {
+ Fd = numeric_limits<double>::infinity();
+ } else if (signbit(Fs1) != signbit(Fs2)
+ && !isinf(Fs3)) {
+ Fd = -numeric_limits<double>::infinity();
+ } else {
+ Fd = Fs3;
+ }
+ } else {
+ Fd = Fs1*Fs2 + Fs3;
+ }
+ }}, FloatMultOp);
+ }
+ 0x11: decode FUNCT2 {
+ 0x0: fmsub_s({{
uint32_t temp;
float fs1 = reinterpret_cast<float&>(temp = Fs1_bits);
float fs2 = reinterpret_cast<float&>(temp = Fs2_bits);
+ float fs3 = reinterpret_cast<float&>(temp = Fs3_bits);
float fd;
- if (issignalingnan(fs1)) {
- fd = std::numeric_limits<float>::signaling_NaN();
- std::feclearexcept(FE_INVALID);
+ if (isnan(fs1) || isnan(fs2) || isnan(fs3)) {
+ if (issignalingnan(fs1) || issignalingnan(fs2)
+ || issignalingnan(fs3)) {
+ FFLAGS |= FloatInvalid;
+ }
+ fd = numeric_limits<float>::quiet_NaN();
+ } else if (isinf(fs1) || isinf(fs2) ||
+ isinf(fs3)) {
+ if (signbit(fs1) == signbit(fs2)
+ && !isinf(fs3)) {
+ fd = numeric_limits<float>::infinity();
+ } else if (signbit(fs1) != signbit(fs2)
+ && !isinf(fs3)) {
+ fd = -numeric_limits<float>::infinity();
+ } else { // Fs3_sf is infinity
+ fd = -fs3;
+ }
} else {
- fd = std::copysign(fs1, -fs2);
+ fd = fs1*fs2 - fs3;
}
Fd_bits = (uint64_t)reinterpret_cast<uint32_t&>(fd);
- }});
- 0x2: fsgnjx_s({{
+ }}, FloatMultOp);
+ 0x1: fmsub_d({{
+ if (isnan(Fs1) || isnan(Fs2) || isnan(Fs3)) {
+ if (issignalingnan(Fs1) || issignalingnan(Fs2)
+ || issignalingnan(Fs3)) {
+ FFLAGS |= FloatInvalid;
+ }
+ Fd = numeric_limits<double>::quiet_NaN();
+ } else if (isinf(Fs1) || isinf(Fs2) ||
+ isinf(Fs3)) {
+ if (signbit(Fs1) == signbit(Fs2)
+ && !isinf(Fs3)) {
+ Fd = numeric_limits<double>::infinity();
+ } else if (signbit(Fs1) != signbit(Fs2)
+ && !isinf(Fs3)) {
+ Fd = -numeric_limits<double>::infinity();
+ } else {
+ Fd = -Fs3;
+ }
+ } else {
+ Fd = Fs1*Fs2 - Fs3;
+ }
+ }}, FloatMultOp);
+ }
+ 0x12: decode FUNCT2 {
+ 0x0: fnmsub_s({{
uint32_t temp;
float fs1 = reinterpret_cast<float&>(temp = Fs1_bits);
float fs2 = reinterpret_cast<float&>(temp = Fs2_bits);
+ float fs3 = reinterpret_cast<float&>(temp = Fs3_bits);
float fd;
- if (issignalingnan(fs1)) {
- fd = std::numeric_limits<float>::signaling_NaN();
- std::feclearexcept(FE_INVALID);
+ if (isnan(fs1) || isnan(fs2) || isnan(fs3)) {
+ if (issignalingnan(fs1) || issignalingnan(fs2)
+ || issignalingnan(fs3)) {
+ FFLAGS |= FloatInvalid;
+ }
+ fd = numeric_limits<float>::quiet_NaN();
+ } else if (isinf(fs1) || isinf(fs2) ||
+ isinf(fs3)) {
+ if (signbit(fs1) == signbit(fs2)
+ && !isinf(fs3)) {
+ fd = -numeric_limits<float>::infinity();
+ } else if (signbit(fs1) != signbit(fs2)
+ && !isinf(fs3)) {
+ fd = numeric_limits<float>::infinity();
+ } else { // Fs3_sf is infinity
+ fd = fs3;
+ }
} else {
- fd = fs1*(std::signbit(fs2) ? -1.0 : 1.0);
+ fd = -(fs1*fs2 - fs3);
}
Fd_bits = (uint64_t)reinterpret_cast<uint32_t&>(fd);
- }});
- }
- 0x11: decode ROUND_MODE {
- 0x0: fsgnj_d({{
- if (issignalingnan(Fs1)) {
- Fd = std::numeric_limits<double>::signaling_NaN();
- std::feclearexcept(FE_INVALID);
- } else {
- Fd = std::copysign(Fs1, Fs2);
- }
- }});
- 0x1: fsgnjn_d({{
- if (issignalingnan(Fs1)) {
- Fd = std::numeric_limits<double>::signaling_NaN();
- std::feclearexcept(FE_INVALID);
- } else {
- Fd = std::copysign(Fs1, -Fs2);
- }
- }});
- 0x2: fsgnjx_d({{
- if (issignalingnan(Fs1)) {
- Fd = std::numeric_limits<double>::signaling_NaN();
- std::feclearexcept(FE_INVALID);
+ }}, FloatMultOp);
+ 0x1: fnmsub_d({{
+ if (isnan(Fs1) || isnan(Fs2) || isnan(Fs3)) {
+ if (issignalingnan(Fs1) || issignalingnan(Fs2)
+ || issignalingnan(Fs3)) {
+ FFLAGS |= FloatInvalid;
+ }
+ Fd = numeric_limits<double>::quiet_NaN();
+ } else if (isinf(Fs1) || isinf(Fs2)
+ || isinf(Fs3)) {
+ if (signbit(Fs1) == signbit(Fs2)
+ && !isinf(Fs3)) {
+ Fd = -numeric_limits<double>::infinity();
+ } else if (signbit(Fs1) != signbit(Fs2)
+ && !isinf(Fs3)) {
+ Fd = numeric_limits<double>::infinity();
+ } else {
+ Fd = Fs3;
+ }
} else {
- Fd = Fs1*(std::signbit(Fs2) ? -1.0 : 1.0);
+ Fd = -(Fs1*Fs2 - Fs3);
}
- }});
+ }}, FloatMultOp);
}
- 0x14: decode ROUND_MODE {
- 0x0: fmin_s({{
+ 0x13: decode FUNCT2 {
+ 0x0: fnmadd_s({{
uint32_t temp;
float fs1 = reinterpret_cast<float&>(temp = Fs1_bits);
float fs2 = reinterpret_cast<float&>(temp = Fs2_bits);
+ float fs3 = reinterpret_cast<float&>(temp = Fs3_bits);
float fd;
- if (issignalingnan(fs2)) {
- fd = fs1;
- FFLAGS |= FloatInvalid;
- } else if (issignalingnan(fs1)) {
- fd = fs2;
- FFLAGS |= FloatInvalid;
+ if (isnan(fs1) || isnan(fs2) || isnan(fs3)) {
+ if (issignalingnan(fs1) || issignalingnan(fs2)
+ || issignalingnan(fs3)) {
+ FFLAGS |= FloatInvalid;
+ }
+ fd = numeric_limits<float>::quiet_NaN();
+ } else if (isinf(fs1) || isinf(fs2) ||
+ isinf(fs3)) {
+ if (signbit(fs1) == signbit(fs2)
+ && !isinf(fs3)) {
+ fd = -numeric_limits<float>::infinity();
+ } else if (signbit(fs1) != signbit(fs2)
+ && !isinf(fs3)) {
+ fd = numeric_limits<float>::infinity();
+ } else { // Fs3_sf is infinity
+ fd = -fs3;
+ }
} else {
- fd = std::fmin(fs1, fs2);
+ fd = -(fs1*fs2 + fs3);
}
Fd_bits = (uint64_t)reinterpret_cast<uint32_t&>(fd);
- }}, FloatCmpOp);
- 0x1: fmax_s({{
+ }}, FloatMultOp);
+ 0x1: fnmadd_d({{
+ if (isnan(Fs1) || isnan(Fs2) || isnan(Fs3)) {
+ if (issignalingnan(Fs1) || issignalingnan(Fs2)
+ || issignalingnan(Fs3)) {
+ FFLAGS |= FloatInvalid;
+ }
+ Fd = numeric_limits<double>::quiet_NaN();
+ } else if (isinf(Fs1) || isinf(Fs2) ||
+ isinf(Fs3)) {
+ if (signbit(Fs1) == signbit(Fs2)
+ && !isinf(Fs3)) {
+ Fd = -numeric_limits<double>::infinity();
+ } else if (signbit(Fs1) != signbit(Fs2)
+ && !isinf(Fs3)) {
+ Fd = numeric_limits<double>::infinity();
+ } else {
+ Fd = -Fs3;
+ }
+ } else {
+ Fd = -(Fs1*Fs2 + Fs3);
+ }
+ }}, FloatMultOp);
+ }
+ 0x14: decode FUNCT7 {
+ 0x0: fadd_s({{
uint32_t temp;
float fs1 = reinterpret_cast<float&>(temp = Fs1_bits);
float fs2 = reinterpret_cast<float&>(temp = Fs2_bits);
float fd;
- if (issignalingnan(fs2)) {
- fd = fs1;
- FFLAGS |= FloatInvalid;
- } else if (issignalingnan(fs1)) {
- fd = fs2;
- FFLAGS |= FloatInvalid;
+ if (isnan(fs1) || isnan(fs2)) {
+ if (issignalingnan(fs1) || issignalingnan(fs2)) {
+ FFLAGS |= FloatInvalid;
+ }
+ fd = numeric_limits<float>::quiet_NaN();
} else {
- fd = std::fmax(fs1, fs2);
+ fd = fs1 + fs2;
}
Fd_bits = (uint64_t)reinterpret_cast<uint32_t&>(fd);
- }}, FloatCmpOp);
- }
- 0x15: decode ROUND_MODE {
- 0x0: fmin_d({{
- if (issignalingnan(Fs2)) {
- Fd = Fs1;
- FFLAGS |= FloatInvalid;
- } else if (issignalingnan(Fs1)) {
- Fd = Fs2;
- FFLAGS |= FloatInvalid;
- } else {
- Fd = std::fmin(Fs1, Fs2);
- }
- }}, FloatCmpOp);
- 0x1: fmax_d({{
- if (issignalingnan(Fs2)) {
- Fd = Fs1;
- FFLAGS |= FloatInvalid;
- } else if (issignalingnan(Fs1)) {
- Fd = Fs2;
- FFLAGS |= FloatInvalid;
+ }}, FloatAddOp);
+ 0x1: fadd_d({{
+ if (isnan(Fs1) || isnan(Fs2)) {
+ if (issignalingnan(Fs1) || issignalingnan(Fs2)) {
+ FFLAGS |= FloatInvalid;
+ }
+ Fd = numeric_limits<double>::quiet_NaN();
} else {
- Fd = std::fmax(Fs1, Fs2);
+ Fd = Fs1 + Fs2;
}
- }}, FloatCmpOp);
- }
- 0x20: fcvt_s_d({{
- assert(CONV_SGN == 1);
- float fd;
- if (issignalingnan(Fs1)) {
- fd = std::numeric_limits<float>::quiet_NaN();
- FFLAGS |= FloatInvalid;
- } else {
- fd = (float)Fs1;
- }
- Fd_bits = (uint64_t)reinterpret_cast<uint32_t&>(fd);
- }}, FloatCvtOp);
- 0x21: fcvt_d_s({{
- assert(CONV_SGN == 0);
- uint32_t temp;
- float fs1 = reinterpret_cast<float&>(temp = Fs1_bits);
-
- if (issignalingnan(fs1)) {
- Fd = std::numeric_limits<double>::quiet_NaN();
- FFLAGS |= FloatInvalid;
- } else {
- Fd = (double)fs1;
- }
- }}, FloatCvtOp);
- 0x2c: fsqrt_s({{
- assert(RS2 == 0);
- uint32_t temp;
- float fs1 = reinterpret_cast<float&>(temp = Fs1_bits);
- float fd;
-
- if (issignalingnan(Fs1_sf)) {
- FFLAGS |= FloatInvalid;
- }
- fd = std::sqrt(fs1);
- Fd_bits = (uint64_t)reinterpret_cast<uint32_t&>(fd);
- }}, FloatSqrtOp);
- 0x2d: fsqrt_d({{
- assert(RS2 == 0);
- Fd = std::sqrt(Fs1);
- }}, FloatSqrtOp);
- 0x50: decode ROUND_MODE {
- 0x0: fle_s({{
+ }}, FloatAddOp);
+ 0x4: fsub_s({{
uint32_t temp;
float fs1 = reinterpret_cast<float&>(temp = Fs1_bits);
float fs2 = reinterpret_cast<float&>(temp = Fs2_bits);
+ float fd;
- if (std::isnan(fs1) || std::isnan(fs2)) {
- FFLAGS |= FloatInvalid;
- Rd = 0;
+ if (isnan(fs1) || isnan(fs2)) {
+ if (issignalingnan(fs1) || issignalingnan(fs2)) {
+ FFLAGS |= FloatInvalid;
+ }
+ fd = numeric_limits<float>::quiet_NaN();
} else {
- Rd = fs1 <= fs2 ? 1 : 0;
+ fd = fs1 - fs2;
}
- }}, FloatCmpOp);
- 0x1: flt_s({{
- uint32_t temp;
- float fs1 = reinterpret_cast<float&>(temp = Fs1_bits);
- float fs2 = reinterpret_cast<float&>(temp = Fs2_bits);
-
- if (std::isnan(fs1) || std::isnan(fs2)) {
- FFLAGS |= FloatInvalid;
- Rd = 0;
+ Fd_bits = (uint64_t)reinterpret_cast<uint32_t&>(fd);
+ }}, FloatAddOp);
+ 0x5: fsub_d({{
+ if (isnan(Fs1) || isnan(Fs2)) {
+ if (issignalingnan(Fs1) || issignalingnan(Fs2)) {
+ FFLAGS |= FloatInvalid;
+ }
+ Fd = numeric_limits<double>::quiet_NaN();
} else {
- Rd = fs1 < fs2 ? 1 : 0;
+ Fd = Fs1 - Fs2;
}
- }}, FloatCmpOp);
- 0x2: feq_s({{
+ }}, FloatAddOp);
+ 0x8: fmul_s({{
uint32_t temp;
float fs1 = reinterpret_cast<float&>(temp = Fs1_bits);
float fs2 = reinterpret_cast<float&>(temp = Fs2_bits);
+ float fd;
- if (issignalingnan(fs1) || issignalingnan(fs2)) {
- FFLAGS |= FloatInvalid;
- }
- Rd = fs1 == fs2 ? 1 : 0;
- }}, FloatCmpOp);
- }
- 0x51: decode ROUND_MODE {
- 0x0: fle_d({{
- if (std::isnan(Fs1) || std::isnan(Fs2)) {
- FFLAGS |= FloatInvalid;
- Rd = 0;
+ if (isnan(fs1) || isnan(fs2)) {
+ if (issignalingnan(fs1) || issignalingnan(fs2)) {
+ FFLAGS |= FloatInvalid;
+ }
+ fd = numeric_limits<float>::quiet_NaN();
} else {
- Rd = Fs1 <= Fs2 ? 1 : 0;
+ fd = fs1*fs2;
}
- }}, FloatCmpOp);
- 0x1: flt_d({{
- if (std::isnan(Fs1) || std::isnan(Fs2)) {
- FFLAGS |= FloatInvalid;
- Rd = 0;
+ Fd_bits = (uint64_t)reinterpret_cast<uint32_t&>(fd);
+ }}, FloatMultOp);
+ 0x9: fmul_d({{
+ if (isnan(Fs1) || isnan(Fs2)) {
+ if (issignalingnan(Fs1) || issignalingnan(Fs2)) {
+ FFLAGS |= FloatInvalid;
+ }
+ Fd = numeric_limits<double>::quiet_NaN();
} else {
- Rd = Fs1 < Fs2 ? 1 : 0;
+ Fd = Fs1*Fs2;
}
- }}, FloatCmpOp);
- 0x2: feq_d({{
- if (issignalingnan(Fs1) || issignalingnan(Fs2)) {
- FFLAGS |= FloatInvalid;
- }
- Rd = Fs1 == Fs2 ? 1 : 0;
- }}, FloatCmpOp);
- }
- 0x60: decode CONV_SGN {
- 0x0: fcvt_w_s({{
+ }}, FloatMultOp);
+ 0xc: fdiv_s({{
uint32_t temp;
float fs1 = reinterpret_cast<float&>(temp = Fs1_bits);
+ float fs2 = reinterpret_cast<float&>(temp = Fs2_bits);
+ float fd;
- if (std::isnan(fs1)) {
- Rd_sd = std::numeric_limits<int32_t>::max();
- FFLAGS |= FloatInvalid;
+ if (isnan(fs1) || isnan(fs2)) {
+ if (issignalingnan(fs1) || issignalingnan(fs2)) {
+ FFLAGS |= FloatInvalid;
+ }
+ fd = numeric_limits<float>::quiet_NaN();
} else {
- Rd_sd = (int32_t)fs1;
- if (std::fetestexcept(FE_INVALID)) {
- if (std::signbit(fs1)) {
- Rd_sd = std::numeric_limits<int32_t>::min();
- } else {
- Rd_sd = std::numeric_limits<int32_t>::max();
- }
- std::feclearexcept(FE_INEXACT);
+ fd = fs1/fs2;
+ }
+ Fd_bits = (uint64_t)reinterpret_cast<uint32_t&>(fd);
+ }}, FloatDivOp);
+ 0xd: fdiv_d({{
+ if (isnan(Fs1) || isnan(Fs2)) {
+ if (issignalingnan(Fs1) || issignalingnan(Fs2)) {
+ FFLAGS |= FloatInvalid;
}
+ Fd = numeric_limits<double>::quiet_NaN();
+ } else {
+ Fd = Fs1/Fs2;
}
- }}, FloatCvtOp);
- 0x1: fcvt_wu_s({{
- uint32_t temp;
- float fs1 = reinterpret_cast<float&>(temp = Fs1_bits);
+ }}, FloatDivOp);
+ 0x10: decode ROUND_MODE {
+ 0x0: fsgnj_s({{
+ uint32_t temp;
+ float fs1 = reinterpret_cast<float&>(temp = Fs1_bits);
+ float fs2 = reinterpret_cast<float&>(temp = Fs2_bits);
+ float fd;
- if (fs1 < 0.0) {
- Rd = 0;
+ if (issignalingnan(fs1)) {
+ fd = numeric_limits<float>::signaling_NaN();
+ feclearexcept(FE_INVALID);
+ } else {
+ fd = copysign(fs1, fs2);
+ }
+ Fd_bits = (uint64_t)reinterpret_cast<uint32_t&>(fd);
+ }});
+ 0x1: fsgnjn_s({{
+ uint32_t temp;
+ float fs1 = reinterpret_cast<float&>(temp = Fs1_bits);
+ float fs2 = reinterpret_cast<float&>(temp = Fs2_bits);
+ float fd;
+
+ if (issignalingnan(fs1)) {
+ fd = numeric_limits<float>::signaling_NaN();
+ feclearexcept(FE_INVALID);
+ } else {
+ fd = copysign(fs1, -fs2);
+ }
+ Fd_bits = (uint64_t)reinterpret_cast<uint32_t&>(fd);
+ }});
+ 0x2: fsgnjx_s({{
+ uint32_t temp;
+ float fs1 = reinterpret_cast<float&>(temp = Fs1_bits);
+ float fs2 = reinterpret_cast<float&>(temp = Fs2_bits);
+ float fd;
+
+ if (issignalingnan(fs1)) {
+ fd = numeric_limits<float>::signaling_NaN();
+ feclearexcept(FE_INVALID);
+ } else {
+ fd = fs1*(signbit(fs2) ? -1.0 : 1.0);
+ }
+ Fd_bits = (uint64_t)reinterpret_cast<uint32_t&>(fd);
+ }});
+ }
+ 0x11: decode ROUND_MODE {
+ 0x0: fsgnj_d({{
+ if (issignalingnan(Fs1)) {
+ Fd = numeric_limits<double>::signaling_NaN();
+ feclearexcept(FE_INVALID);
+ } else {
+ Fd = copysign(Fs1, Fs2);
+ }
+ }});
+ 0x1: fsgnjn_d({{
+ if (issignalingnan(Fs1)) {
+ Fd = numeric_limits<double>::signaling_NaN();
+ feclearexcept(FE_INVALID);
+ } else {
+ Fd = copysign(Fs1, -Fs2);
+ }
+ }});
+ 0x2: fsgnjx_d({{
+ if (issignalingnan(Fs1)) {
+ Fd = numeric_limits<double>::signaling_NaN();
+ feclearexcept(FE_INVALID);
+ } else {
+ Fd = Fs1*(signbit(Fs2) ? -1.0 : 1.0);
+ }
+ }});
+ }
+ 0x14: decode ROUND_MODE {
+ 0x0: fmin_s({{
+ uint32_t temp;
+ float fs1 = reinterpret_cast<float&>(temp = Fs1_bits);
+ float fs2 = reinterpret_cast<float&>(temp = Fs2_bits);
+ float fd;
+
+ if (issignalingnan(fs2)) {
+ fd = fs1;
+ FFLAGS |= FloatInvalid;
+ } else if (issignalingnan(fs1)) {
+ fd = fs2;
+ FFLAGS |= FloatInvalid;
+ } else {
+ fd = fmin(fs1, fs2);
+ }
+ Fd_bits = (uint64_t)reinterpret_cast<uint32_t&>(fd);
+ }}, FloatCmpOp);
+ 0x1: fmax_s({{
+ uint32_t temp;
+ float fs1 = reinterpret_cast<float&>(temp = Fs1_bits);
+ float fs2 = reinterpret_cast<float&>(temp = Fs2_bits);
+ float fd;
+
+ if (issignalingnan(fs2)) {
+ fd = fs1;
+ FFLAGS |= FloatInvalid;
+ } else if (issignalingnan(fs1)) {
+ fd = fs2;
+ FFLAGS |= FloatInvalid;
+ } else {
+ fd = fmax(fs1, fs2);
+ }
+ Fd_bits = (uint64_t)reinterpret_cast<uint32_t&>(fd);
+ }}, FloatCmpOp);
+ }
+ 0x15: decode ROUND_MODE {
+ 0x0: fmin_d({{
+ if (issignalingnan(Fs2)) {
+ Fd = Fs1;
+ FFLAGS |= FloatInvalid;
+ } else if (issignalingnan(Fs1)) {
+ Fd = Fs2;
+ FFLAGS |= FloatInvalid;
+ } else {
+ Fd = fmin(Fs1, Fs2);
+ }
+ }}, FloatCmpOp);
+ 0x1: fmax_d({{
+ if (issignalingnan(Fs2)) {
+ Fd = Fs1;
+ FFLAGS |= FloatInvalid;
+ } else if (issignalingnan(Fs1)) {
+ Fd = Fs2;
+ FFLAGS |= FloatInvalid;
+ } else {
+ Fd = fmax(Fs1, Fs2);
+ }
+ }}, FloatCmpOp);
+ }
+ 0x20: fcvt_s_d({{
+ assert(CONV_SGN == 1);
+ float fd;
+ if (issignalingnan(Fs1)) {
+ fd = numeric_limits<float>::quiet_NaN();
FFLAGS |= FloatInvalid;
} else {
- Rd = (uint32_t)fs1;
- if (std::fetestexcept(FE_INVALID)) {
- Rd = std::numeric_limits<uint64_t>::max();
- std::feclearexcept(FE_INEXACT);
- }
+ fd = (float)Fs1;
}
+ Fd_bits = (uint64_t)reinterpret_cast<uint32_t&>(fd);
}}, FloatCvtOp);
- 0x2: fcvt_l_s({{
+ 0x21: fcvt_d_s({{
+ assert(CONV_SGN == 0);
uint32_t temp;
float fs1 = reinterpret_cast<float&>(temp = Fs1_bits);
- if (std::isnan(fs1)) {
- Rd_sd = std::numeric_limits<int64_t>::max();
+ if (issignalingnan(fs1)) {
+ Fd = numeric_limits<double>::quiet_NaN();
FFLAGS |= FloatInvalid;
} else {
- Rd_sd = (int64_t)fs1;
- if (std::fetestexcept(FE_INVALID)) {
- if (std::signbit(fs1)) {
- Rd_sd = std::numeric_limits<int64_t>::min();
- } else {
- Rd_sd = std::numeric_limits<int64_t>::max();
- }
- std::feclearexcept(FE_INEXACT);
- }
+ Fd = (double)fs1;
}
}}, FloatCvtOp);
- 0x3: fcvt_lu_s({{
+ 0x2c: fsqrt_s({{
+ assert(RS2 == 0);
uint32_t temp;
float fs1 = reinterpret_cast<float&>(temp = Fs1_bits);
+ float fd;
- if (fs1 < 0.0) {
- Rd = 0;
+ if (issignalingnan(Fs1_sf)) {
FFLAGS |= FloatInvalid;
- } else {
- Rd = (uint64_t)fs1;
- if (std::fetestexcept(FE_INVALID)) {
- Rd = std::numeric_limits<uint64_t>::max();
- std::feclearexcept(FE_INEXACT);
- }
}
- }}, FloatCvtOp);
- }
- 0x61: decode CONV_SGN {
- 0x0: fcvt_w_d({{
- Rd_sd = (int32_t)Fs1;
- if (std::fetestexcept(FE_INVALID)) {
- if (Fs1 < 0.0) {
- Rd_sd = std::numeric_limits<int32_t>::min();
+ fd = sqrt(fs1);
+ Fd_bits = (uint64_t)reinterpret_cast<uint32_t&>(fd);
+ }}, FloatSqrtOp);
+ 0x2d: fsqrt_d({{
+ assert(RS2 == 0);
+ Fd = sqrt(Fs1);
+ }}, FloatSqrtOp);
+ 0x50: decode ROUND_MODE {
+ 0x0: fle_s({{
+ uint32_t temp;
+ float fs1 = reinterpret_cast<float&>(temp = Fs1_bits);
+ float fs2 = reinterpret_cast<float&>(temp = Fs2_bits);
+
+ if (isnan(fs1) || isnan(fs2)) {
+ FFLAGS |= FloatInvalid;
+ Rd = 0;
} else {
- Rd_sd = std::numeric_limits<int32_t>::max();
- }
- std::feclearexcept(FE_INEXACT);
- }
- }}, FloatCvtOp);
- 0x1: fcvt_wu_d({{
- if (Fs1 < 0.0) {
- Rd = 0;
- FFLAGS |= FloatInvalid;
- } else {
- Rd = (uint32_t)Fs1;
- if (std::fetestexcept(FE_INVALID)) {
- Rd = std::numeric_limits<uint64_t>::max();
- std::feclearexcept(FE_INEXACT);
+ Rd = fs1 <= fs2 ? 1 : 0;
}
- }
- }}, FloatCvtOp);
- 0x2: fcvt_l_d({{
- Rd_sd = Fs1;
- if (std::fetestexcept(FE_INVALID)) {
- if (Fs1 < 0.0) {
- Rd_sd = std::numeric_limits<int64_t>::min();
+ }}, FloatCmpOp);
+ 0x1: flt_s({{
+ uint32_t temp;
+ float fs1 = reinterpret_cast<float&>(temp = Fs1_bits);
+ float fs2 = reinterpret_cast<float&>(temp = Fs2_bits);
+
+ if (isnan(fs1) || isnan(fs2)) {
+ FFLAGS |= FloatInvalid;
+ Rd = 0;
} else {
- Rd_sd = std::numeric_limits<int64_t>::max();
+ Rd = fs1 < fs2 ? 1 : 0;
}
- std::feclearexcept(FE_INEXACT);
- }
- }}, FloatCvtOp);
- 0x3: fcvt_lu_d({{
- if (Fs1 < 0.0) {
- Rd = 0;
- FFLAGS |= FloatInvalid;
- } else {
- Rd = (uint64_t)Fs1;
- if (std::fetestexcept(FE_INVALID)) {
- Rd = std::numeric_limits<uint64_t>::max();
- std::feclearexcept(FE_INEXACT);
+ }}, FloatCmpOp);
+ 0x2: feq_s({{
+ uint32_t temp;
+ float fs1 = reinterpret_cast<float&>(temp = Fs1_bits);
+ float fs2 = reinterpret_cast<float&>(temp = Fs2_bits);
+
+ if (issignalingnan(fs1) || issignalingnan(fs2)) {
+ FFLAGS |= FloatInvalid;
}
- }
- }}, FloatCvtOp);
- }
- 0x68: decode CONV_SGN {
- 0x0: fcvt_s_w({{
- float temp = (float)Rs1_sw;
- Fd_bits = (uint64_t)reinterpret_cast<uint32_t&>(temp);
- }}, FloatCvtOp);
- 0x1: fcvt_s_wu({{
- float temp = (float)Rs1_uw;
- Fd_bits = (uint64_t)reinterpret_cast<uint32_t&>(temp);
- }}, FloatCvtOp);
- 0x2: fcvt_s_l({{
- float temp = (float)Rs1_sd;
- Fd_bits = (uint64_t)reinterpret_cast<uint32_t&>(temp);
- }}, FloatCvtOp);
- 0x3: fcvt_s_lu({{
- float temp = (float)Rs1;
- Fd_bits = (uint64_t)reinterpret_cast<uint32_t&>(temp);
- }}, FloatCvtOp);
- }
- 0x69: decode CONV_SGN {
- 0x0: fcvt_d_w({{
- Fd = (double)Rs1_sw;
- }}, FloatCvtOp);
- 0x1: fcvt_d_wu({{
- Fd = (double)Rs1_uw;
- }}, FloatCvtOp);
- 0x2: fcvt_d_l({{
- Fd = (double)Rs1_sd;
- }}, FloatCvtOp);
- 0x3: fcvt_d_lu({{
- Fd = (double)Rs1;
- }}, FloatCvtOp);
- }
- 0x70: decode ROUND_MODE {
- 0x0: fmv_x_s({{
- Rd = (uint32_t)Fs1_bits;
- if ((Rd&0x80000000) != 0) {
- Rd |= (0xFFFFFFFFULL << 32);
- }
- }}, FloatCvtOp);
- 0x1: fclass_s({{
- uint32_t temp;
- float fs1 = reinterpret_cast<float&>(temp = Fs1_bits);
- switch (std::fpclassify(fs1)) {
- case FP_INFINITE:
- if (std::signbit(fs1)) {
- Rd = 1 << 0;
+ Rd = fs1 == fs2 ? 1 : 0;
+ }}, FloatCmpOp);
+ }
+ 0x51: decode ROUND_MODE {
+ 0x0: fle_d({{
+ if (isnan(Fs1) || isnan(Fs2)) {
+ FFLAGS |= FloatInvalid;
+ Rd = 0;
} else {
- Rd = 1 << 7;
+ Rd = Fs1 <= Fs2 ? 1 : 0;
}
- break;
- case FP_NAN:
- if (issignalingnan(fs1)) {
- Rd = 1 << 8;
+ }}, FloatCmpOp);
+ 0x1: flt_d({{
+ if (isnan(Fs1) || isnan(Fs2)) {
+ FFLAGS |= FloatInvalid;
+ Rd = 0;
} else {
- Rd = 1 << 9;
+ Rd = Fs1 < Fs2 ? 1 : 0;
}
- break;
- case FP_ZERO:
- if (std::signbit(fs1)) {
- Rd = 1 << 3;
- } else {
- Rd = 1 << 4;
+ }}, FloatCmpOp);
+ 0x2: feq_d({{
+ if (issignalingnan(Fs1) || issignalingnan(Fs2)) {
+ FFLAGS |= FloatInvalid;
}
- break;
- case FP_SUBNORMAL:
- if (std::signbit(fs1)) {
- Rd = 1 << 2;
+ Rd = Fs1 == Fs2 ? 1 : 0;
+ }}, FloatCmpOp);
+ }
+ 0x60: decode CONV_SGN {
+ 0x0: fcvt_w_s({{
+ uint32_t temp;
+ float fs1 = reinterpret_cast<float&>(temp = Fs1_bits);
+
+ if (isnan(fs1)) {
+ Rd_sd = numeric_limits<int32_t>::max();
+ FFLAGS |= FloatInvalid;
} else {
- Rd = 1 << 5;
+ Rd_sd = (int32_t)fs1;
+ if (fetestexcept(FE_INVALID)) {
+ if (signbit(fs1)) {
+ Rd_sd = numeric_limits<int32_t>::min();
+ } else {
+ Rd_sd = numeric_limits<int32_t>::max();
+ }
+ feclearexcept(FE_INEXACT);
+ }
}
- break;
- case FP_NORMAL:
- if (std::signbit(fs1)) {
- Rd = 1 << 1;
+ }}, FloatCvtOp);
+ 0x1: fcvt_wu_s({{
+ uint32_t temp;
+ float fs1 = reinterpret_cast<float&>(temp = Fs1_bits);
+
+ if (fs1 < 0.0) {
+ Rd = 0;
+ FFLAGS |= FloatInvalid;
} else {
- Rd = 1 << 6;
+ Rd = (uint32_t)fs1;
+ if (fetestexcept(FE_INVALID)) {
+ Rd = numeric_limits<uint64_t>::max();
+ feclearexcept(FE_INEXACT);
+ }
}
- break;
- default:
- panic("Unknown classification for operand.");
- break;
- }
- }});
- }
- 0x71: decode ROUND_MODE {
- 0x0: fmv_x_d({{
- Rd = Fs1_bits;
- }}, FloatCvtOp);
- 0x1: fclass_d({{
- switch (std::fpclassify(Fs1)) {
- case FP_INFINITE:
- if (std::signbit(Fs1)) {
- Rd = 1 << 0;
+ }}, FloatCvtOp);
+ 0x2: fcvt_l_s({{
+ uint32_t temp;
+ float fs1 = reinterpret_cast<float&>(temp = Fs1_bits);
+
+ if (isnan(fs1)) {
+ Rd_sd = numeric_limits<int64_t>::max();
+ FFLAGS |= FloatInvalid;
} else {
- Rd = 1 << 7;
+ Rd_sd = (int64_t)fs1;
+ if (fetestexcept(FE_INVALID)) {
+ if (signbit(fs1)) {
+ Rd_sd = numeric_limits<int64_t>::min();
+ } else {
+ Rd_sd = numeric_limits<int64_t>::max();
+ }
+ feclearexcept(FE_INEXACT);
+ }
}
- break;
- case FP_NAN:
- if (issignalingnan(Fs1)) {
- Rd = 1 << 8;
+ }}, FloatCvtOp);
+ 0x3: fcvt_lu_s({{
+ uint32_t temp;
+ float fs1 = reinterpret_cast<float&>(temp = Fs1_bits);
+
+ if (fs1 < 0.0) {
+ Rd = 0;
+ FFLAGS |= FloatInvalid;
} else {
- Rd = 1 << 9;
+ Rd = (uint64_t)fs1;
+ if (fetestexcept(FE_INVALID)) {
+ Rd = numeric_limits<uint64_t>::max();
+ feclearexcept(FE_INEXACT);
+ }
}
- break;
- case FP_ZERO:
- if (std::signbit(Fs1)) {
- Rd = 1 << 3;
- } else {
- Rd = 1 << 4;
+ }}, FloatCvtOp);
+ }
+ 0x61: decode CONV_SGN {
+ 0x0: fcvt_w_d({{
+ Rd_sd = (int32_t)Fs1;
+ if (fetestexcept(FE_INVALID)) {
+ if (Fs1 < 0.0) {
+ Rd_sd = numeric_limits<int32_t>::min();
+ } else {
+ Rd_sd = numeric_limits<int32_t>::max();
+ }
+ feclearexcept(FE_INEXACT);
}
- break;
- case FP_SUBNORMAL:
- if (std::signbit(Fs1)) {
- Rd = 1 << 2;
+ }}, FloatCvtOp);
+ 0x1: fcvt_wu_d({{
+ if (Fs1 < 0.0) {
+ Rd = 0;
+ FFLAGS |= FloatInvalid;
} else {
- Rd = 1 << 5;
+ Rd = (uint32_t)Fs1;
+ if (fetestexcept(FE_INVALID)) {
+ Rd = numeric_limits<uint64_t>::max();
+ feclearexcept(FE_INEXACT);
+ }
+ }
+ }}, FloatCvtOp);
+ 0x2: fcvt_l_d({{
+ Rd_sd = Fs1;
+ if (fetestexcept(FE_INVALID)) {
+ if (Fs1 < 0.0) {
+ Rd_sd = numeric_limits<int64_t>::min();
+ } else {
+ Rd_sd = numeric_limits<int64_t>::max();
+ }
+ feclearexcept(FE_INEXACT);
}
- break;
- case FP_NORMAL:
- if (std::signbit(Fs1)) {
- Rd = 1 << 1;
+ }}, FloatCvtOp);
+ 0x3: fcvt_lu_d({{
+ if (Fs1 < 0.0) {
+ Rd = 0;
+ FFLAGS |= FloatInvalid;
} else {
- Rd = 1 << 6;
+ Rd = (uint64_t)Fs1;
+ if (fetestexcept(FE_INVALID)) {
+ Rd = numeric_limits<uint64_t>::max();
+ feclearexcept(FE_INEXACT);
+ }
}
- break;
- default:
- panic("Unknown classification for operand.");
- break;
- }
- }});
- }
- 0x78: fmv_s_x({{
- Fd_bits = (uint64_t)Rs1_uw;
- }}, FloatCvtOp);
- 0x79: fmv_d_x({{
- Fd_bits = Rs1;
- }}, FloatCvtOp);
- }
- }
-
- 0x63: decode FUNCT3 {
- format BOp {
- 0x0: beq({{
- if (Rs1 == Rs2) {
- NPC = PC + imm;
- } else {
- NPC = NPC;
+ }}, FloatCvtOp);
}
- }}, IsDirectControl, IsCondControl);
- 0x1: bne({{
- if (Rs1 != Rs2) {
- NPC = PC + imm;
- } else {
- NPC = NPC;
+ 0x68: decode CONV_SGN {
+ 0x0: fcvt_s_w({{
+ float temp = (float)Rs1_sw;
+ Fd_bits = (uint64_t)reinterpret_cast<uint32_t&>(temp);
+ }}, FloatCvtOp);
+ 0x1: fcvt_s_wu({{
+ float temp = (float)Rs1_uw;
+ Fd_bits = (uint64_t)reinterpret_cast<uint32_t&>(temp);
+ }}, FloatCvtOp);
+ 0x2: fcvt_s_l({{
+ float temp = (float)Rs1_sd;
+ Fd_bits = (uint64_t)reinterpret_cast<uint32_t&>(temp);
+ }}, FloatCvtOp);
+ 0x3: fcvt_s_lu({{
+ float temp = (float)Rs1;
+ Fd_bits = (uint64_t)reinterpret_cast<uint32_t&>(temp);
+ }}, FloatCvtOp);
}
- }}, IsDirectControl, IsCondControl);
- 0x4: blt({{
- if (Rs1_sd < Rs2_sd) {
- NPC = PC + imm;
- } else {
- NPC = NPC;
+ 0x69: decode CONV_SGN {
+ 0x0: fcvt_d_w({{
+ Fd = (double)Rs1_sw;
+ }}, FloatCvtOp);
+ 0x1: fcvt_d_wu({{
+ Fd = (double)Rs1_uw;
+ }}, FloatCvtOp);
+ 0x2: fcvt_d_l({{
+ Fd = (double)Rs1_sd;
+ }}, FloatCvtOp);
+ 0x3: fcvt_d_lu({{
+ Fd = (double)Rs1;
+ }}, FloatCvtOp);
}
- }}, IsDirectControl, IsCondControl);
- 0x5: bge({{
- if (Rs1_sd >= Rs2_sd) {
- NPC = PC + imm;
- } else {
- NPC = NPC;
- }
- }}, IsDirectControl, IsCondControl);
- 0x6: bltu({{
- if (Rs1 < Rs2) {
- NPC = PC + imm;
- } else {
- NPC = NPC;
+ 0x70: decode ROUND_MODE {
+ 0x0: fmv_x_s({{
+ Rd = (uint32_t)Fs1_bits;
+ if ((Rd&0x80000000) != 0) {
+ Rd |= (0xFFFFFFFFULL << 32);
+ }
+ }}, FloatCvtOp);
+ 0x1: fclass_s({{
+ uint32_t temp;
+ float fs1 = reinterpret_cast<float&>(temp = Fs1_bits);
+ switch (fpclassify(fs1)) {
+ case FP_INFINITE:
+ if (signbit(fs1)) {
+ Rd = 1 << 0;
+ } else {
+ Rd = 1 << 7;
+ }
+ break;
+ case FP_NAN:
+ if (issignalingnan(fs1)) {
+ Rd = 1 << 8;
+ } else {
+ Rd = 1 << 9;
+ }
+ break;
+ case FP_ZERO:
+ if (signbit(fs1)) {
+ Rd = 1 << 3;
+ } else {
+ Rd = 1 << 4;
+ }
+ break;
+ case FP_SUBNORMAL:
+ if (signbit(fs1)) {
+ Rd = 1 << 2;
+ } else {
+ Rd = 1 << 5;
+ }
+ break;
+ case FP_NORMAL:
+ if (signbit(fs1)) {
+ Rd = 1 << 1;
+ } else {
+ Rd = 1 << 6;
+ }
+ break;
+ default:
+ panic("Unknown classification for operand.");
+ break;
+ }
+ }});
}
- }}, IsDirectControl, IsCondControl);
- 0x7: bgeu({{
- if (Rs1 >= Rs2) {
- NPC = PC + imm;
- } else {
- NPC = NPC;
+ 0x71: decode ROUND_MODE {
+ 0x0: fmv_x_d({{
+ Rd = Fs1_bits;
+ }}, FloatCvtOp);
+ 0x1: fclass_d({{
+ switch (fpclassify(Fs1)) {
+ case FP_INFINITE:
+ if (signbit(Fs1)) {
+ Rd = 1 << 0;
+ } else {
+ Rd = 1 << 7;
+ }
+ break;
+ case FP_NAN:
+ if (issignalingnan(Fs1)) {
+ Rd = 1 << 8;
+ } else {
+ Rd = 1 << 9;
+ }
+ break;
+ case FP_ZERO:
+ if (signbit(Fs1)) {
+ Rd = 1 << 3;
+ } else {
+ Rd = 1 << 4;
+ }
+ break;
+ case FP_SUBNORMAL:
+ if (signbit(Fs1)) {
+ Rd = 1 << 2;
+ } else {
+ Rd = 1 << 5;
+ }
+ break;
+ case FP_NORMAL:
+ if (signbit(Fs1)) {
+ Rd = 1 << 1;
+ } else {
+ Rd = 1 << 6;
+ }
+ break;
+ default:
+ panic("Unknown classification for operand.");
+ break;
+ }
+ }});
}
- }}, IsDirectControl, IsCondControl);
+ 0x78: fmv_s_x({{
+ Fd_bits = (uint64_t)Rs1_uw;
+ }}, FloatCvtOp);
+ 0x79: fmv_d_x({{
+ Fd_bits = Rs1;
+ }}, FloatCvtOp);
+ }
}
- }
-
- 0x67: decode FUNCT3 {
- 0x0: Jump::jalr({{
- Rd = NPC;
- NPC = (imm + Rs1) & (~0x1);
- }}, IsIndirectControl, IsUncondControl, IsCall);
- }
-
- 0x6f: JOp::jal({{
- Rd = NPC;
- NPC = PC + imm;
- }}, IsDirectControl, IsUncondControl, IsCall);
- 0x73: decode FUNCT3 {
- format SystemOp {
- 0x0: decode FUNCT12 {
- 0x0: ecall({{
- fault = std::make_shared<SyscallFault>();
- }}, IsSerializeAfter, IsNonSpeculative, IsSyscall, No_OpClass);
- 0x1: ebreak({{
- fault = std::make_shared<BreakpointFault>();
- }}, IsSerializeAfter, IsNonSpeculative, No_OpClass);
- 0x100: eret({{
- fault = std::make_shared<UnimplementedFault>("eret");
- }}, No_OpClass);
+ 0x18: decode FUNCT3 {
+ format BOp {
+ 0x0: beq({{
+ if (Rs1 == Rs2) {
+ NPC = PC + imm;
+ } else {
+ NPC = NPC;
+ }
+ }}, IsDirectControl, IsCondControl);
+ 0x1: bne({{
+ if (Rs1 != Rs2) {
+ NPC = PC + imm;
+ } else {
+ NPC = NPC;
+ }
+ }}, IsDirectControl, IsCondControl);
+ 0x4: blt({{
+ if (Rs1_sd < Rs2_sd) {
+ NPC = PC + imm;
+ } else {
+ NPC = NPC;
+ }
+ }}, IsDirectControl, IsCondControl);
+ 0x5: bge({{
+ if (Rs1_sd >= Rs2_sd) {
+ NPC = PC + imm;
+ } else {
+ NPC = NPC;
+ }
+ }}, IsDirectControl, IsCondControl);
+ 0x6: bltu({{
+ if (Rs1 < Rs2) {
+ NPC = PC + imm;
+ } else {
+ NPC = NPC;
+ }
+ }}, IsDirectControl, IsCondControl);
+ 0x7: bgeu({{
+ if (Rs1 >= Rs2) {
+ NPC = PC + imm;
+ } else {
+ NPC = NPC;
+ }
+ }}, IsDirectControl, IsCondControl);
}
}
- format CSROp {
- 0x1: csrrw({{
- Rd = xc->readMiscReg(csr);
- xc->setMiscReg(csr, Rs1);
- }}, IsNonSpeculative, No_OpClass);
- 0x2: csrrs({{
- Rd = xc->readMiscReg(csr);
- if (Rs1 != 0) {
- xc->setMiscReg(csr, Rd | Rs1);
- }
- }}, IsNonSpeculative, No_OpClass);
- 0x3: csrrc({{
- Rd = xc->readMiscReg(csr);
- if (Rs1 != 0) {
- xc->setMiscReg(csr, Rd & ~Rs1);
- }
- }}, IsNonSpeculative, No_OpClass);
- 0x5: csrrwi({{
- Rd = xc->readMiscReg(csr);
- xc->setMiscReg(csr, uimm);
- }}, IsNonSpeculative, No_OpClass);
- 0x6: csrrsi({{
- Rd = xc->readMiscReg(csr);
- if (uimm != 0) {
- xc->setMiscReg(csr, Rd | uimm);
- }
- }}, IsNonSpeculative, No_OpClass);
- 0x7: csrrci({{
- Rd = xc->readMiscReg(csr);
- if (uimm != 0) {
- xc->setMiscReg(csr, Rd & ~uimm);
+
+ 0x19: decode FUNCT3 {
+ 0x0: Jump::jalr({{
+ Rd = NPC;
+ NPC = (imm + Rs1) & (~0x1);
+ }}, IsIndirectControl, IsUncondControl, IsCall);
+ }
+
+ 0x1b: JOp::jal({{
+ Rd = NPC;
+ NPC = PC + imm;
+ }}, IsDirectControl, IsUncondControl, IsCall);
+
+ 0x1c: decode FUNCT3 {
+ format SystemOp {
+ 0x0: decode FUNCT12 {
+ 0x0: ecall({{
+ fault = make_shared<SyscallFault>();
+ }}, IsSerializeAfter, IsNonSpeculative, IsSyscall,
+ No_OpClass);
+ 0x1: ebreak({{
+ fault = make_shared<BreakpointFault>();
+ }}, IsSerializeAfter, IsNonSpeculative, No_OpClass);
+ 0x100: eret({{
+ fault = make_shared<UnimplementedFault>("eret");
+ }}, No_OpClass);
}
- }}, IsNonSpeculative, No_OpClass);
+ }
+ format CSROp {
+ 0x1: csrrw({{
+ Rd = xc->readMiscReg(csr);
+ xc->setMiscReg(csr, Rs1);
+ }}, IsNonSpeculative, No_OpClass);
+ 0x2: csrrs({{
+ Rd = xc->readMiscReg(csr);
+ if (Rs1 != 0) {
+ xc->setMiscReg(csr, Rd | Rs1);
+ }
+ }}, IsNonSpeculative, No_OpClass);
+ 0x3: csrrc({{
+ Rd = xc->readMiscReg(csr);
+ if (Rs1 != 0) {
+ xc->setMiscReg(csr, Rd & ~Rs1);
+ }
+ }}, IsNonSpeculative, No_OpClass);
+ 0x5: csrrwi({{
+ Rd = xc->readMiscReg(csr);
+ xc->setMiscReg(csr, uimm);
+ }}, IsNonSpeculative, No_OpClass);
+ 0x6: csrrsi({{
+ Rd = xc->readMiscReg(csr);
+ if (uimm != 0) {
+ xc->setMiscReg(csr, Rd | uimm);
+ }
+ }}, IsNonSpeculative, No_OpClass);
+ 0x7: csrrci({{
+ Rd = xc->readMiscReg(csr);
+ if (uimm != 0) {
+ xc->setMiscReg(csr, Rd & ~uimm);
+ }
+ }}, IsNonSpeculative, No_OpClass);
+ }
}
}
-}
+} \ No newline at end of file
diff --git a/src/arch/riscv/isa/formats/compressed.isa b/src/arch/riscv/isa/formats/compressed.isa
new file mode 100644
index 000000000..1fd2319fd
--- /dev/null
+++ b/src/arch/riscv/isa/formats/compressed.isa
@@ -0,0 +1,102 @@
+// -*- mode:c++ -*-
+
+// Copyright (c) 2015 RISC-V Foundation
+// Copyright (c) 2017 The University of Virginia
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met: redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer;
+// redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution;
+// neither the name of the copyright holders nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Authors: Alec Roelke
+
+output header {{
+ /**
+ * Base class for compressed operations that work only on registers
+ */
+ class CompRegOp : public RiscvStaticInst
+ {
+ protected:
+ /// Constructor
+ CompRegOp(const char *mnem, MachInst _machInst, OpClass __opClass)
+ : RiscvStaticInst(mnem, _machInst, __opClass)
+ {}
+
+ std::string
+ generateDisassembly(Addr pc, const SymbolTable *symtab) const;
+ };
+}};
+
+output decoder {{
+ std::string
+ CompRegOp::generateDisassembly(Addr pc, const SymbolTable *symtab) const
+ {
+ std::stringstream ss;
+ ss << mnemonic << ' ' << registerName(_destRegIdx[0]) << ", " <<
+ registerName(_srcRegIdx[0]);
+ return ss.str();
+ }
+}};
+
+def format CROp(code, *opt_flags) {{
+ iop = InstObjParams(name, Name, 'CompRegOp', code, opt_flags)
+ header_output = BasicDeclare.subst(iop)
+ decoder_output = BasicConstructor.subst(iop)
+ decode_block = BasicDecode.subst(iop)
+ exec_output = BasicExecute.subst(iop)
+}};
+
+def format CIOp(imm_code, code, *opt_flags) {{
+ regs = ['_destRegIdx[0]','_srcRegIdx[0]']
+ iop = InstObjParams(name, Name, 'ImmOp',
+ {'code': code, 'imm_code': imm_code,
+ 'regs': ','.join(regs)}, opt_flags)
+ header_output = ImmDeclare.subst(iop)
+ decoder_output = ImmConstructor.subst(iop)
+ decode_block = BasicDecode.subst(iop)
+ exec_output = ImmExecute.subst(iop)
+}};
+
+def format CUIOp(imm_code, code, *opt_flags) {{
+ regs = ['_destRegIdx[0]','_srcRegIdx[0]']
+ iop = InstObjParams(name, Name, 'UImmOp',
+ {'code': code, 'imm_code': imm_code,
+ 'regs': ','.join(regs)}, opt_flags)
+ header_output = ImmDeclare.subst(iop)
+ decoder_output = ImmConstructor.subst(iop)
+ decode_block = BasicDecode.subst(iop)
+ exec_output = ImmExecute.subst(iop)
+}};
+
+def format CompressedLoad(ldisp_code, memacc_code,
+ ea_code, mem_flags=[], inst_flags=[]) {{
+ (header_output, decoder_output, decode_block, exec_output) = \
+ LoadStoreBase(name, Name, ldisp_code, ea_code, memacc_code, mem_flags,
+ inst_flags, 'Load', exec_template_base='Load')
+}};
+
+def format CompressedStore(sdisp_code, memacc_code,
+ ea_code, mem_flags=[], inst_flags=[]) {{
+ (header_output, decoder_output, decode_block, exec_output) = \
+ LoadStoreBase(name, Name, sdisp_code, ea_code, memacc_code, mem_flags,
+ inst_flags, 'Store', exec_template_base='Store')
+}}; \ No newline at end of file
diff --git a/src/arch/riscv/isa/formats/formats.isa b/src/arch/riscv/isa/formats/formats.isa
index e13cac263..df2b3b84a 100644
--- a/src/arch/riscv/isa/formats/formats.isa
+++ b/src/arch/riscv/isa/formats/formats.isa
@@ -33,11 +33,14 @@
// Include the basic format
##include "basic.isa"
-//Include the type formats
+// Include the type formats
##include "standard.isa"
##include "mem.isa"
##include "fp.isa"
##include "amo.isa"
+// Include formats for nonstandard extensions
+##include "compressed.isa"
+
// Include the unknown
##include "unknown.isa"
diff --git a/src/arch/riscv/isa/formats/mem.isa b/src/arch/riscv/isa/formats/mem.isa
index 4ae8eb41a..9b6bc9eb5 100644
--- a/src/arch/riscv/isa/formats/mem.isa
+++ b/src/arch/riscv/isa/formats/mem.isa
@@ -46,11 +46,8 @@ output header {{
/// Constructor
Load(const char *mnem, ExtMachInst _machInst, OpClass __opClass)
- : RiscvStaticInst(mnem, _machInst, __opClass), ldisp(IMM12)
- {
- if (IMMSIGN > 0)
- ldisp |= ~((uint64_t)0xFFF);
- }
+ : RiscvStaticInst(mnem, _machInst, __opClass), ldisp(0)
+ {}
std::string
generateDisassembly(Addr pc, const SymbolTable *symtab) const;
@@ -68,9 +65,9 @@ output header {{
/// Constructor
Store(const char *mnem, ExtMachInst _machInst, OpClass __opClass)
- : RiscvStaticInst(mnem, _machInst, __opClass), sdisp(IMM5)
+ : RiscvStaticInst(mnem, _machInst, __opClass), sdisp(0)
{
- sdisp |= IMM7 << 5;
+ sdisp = IMM5 | (IMM7 << 5);
if (IMMSIGN > 0)
sdisp |= ~((uint64_t)0xFFF);
}
@@ -143,6 +140,7 @@ def template LoadStoreConstructor {{
%(base_class)s("%(mnemonic)s", machInst, %(op_class)s)
{
%(constructor)s;
+ %(offset_code)s;
}
}};
@@ -168,16 +166,17 @@ def template EACompExecute {{
}};
let {{
-def LoadStoreBase(name, Name, ea_code, memacc_code, mem_flags, inst_flags,
- base_class, postacc_code='', decode_template=BasicDecode,
+def LoadStoreBase(name, Name, offset_code, ea_code, memacc_code, mem_flags,
+ inst_flags, base_class, postacc_code='', decode_template=BasicDecode,
exec_template_base=''):
# Make sure flags are in lists (convert to lists if not).
mem_flags = makeList(mem_flags)
- inst_flags = makeList(inst_flags) # + ['IsNonSpeculative']
+ inst_flags = makeList(inst_flags)
iop = InstObjParams(name, Name, base_class,
- { 'ea_code':ea_code, 'memacc_code':memacc_code,
- 'postacc_code':postacc_code }, inst_flags)
+ {'offset_code': offset_code, 'ea_code': ea_code,
+ 'memacc_code': memacc_code, 'postacc_code': postacc_code },
+ inst_flags)
if mem_flags:
mem_flags = [ 'Request::%s' % flag for flag in mem_flags ]
@@ -342,14 +341,24 @@ def template StoreCompleteAcc {{
def format Load(memacc_code, ea_code = {{EA = Rs1 + ldisp;}}, mem_flags=[],
inst_flags=[]) {{
+ offset_code = """
+ ldisp = IMM12;
+ if (IMMSIGN > 0)
+ ldisp |= ~((uint64_t)0xFFF);
+ """
(header_output, decoder_output, decode_block, exec_output) = \
- LoadStoreBase(name, Name, ea_code, memacc_code, mem_flags, inst_flags,
- 'Load', exec_template_base='Load')
+ LoadStoreBase(name, Name, offset_code, ea_code, memacc_code, mem_flags,
+ inst_flags, 'Load', exec_template_base='Load')
}};
def format Store(memacc_code, ea_code={{EA = Rs1 + sdisp;}}, mem_flags=[],
inst_flags=[]) {{
+ offset_code = """
+ sdisp = IMM5 | (IMM7 << 5);
+ if (IMMSIGN > 0)
+ sdisp |= ~((uint64_t)0xFFF);
+ """
(header_output, decoder_output, decode_block, exec_output) = \
- LoadStoreBase(name, Name, ea_code, memacc_code, mem_flags, inst_flags,
- 'Store', exec_template_base='Store')
+ LoadStoreBase(name, Name, offset_code, ea_code, memacc_code, mem_flags,
+ inst_flags, 'Store', exec_template_base='Store')
}};
diff --git a/src/arch/riscv/isa/includes.isa b/src/arch/riscv/isa/includes.isa
index 443db6786..82d1794fd 100644
--- a/src/arch/riscv/isa/includes.isa
+++ b/src/arch/riscv/isa/includes.isa
@@ -65,6 +65,7 @@ output decoder {{
#include "sim/full_system.hh"
using namespace RiscvISA;
+using namespace std;
}};
output exec {{
@@ -90,4 +91,5 @@ output exec {{
#include "sim/system.hh"
using namespace RiscvISA;
+using namespace std;
}};
diff --git a/src/arch/riscv/isa/operands.isa b/src/arch/riscv/isa/operands.isa
index 7a8385d0c..8dc1a3b82 100644
--- a/src/arch/riscv/isa/operands.isa
+++ b/src/arch/riscv/isa/operands.isa
@@ -49,6 +49,12 @@ def operands {{
'Rs1': ('IntReg', 'ud', 'RS1', 'IsInteger', 2),
'Rs2': ('IntReg', 'ud', 'RS2', 'IsInteger', 3),
'Rt': ('IntReg', 'ud', 'AMOTempReg', 'IsInteger', 4),
+ 'Rc1': ('IntReg', 'ud', 'RC1', 'IsInteger', 2),
+ 'Rc2': ('IntReg', 'ud', 'RC2', 'IsInteger', 3),
+ 'Rp1': ('IntReg', 'ud', 'RP1 + 8', 'IsInteger', 2),
+ 'Rp2': ('IntReg', 'ud', 'RP2 + 8', 'IsInteger', 3),
+ 'ra': ('IntReg', 'ud', 'ReturnAddrReg', 'IsInteger', 1),
+ 'sp': ('IntReg', 'ud', 'StackPointerReg', 'IsInteger', 2),
'Fd': ('FloatReg', 'df', 'FD', 'IsFloating', 1),
'Fd_bits': ('FloatReg', 'ud', 'FD', 'IsFloating', 1),
@@ -58,6 +64,12 @@ def operands {{
'Fs2_bits': ('FloatReg', 'ud', 'FS2', 'IsFloating', 3),
'Fs3': ('FloatReg', 'df', 'FS3', 'IsFloating', 4),
'Fs3_bits': ('FloatReg', 'ud', 'FS3', 'IsFloating', 4),
+ 'Fc1': ('FloatReg', 'df', 'FC1', 'IsFloating', 1),
+ 'Fc1_bits': ('FloatReg', 'ud', 'FC1', 'IsFloating', 1),
+ 'Fc2': ('FloatReg', 'df', 'FC2', 'IsFloating', 2),
+ 'Fc2_bits': ('FloatReg', 'ud', 'FC2', 'IsFloating', 2),
+ 'Fp2': ('FloatReg', 'df', 'FP2 + 8', 'IsFloating', 2),
+ 'Fp2_bits': ('FloatReg', 'ud', 'FP2 + 8', 'IsFloating', 2),
#Memory Operand
'Mem': ('Mem', 'ud', None, ('IsMemRef', 'IsLoad', 'IsStore'), 5),
diff --git a/src/arch/riscv/types.hh b/src/arch/riscv/types.hh
index 976a9e70e..f17d0b235 100644
--- a/src/arch/riscv/types.hh
+++ b/src/arch/riscv/types.hh
@@ -12,7 +12,7 @@
* unmodified and in its entirety in all distributions of the software,
* modified or unmodified, in source code or in binary form.
*
- * Copyright (c) 2016 The University of Virginia
+ * Copyright (c) 2017 The University of Virginia
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -50,11 +50,35 @@
namespace RiscvISA
{
+
typedef uint32_t MachInst;
typedef uint64_t ExtMachInst;
-typedef GenericISA::UPCState<MachInst> PCState;
-}
+class PCState : public GenericISA::UPCState<MachInst>
+{
+ private:
+ bool _compressed;
+
+ public:
+ PCState() : UPCState() { _compressed = false; }
+ PCState(Addr val) : UPCState(val) { _compressed = false; }
+ void compressed(bool c) { _compressed = c; }
+ bool compressed() const { return _compressed; }
+
+ bool
+ branching() const
+ {
+ if (_compressed) {
+ return npc() != pc() + sizeof(MachInst)/2 ||
+ nupc() != upc() + 1;
+ } else {
+ return npc() != pc() + sizeof(MachInst) ||
+ nupc() != upc() + 1;
+ }
+ }
+};
+
+}
-#endif // __ARCH_RISCV_TYPES_HH__
+#endif // __ARCH_RISCV_TYPES_HH__ \ No newline at end of file