// -*- mode:c++ -*-
// Copyright (c) 2015 RISC-V Foundation
// Copyright (c) 2017 The University of Virginia
// Copyright (c) 2020 Barkhausen Institut
// Copyright (c) 2021 StreamComputing Corp
// Copyright (c) 2022 Google LLC
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met: redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer;
// redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution;
// neither the name of the copyright holders nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
////////////////////////////////////////////////////////////////////
//
// The RISC-V ISA decoder
//
// In theory, every register value should be sign-extended when an
// instruction operates below the full MXLEN width. Doing that for
// addresses, however, would produce out-of-range memory accesses, since
// the simulator always treats addresses as uint64. So we zero-extend
// PC-related registers and memory addresses, and sign-extend everything
// else.
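//
// A sketch of the width helpers used throughout this file (semantics
// assumed from their definitions elsewhere in the ISA description):
//   rvSext(x)      - on RV32, sign-extend x<31:0> to 64 bits; identity on RV64
//   rvZext(x)      - on RV32, zero-extend x<31:0> to 64 bits; identity on RV64
//   rvSelect(a, b) - selects a on RV32 and b on RV64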
decode QUADRANT default Unknown::unknown() {
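// Quadrant 0b00: compressed loads/stores, plus c.addi4spn.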
0x0: decode COPCODE {
0x0: CIAddi4spnOp::c_addi4spn({{
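// Reassemble the scattered CIW-format immediate nzuimm[9:2], a
// non-zero multiple of 4 to be added to sp.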
imm = CIMM8<1:1> << 2 |
CIMM8<0:0> << 3 |
CIMM8<7:6> << 4 |
CIMM8<5:2> << 6;
}}, {{
if (imm == 0)
return std::make_shared<IllegalInstFault>("immediate = 0",
machInst);
Rp2 = rvSext(sp + imm);
}}, uint64_t);
format CompressedLoad {
0x1: c_fld({{
offset = CIMM3 << 3 | CIMM2 << 6;
}}, {{
STATUS status = xc->readMiscReg(MISCREG_STATUS);
if (status.fs == FPUStatus::OFF)
return std::make_shared<IllegalInstFault>("FPU is off",
machInst);
Fp2_bits = Mem;
}}, {{
EA = rvZext(Rp1 + offset);
}});
0x2: c_lw({{
offset = CIMM2<1:1> << 2 |
CIMM3 << 3 |
CIMM2<0:0> << 6;
}}, {{
Rp2_sd = Mem_sw;
}}, {{
EA = rvZext(Rp1 + offset);
}});
0x3: decode RVTYPE {
0x0: c_flw({{
offset = CIMM2<1:1> << 2 |
CIMM3 << 3 |
CIMM2<0:0> << 6;
}}, {{
STATUS status = xc->readMiscReg(MISCREG_STATUS);
if (status.fs == FPUStatus::OFF)
return std::make_shared<IllegalInstFault>("FPU is off",
machInst);
freg_t fd = freg(f32(Mem_uw));
Fp2_bits = fd.v;
}}, {{
EA = (uint32_t)(Rp1_uw + offset);
}});
0x1: c_ld({{
offset = CIMM3 << 3 | CIMM2 << 6;
}}, {{
Rp2_sd = Mem_sd;
}}, {{
EA = Rp1 + offset;
}});
}
}
format CompressedStore {
0x5: c_fsd({{
offset = CIMM3 << 3 | CIMM2 << 6;
}}, {{
STATUS status = xc->readMiscReg(MISCREG_STATUS);
if (status.fs == FPUStatus::OFF)
return std::make_shared<IllegalInstFault>("FPU is off",
machInst);
Mem = Fp2_bits;
}}, {{
EA = rvZext(Rp1 + offset);
}});
0x6: c_sw({{
offset = CIMM2<1:1> << 2 |
CIMM3 << 3 |
CIMM2<0:0> << 6;
}}, {{
Mem_uw = Rp2_uw;
}}, ea_code={{
EA = rvZext(Rp1 + offset);
}});
0x7: decode RVTYPE {
0x0: c_fsw({{
offset = CIMM2<1:1> << 2 |
CIMM3 << 3 |
CIMM2<0:0> << 6;
}}, {{
STATUS status = xc->readMiscReg(MISCREG_STATUS);
if (status.fs == FPUStatus::OFF)
return std::make_shared<IllegalInstFault>("FPU is off",
machInst);
Mem_uw = unboxF32(boxF32(Fs2_bits));
}}, {{
EA = (uint32_t)(Rp1_uw + offset);
}});
0x1: c_sd({{
offset = CIMM3 << 3 | CIMM2 << 6;
}}, {{
Mem_ud = Rp2_ud;
}}, {{
EA = Rp1 + offset;
}});
}
}
}
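// Quadrant 0b01: compressed immediate/ALU operations and control flow.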
0x1: decode COPCODE {
0x0: CIOp::c_addi({{
imm = sext<6>(CIMM5 | (CIMM1 << 5));
}}, {{
// rd == x0 with imm != 0, and rd != x0 with imm == 0, are
// HINTs; both execute as the ordinary add below.
Rc1_sd = rvSext(Rc1_sd + imm);
}});
0x1: decode RVTYPE {
0x0: CJOp::c_jal({{
ra_sw = NPC_uw;
NPC_uw = PC_uw + imm;
}}, IsDirectControl, IsUncondControl, IsCall);
0x1: CIOp::c_addiw({{
imm = sext<6>(CIMM5 | (CIMM1 << 5));
}}, {{
if (RC1 == 0) {
return std::make_shared<IllegalInstFault>(
"source reg x0", machInst);
}
Rc1_sw = (int32_t)(Rc1_sw + imm);
}});
}
0x2: CIOp::c_li({{
imm = sext<6>(CIMM5 | (CIMM1 << 5));
}}, {{
// RC1 == 0 is HINT
Rc1_sd = imm;
}});
0x3: decode RC1 {
0x2: CIOp::c_addi16sp({{
imm = sext<10>((CIMM5<4:4> << 4) |
(CIMM5<0:0> << 5) |
(CIMM5<3:3> << 6) |
(CIMM5<2:1> << 7) |
(CIMM1 << 9));
}}, {{
if (imm == 0) {
return std::make_shared<IllegalInstFault>(
"immediate = 0", machInst);
}
sp_sd = rvSext(sp_sd + imm);
}});
default: CIOp::c_lui({{
imm = sext<6>(CIMM5 | (CIMM1 << 5)) << 12;
}}, {{
// RC1 == 0 is HINT
if (imm == 0) {
return std::make_shared<IllegalInstFault>(
"immediate = 0", machInst);
}
Rc1_sd = imm;
}});
}
0x4: decode CFUNCT2HIGH {
format CIOp {
0x0: c_srli({{
imm = CIMM5 | (CIMM1 << 5);
}}, {{
if (rvSelect((bool)CIMM1, false)) {
return std::make_shared<IllegalInstFault>(
"shmat[5] != 0", machInst);
}
if (imm == 0) {
// C.SRLI64, HINT for RV32/RV64
}
// The MSB can never be 1, so no sign extension is needed.
Rp1 = rvZext(Rp1) >> imm;
}}, uint64_t);
0x1: c_srai({{
imm = CIMM5 | (CIMM1 << 5);
}}, {{
if (rvSelect((bool)CIMM1, false)) {
return std::make_shared<IllegalInstFault>(
"shmat[5] != 0", machInst);
}
if (imm == 0) {
// C.SRAI64, HINT for RV32/RV64
}
Rp1_sd = rvSext(Rp1_sd) >> imm;
}}, uint64_t);
0x2: c_andi({{
imm = CIMM5;
if (CIMM1 > 0)
imm |= ~((uint64_t)0x1F);
}}, {{
Rp1 = rvSext(Rp1 & imm);
}}, uint64_t);
}
format CompressedROp {
0x3: decode CFUNCT1 {
0x0: decode CFUNCT2LOW {
0x0: c_sub({{
Rp1 = rvSext(Rp1 - Rp2);
}});
0x1: c_xor({{
Rp1 = rvSext(Rp1 ^ Rp2);
}});
0x2: c_or({{
Rp1 = rvSext(Rp1 | Rp2);
}});
0x3: c_and({{
Rp1 = rvSext(Rp1 & Rp2);
}});
}
0x1: decode RVTYPE {
0x1: decode CFUNCT2LOW {
0x0: c_subw({{
Rp1_sd = (int32_t)Rp1_sd - Rp2_sw;
}});
0x1: c_addw({{
Rp1_sd = (int32_t)Rp1_sd + Rp2_sw;
}});
}
}
}
}
}
0x5: CJOp::c_j({{
NPC = rvZext(PC + imm);
}}, IsDirectControl, IsUncondControl);
format CBOp {
0x6: c_beqz({{
if (rvSext(Rp1) == 0)
NPC = rvZext(PC + imm);
else
NPC = NPC;
}}, IsDirectControl, IsCondControl);
0x7: c_bnez({{
if (rvSext(Rp1) != 0)
NPC = rvZext(PC + imm);
else
NPC = NPC;
}}, IsDirectControl, IsCondControl);
}
}
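// Quadrant 0b10: c.slli, stack-pointer-relative loads/stores, and
// register-based jumps, moves, and c.ebreak.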
0x2: decode COPCODE {
0x0: CIOp::c_slli({{
imm = CIMM5 | (CIMM1 << 5);
}}, {{
if (rvSelect((bool)CIMM1, false)) {
return std::make_shared<IllegalInstFault>(
"shmat[5] != 0", machInst);
}
if (imm == 0) {
// C.SLLI64, HINT for RV32/RV64
}
// RC1 == 0 is HINT
Rc1 = rvSext(Rc1 << imm);
}}, uint64_t);
format CompressedLoad {
0x1: c_fldsp({{
offset = CIMM5<4:3> << 3 |
CIMM1 << 5 |
CIMM5<2:0> << 6;
}}, {{
Fc1_bits = Mem;
}}, {{
EA = rvZext(sp + offset);
}});
0x2: c_lwsp({{
offset = CIMM5<4:2> << 2 |
CIMM1 << 5 |
CIMM5<1:0> << 6;
}}, {{
if (RC1 == 0) {
return std::make_shared<IllegalInstFault>(
"source reg x0", machInst);
}
Rc1_sw = Mem_sw;
}}, {{
EA = rvZext(sp + offset);
}});
0x3: decode RVTYPE {
0x0: c_flwsp({{
offset = CIMM5<4:2> << 2 |
CIMM1 << 5 |
CIMM5<1:0> << 6;
}}, {{
freg_t fd;
fd = freg(f32(Mem_uw));
Fd_bits = fd.v;
}}, {{
EA = (uint32_t)(sp_uw + offset);
}});
0x1: c_ldsp({{
offset = CIMM5<4:3> << 3 |
CIMM1 << 5 |
CIMM5<2:0> << 6;
}}, {{
if (RC1 == 0) {
return std::make_shared<IllegalInstFault>(
"source reg x0", machInst);
}
Rc1_sd = Mem_sd;
}}, {{
EA = sp + offset;
}});
}
}
0x4: decode CFUNCT1 {
0x0: decode RC2 {
0x0: Jump::c_jr({{
if (RC1 == 0) {
return std::make_shared<IllegalInstFault>(
"source reg x0", machInst);
}
NPC = rvZext(Rc1);
}}, IsIndirectControl, IsUncondControl);
default: CROp::c_mv({{
// RC1 == 0 is HINT
Rc1 = rvSext(Rc2);
}});
}
0x1: decode RC2 {
0x0: decode RC1 {
0x0: SystemOp::c_ebreak({{
return std::make_shared<BreakpointFault>(
xc->pcState());
}}, IsSerializeAfter, IsNonSpeculative, No_OpClass);
default: Jump::c_jalr({{
ra = rvSext(NPC);
NPC = rvZext(Rc1);
}}, IsIndirectControl, IsUncondControl, IsCall);
}
default: CompressedROp::c_add({{
// RC1 == 0 is HINT
Rc1_sd = rvSext(Rc1_sd + Rc2_sd);
}});
}
}
format CompressedStore {
0x5: c_fsdsp({{
offset = CIMM6<5:3> << 3 |
CIMM6<2:0> << 6;
}}, {{
Mem_ud = Fc2_bits;
}}, {{
EA = rvZext(sp + offset);
}});
0x6: c_swsp({{
offset = CIMM6<5:2> << 2 |
CIMM6<1:0> << 6;
}}, {{
Mem_uw = Rc2_uw;
}}, {{
EA = rvZext(sp + offset);
}});
0x7: decode RVTYPE {
0x0: c_fswsp({{
offset = CIMM6<5:2> << 2 |
CIMM6<1:0> << 6;
}}, {{
Mem_uw = unboxF32(boxF32(Fs2_bits));
}}, {{
EA = (uint32_t)(sp_uw + offset);
}});
0x1: c_sdsp({{
offset = CIMM6<5:3> << 3 |
CIMM6<2:0> << 6;
}}, {{
Mem = Rc2;
}}, {{
EA = sp + offset;
}});
}
}
}
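// Quadrant 0b11: uncompressed 32-bit instructions, decoded by major
// opcode (OPCODE5).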
0x3: decode OPCODE5 {
0x00: decode FUNCT3 {
format Load {
0x0: lb({{
Rd_sd = Mem_sb;
}});
0x1: lh({{
Rd_sd = Mem_sh;
}});
0x2: lw({{
Rd_sd = Mem_sw;
}});
0x3: decode RVTYPE {
0x1: ld({{
Rd_sd = Mem_sd;
}});
}
0x4: lbu({{
Rd = Mem_ub;
}});
0x5: lhu({{
Rd = Mem_uh;
}});
0x6: decode RVTYPE {
0x1: lwu({{
Rd = Mem_uw;
}});
}
}
}
0x01: decode FUNCT3 {
format Load {
0x1: flh({{
STATUS status = xc->readMiscReg(MISCREG_STATUS);
if (status.fs == FPUStatus::OFF)
return std::make_shared<IllegalInstFault>(
"FPU is off", machInst);
freg_t fd;
fd = freg(f16(Mem_uh));
Fd_bits = fd.v;
}}, inst_flags=FloatMemReadOp);
0x2: flw({{
STATUS status = xc->readMiscReg(MISCREG_STATUS);
if (status.fs == FPUStatus::OFF)
return std::make_shared<IllegalInstFault>(
"FPU is off", machInst);
freg_t fd;
fd = freg(f32(Mem_uw));
Fd_bits = fd.v;
}}, inst_flags=FloatMemReadOp);
0x3: fld({{
STATUS status = xc->readMiscReg(MISCREG_STATUS);
if (status.fs == FPUStatus::OFF)
return std::make_shared<IllegalInstFault>(
"FPU is off", machInst);
freg_t fd;
fd = freg(f64(Mem));
Fd_bits = fd.v;
}}, inst_flags=FloatMemReadOp);
}
}
0x03: decode FUNCT3 {
format FenceOp {
0x0: fence({{
}}, uint64_t, IsReadBarrier, IsWriteBarrier, No_OpClass);
0x1: fence_i({{
}}, uint64_t, IsNonSpeculative, IsSerializeAfter, No_OpClass);
}
}
0x04: decode FUNCT3 {
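// Major opcode OP-IMM: register-immediate ALU operations, plus the
// Zb*/Zk* immediate forms distinguished by the upper funct bits
// (FS3/FS2) below.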
0x1: decode FS3 {
format IOp {
0x00: slli({{
if (rvSelect((bool)SHAMT6BIT5, false)) {
return std::make_shared<IllegalInstFault>(
"shmat[5] != 0", machInst);
}
Rd = rvSext(Rs1 << imm);
}}, imm_type = uint64_t, imm_code = {{ imm = SHAMT6; }});
0x01: decode RVTYPE {
0x0: zip({{
Rd_sw = _rvk_emu_zip_32(Rs1_sw);
}}, imm_code = {{ imm = SHAMT5; }});
}
0x02: decode FS2 {
0x0: sha256sum0({{
Rd_sw = _rvk_emu_sha256sum0(Rs1_sw);
}});
0x1: sha256sum1({{
Rd_sw = _rvk_emu_sha256sum1(Rs1_sw);
}});
0x2: sha256sig0({{
Rd_sw = _rvk_emu_sha256sig0(Rs1_sw);
}});
0x3: sha256sig1({{
Rd_sw = _rvk_emu_sha256sig1(Rs1_sw);
}});
0x4: decode RVTYPE {
0x1: sha512sum0({{
Rd_sd = _rvk_emu_sha512sum0(Rs1_sd);
}});
}
0x5: decode RVTYPE {
0x1: sha512sum1({{
Rd_sd = _rvk_emu_sha512sum1(Rs1_sd);
}});
}
0x6: decode RVTYPE {
0x1: sha512sig0({{
Rd_sd = _rvk_emu_sha512sig0(Rs1_sd);
}});
}
0x7: decode RVTYPE {
0x1: sha512sig1({{
Rd_sd = _rvk_emu_sha512sig1(Rs1_sd);
}});
}
0x8: sm3p0({{
Rd_sw = _rvk_emu_sm3p0(Rs1_sw);
}});
0x9: sm3p1({{
Rd_sw = _rvk_emu_sm3p1(Rs1_sw);
}});
}
0x05: bseti({{
if (rvSelect((bool)SHAMT6BIT5, false)) {
return std::make_shared<IllegalInstFault>(
"shmat[5] != 0", machInst);
}
uint64_t index = imm & rvSelect(32 - 1, 64 - 1);
Rd = rvSext(Rs1 | (UINT64_C(1) << index));
}}, imm_type = uint64_t, imm_code = {{ imm = SHAMT6; }});
0x06: decode BIT24 {
0x0: decode RVTYPE {
0x1: aes64im({{
Rd_sd = _rvk_emu_aes64im(Rs1_sd);
}});
}
0x1: decode RVTYPE {
0x1: aes64ks1i({{
Rd_sd = _rvk_emu_aes64ks1i(Rs1_sd, imm);
}}, imm_type = int32_t, imm_code={{ imm = RNUM; }});
}
}
0x09: bclri({{
if (rvSelect((bool)SHAMT6BIT5, false)) {
return std::make_shared<IllegalInstFault>(
"shmat[5] != 0", machInst);
}
uint64_t index = imm & rvSelect(32 - 1, 64 - 1);
Rd = rvSext(Rs1 & (~(UINT64_C(1) << index)));
}}, imm_type = uint64_t, imm_code = {{ imm = SHAMT6; }});
0x0d: binvi({{
if (rvSelect((bool)SHAMT6BIT5, false)) {
return std::make_shared<IllegalInstFault>(
"shmat[5] != 0", machInst);
}
uint64_t index = imm & rvSelect(32 - 1, 64 - 1);
Rd = rvSext(Rs1 ^ (UINT64_C(1) << index));
}}, imm_type = uint64_t, imm_code = {{ imm = SHAMT6; }});
}
format ROp {
0x0c: decode RS2 {
0x00: clz({{
Rd = (machInst.rv_type == RV32) ? clz32(Rs1) : clz64(Rs1);
}});
0x01: ctz({{
Rd = (machInst.rv_type == RV32) ? ctz32(Rs1) : ctz64(Rs1);
}});
0x02: cpop({{
Rd = (machInst.rv_type == RV32) ? popCount(Rs1<31:0>) : popCount(Rs1);
}});
0x04: sext_b({{
Rd = sext<8>(Rs1_ub);
}});
0x05: sext_h({{
Rd = sext<16>(Rs1_uh);
}});
}
}
}
format IOp {
0x0: addi({{
Rd_sd = rvSext(Rs1_sd + imm);
}});
0x2: slti({{
Rd = (rvSext(Rs1_sd) < imm) ? 1 : 0;
}});
0x3: sltiu({{
Rd = (rvZext(Rs1) < imm) ? 1 : 0;
}}, uint64_t, imm_code = {{ imm = rvZext(sext<12>(IMM12)); }});
0x4: xori({{
Rd = rvSext(Rs1 ^ imm);
}}, uint64_t);
0x5: decode FS3 {
0x0: srli({{
if (rvSelect((bool)SHAMT6BIT5, false)) {
return std::make_shared<IllegalInstFault>(
"shmat[5] != 0", machInst);
}
Rd = rvSext(rvZext(Rs1) >> imm);
}}, imm_type = uint64_t, imm_code = {{ imm = SHAMT6; }});
0x1: decode RVTYPE {
0x0: unzip({{
Rd_sw = _rvk_emu_unzip_32(Rs1_sw);
}}, imm_code = {{ imm = SHAMT5; }});
}
0x5: orc_b({{
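// orc.b: write 0xff into each byte of Rd whose corresponding
// byte of Rs1 is non-zero, and 0x00 otherwise.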
uint64_t result = 0;
result |= (Rs1<7:0> ? UINT64_C(0xff) : 0x0);
result |= (Rs1<15:8> ? UINT64_C(0xff) : 0x0) << 8;
result |= (Rs1<23:16> ? UINT64_C(0xff) : 0x0) << 16;
result |= (Rs1<31:24> ? UINT64_C(0xff) : 0x0) << 24;
result |= (Rs1<39:32> ? UINT64_C(0xff) : 0x0) << 32;
result |= (Rs1<47:40> ? UINT64_C(0xff) : 0x0) << 40;
result |= (Rs1<55:48> ? UINT64_C(0xff) : 0x0) << 48;
result |= (Rs1<63:56> ? UINT64_C(0xff) : 0x0) << 56;
Rd = rvSext(result);
}}, imm_type = uint64_t, imm_code = {{ imm = SHAMT6; }});
0x8: srai({{
if (rvSelect((bool)SHAMT6BIT5, false)) {
return std::make_shared<IllegalInstFault>(
"shmat[5] != 0", machInst);
}
Rd_sd = rvSext(Rs1_sd) >> imm;
}}, imm_type = uint64_t, imm_code = {{ imm = SHAMT6; }});
0x9: bexti({{
if (rvSelect((bool)SHAMT6BIT5, false)) {
return std::make_shared<IllegalInstFault>(
"shmat[5] != 0", machInst);
}
uint64_t index = imm & rvSelect(32 - 1, 64 - 1);
Rd = (Rs1 >> index) & 0x1;
}}, imm_type = uint64_t, imm_code = {{ imm = SHAMT6; }});
0xc: rori({{
if (rvSelect((bool)SHAMT6BIT5, false)) {
return std::make_shared<IllegalInstFault>(
"shmat[5] != 0", machInst);
}
uint64_t xlen = rvSelect(32, 64);
Rd = rvSext((rvZext(Rs1) >> imm)
| (Rs1 << ((xlen - imm) & (xlen - 1))));
}}, imm_type = uint64_t, imm_code = {{ imm = SHAMT6; }});
0xd: decode RS2 {
0x18: ROp::rev8({{
if (rvSelect((bool)SHAMT6BIT5, false)) {
return std::make_shared<IllegalInstFault>(
"shmat[5] != 0", machInst);
}
if (machInst.rv_type == RV32) {
Rd_sd = _rvk_emu_grev_32(Rs1_sd, 0x18);
} else {
Rd_sd = _rvk_emu_grev_64(Rs1_sd, 0x38);
}
}});
0x07: ROp::brev8({{
if (machInst.rv_type == RV32) {
Rd_sd = _rvk_emu_brev8_32(Rs1_sd);
} else {
Rd_sd = _rvk_emu_brev8_64(Rs1_sd);
}
}});
}
}
0x6: ori({{
Rd = rvSext(Rs1 | imm);
}}, uint64_t);
0x7: andi({{
Rd = rvSext(Rs1 & imm);
}}, uint64_t);
}
}
0x05: UOp::auipc({{
Rd = rvSext(PC + (sext<20>(imm) << 12));
}});
0x06: decode RVTYPE {
0x1: decode FUNCT3 {
format IOp {
0x0: addiw({{
Rd_sw = (int32_t)(Rs1_sw + imm);
}}, int32_t);
0x1: decode FS3 {
0x0: slliw({{
Rd_sd = Rs1_sw << imm;
}}, imm_type = uint64_t, imm_code = {{ imm = SHAMT5; }});
0x1: slli_uw({{
Rd = ((uint64_t)(Rs1_uw)) << imm;
}}, imm_type = uint64_t, imm_code = {{ imm = SHAMT6; }});
0xc: decode FS2 {
0x0: clzw({{
Rd = clz32(Rs1);
}});
0x1: ctzw({{
Rd = ctz32(Rs1);
}});
0x2: cpopw({{
Rd = popCount(Rs1<31:0>);
}});
}
}
0x5: decode FS3 {
0x0: srliw({{
Rd_sd = (int32_t)(Rs1_uw >> imm);
}}, imm_type = uint64_t, imm_code = {{ imm = SHAMT5; }});
0x8: sraiw({{
Rd_sd = Rs1_sw >> imm;
}}, imm_type = uint64_t, imm_code = {{ imm = SHAMT5; }});
0xc: roriw({{
Rd = (int32_t) ((Rs1_uw >> imm) | (Rs1_uw << ((32 - imm) & (32 - 1))));
}}, imm_type = uint64_t, imm_code = {{ imm = SHAMT5; }});
}
}
}
}
0x08: decode FUNCT3 {
format Store {
0x0: sb({{
Mem_ub = Rs2_ub;
}});
0x1: sh({{
Mem_uh = Rs2_uh;
}});
0x2: sw({{
Mem_uw = Rs2_uw;
}});
0x3: decode RVTYPE {
0x1: sd({{
Mem_ud = Rs2_ud;
}});
}
}
}
0x09: decode FUNCT3 {
format Store {
0x1: fsh({{
STATUS status = xc->readMiscReg(MISCREG_STATUS);
if (status.fs == FPUStatus::OFF)
return std::make_shared<IllegalInstFault>(
"FPU is off", machInst);
Mem_uh = unboxF16(boxF16(Fs2_bits));
}}, inst_flags=FloatMemWriteOp);
0x2: fsw({{
STATUS status = xc->readMiscReg(MISCREG_STATUS);
if (status.fs == FPUStatus::OFF)
return std::make_shared<IllegalInstFault>(
"FPU is off", machInst);
Mem_uw = unboxF32(boxF32(Fs2_bits));
}}, inst_flags=FloatMemWriteOp);
0x3: fsd({{
STATUS status = xc->readMiscReg(MISCREG_STATUS);
if (status.fs == FPUStatus::OFF)
return std::make_shared<IllegalInstFault>(
"FPU is off", machInst);
Mem_ud = Fs2_bits;
}}, inst_flags=FloatMemWriteOp);
}
}
0x0b: decode FUNCT3 {
0x2: decode AMOFUNCT {
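// lr/sc carry the LLSC memory flag so the memory system can track
// the reservation. For the AMOs below, the first code block reads
// the old memory value into Rd and the second builds the atomic
// read-modify-write functor; ATOMIC_RETURN_OP makes the access
// return the pre-operation value (a reading of the AtomicMemOp
// format as it is used here).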
0x2: LoadReserved::lr_w({{
Rd_sd = Mem_sw;
}}, mem_flags=LLSC);
0x3: StoreCond::sc_w({{
Mem_uw = Rs2_uw;
}}, {{
Rd = rvSext(result);
}}, inst_flags=IsStoreConditional, mem_flags=LLSC);
0x0: AtomicMemOp::amoadd_w({{
Rd_sd = Mem_sw;
}}, {{
TypedAtomicOpFunctor<int32_t> *amo_op =
new AtomicGenericOp<int32_t>(Rs2_sw,
[](int32_t* b, int32_t a){ *b += a; });
}}, mem_flags=ATOMIC_RETURN_OP);
0x1: AtomicMemOp::amoswap_w({{
Rd_sd = Mem_sw;
}}, {{
TypedAtomicOpFunctor<uint32_t> *amo_op =
new AtomicGenericOp<uint32_t>(Rs2_uw,
[](uint32_t* b, uint32_t a){ *b = a; });
}}, mem_flags=ATOMIC_RETURN_OP);
0x4: AtomicMemOp::amoxor_w({{
Rd_sd = Mem_sw;
}}, {{
TypedAtomicOpFunctor<uint32_t> *amo_op =
new AtomicGenericOp<uint32_t>(Rs2_uw,
[](uint32_t* b, uint32_t a){ *b ^= a; });
}}, mem_flags=ATOMIC_RETURN_OP);
0x8: AtomicMemOp::amoor_w({{
Rd_sd = Mem_sw;
}}, {{
TypedAtomicOpFunctor<uint32_t> *amo_op =
new AtomicGenericOp<uint32_t>(Rs2_uw,
[](uint32_t* b, uint32_t a){ *b |= a; });
}}, mem_flags=ATOMIC_RETURN_OP);
0xc: AtomicMemOp::amoand_w({{
Rd_sd = Mem_sw;
}}, {{
TypedAtomicOpFunctor<uint32_t> *amo_op =
new AtomicGenericOp<uint32_t>(Rs2_uw,
[](uint32_t* b, uint32_t a){ *b &= a; });
}}, mem_flags=ATOMIC_RETURN_OP);
0x10: AtomicMemOp::amomin_w({{
Rd_sd = Mem_sw;
}}, {{
TypedAtomicOpFunctor<int32_t> *amo_op =
new AtomicGenericOp<int32_t>(Rs2_sw,
[](int32_t* b, int32_t a){ if (a < *b) *b = a; });
}}, mem_flags=ATOMIC_RETURN_OP);
0x14: AtomicMemOp::amomax_w({{
Rd_sd = Mem_sw;
}}, {{
TypedAtomicOpFunctor<int32_t> *amo_op =
new AtomicGenericOp<int32_t>(Rs2_sw,
[](int32_t* b, int32_t a){ if (a > *b) *b = a; });
}}, mem_flags=ATOMIC_RETURN_OP);
0x18: AtomicMemOp::amominu_w({{
Rd_sd = Mem_sw;
}}, {{
TypedAtomicOpFunctor<uint32_t> *amo_op =
new AtomicGenericOp<uint32_t>(Rs2_uw,
[](uint32_t* b, uint32_t a){ if (a < *b) *b = a; });
}}, mem_flags=ATOMIC_RETURN_OP);
0x1c: AtomicMemOp::amomaxu_w({{
Rd_sd = Mem_sw;
}}, {{
TypedAtomicOpFunctor<uint32_t> *amo_op =
new AtomicGenericOp<uint32_t>(Rs2_uw,
[](uint32_t* b, uint32_t a){ if (a > *b) *b = a; });
}}, mem_flags=ATOMIC_RETURN_OP);
}
0x3: decode RVTYPE {
0x1: decode AMOFUNCT {
0x2: LoadReserved::lr_d({{
Rd_sd = Mem_sd;
}}, mem_flags=LLSC);
0x3: StoreCond::sc_d({{
Mem = Rs2;
}}, {{
Rd = result;
}}, mem_flags=LLSC, inst_flags=IsStoreConditional);
0x0: AtomicMemOp::amoadd_d({{
Rd_sd = Mem_sd;
}}, {{
TypedAtomicOpFunctor<int64_t> *amo_op =
new AtomicGenericOp<int64_t>(Rs2_sd,
[](int64_t* b, int64_t a){ *b += a; });
}}, mem_flags=ATOMIC_RETURN_OP);
0x1: AtomicMemOp::amoswap_d({{
Rd_sd = Mem_sd;
}}, {{
TypedAtomicOpFunctor<uint64_t> *amo_op =
new AtomicGenericOp<uint64_t>(Rs2_ud,
[](uint64_t* b, uint64_t a){ *b = a; });
}}, mem_flags=ATOMIC_RETURN_OP);
0x4: AtomicMemOp::amoxor_d({{
Rd_sd = Mem_sd;
}}, {{
TypedAtomicOpFunctor<uint64_t> *amo_op =
new AtomicGenericOp<uint64_t>(Rs2_ud,
[](uint64_t* b, uint64_t a){ *b ^= a; });
}}, mem_flags=ATOMIC_RETURN_OP);
0x8: AtomicMemOp::amoor_d({{
Rd_sd = Mem_sd;
}}, {{
TypedAtomicOpFunctor<uint64_t> *amo_op =
new AtomicGenericOp<uint64_t>(Rs2_ud,
[](uint64_t* b, uint64_t a){ *b |= a; });
}}, mem_flags=ATOMIC_RETURN_OP);
0xc: AtomicMemOp::amoand_d({{
Rd_sd = Mem_sd;
}}, {{
TypedAtomicOpFunctor<uint64_t> *amo_op =
new AtomicGenericOp<uint64_t>(Rs2_ud,
[](uint64_t* b, uint64_t a){ *b &= a; });
}}, mem_flags=ATOMIC_RETURN_OP);
0x10: AtomicMemOp::amomin_d({{
Rd_sd = Mem_sd;
}}, {{
TypedAtomicOpFunctor<int64_t> *amo_op =
new AtomicGenericOp<int64_t>(Rs2_sd,
[](int64_t* b, int64_t a){ if (a < *b) *b = a; });
}}, mem_flags=ATOMIC_RETURN_OP);
0x14: AtomicMemOp::amomax_d({{
Rd_sd = Mem_sd;
}}, {{
TypedAtomicOpFunctor<int64_t> *amo_op =
new AtomicGenericOp<int64_t>(Rs2_sd,
[](int64_t* b, int64_t a){ if (a > *b) *b = a; });
}}, mem_flags=ATOMIC_RETURN_OP);
0x18: AtomicMemOp::amominu_d({{
Rd_sd = Mem_sd;
}}, {{
TypedAtomicOpFunctor<uint64_t> *amo_op =
new AtomicGenericOp<uint64_t>(Rs2_ud,
[](uint64_t* b, uint64_t a){
if (a < *b) *b = a;
});
}}, mem_flags=ATOMIC_RETURN_OP);
0x1c: AtomicMemOp::amomaxu_d({{
Rd_sd = Mem_sd;
}}, {{
TypedAtomicOpFunctor<uint64_t> *amo_op =
new AtomicGenericOp<uint64_t>(Rs2_ud,
[](uint64_t* b, uint64_t a){
if (a > *b) *b = a;
});
}}, mem_flags=ATOMIC_RETURN_OP);
}
}
}
0x0c: decode FUNCT3 {
format ROp {
0x0: decode KFUNCT5 {
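// Under major opcode OP, FUNCT7 is decoded here as BS (instr[31:30])
// and KFUNCT5 (instr[29:25]) so the scalar-crypto (Zk*) encodings
// that carry a byte-select field can share this level; the field
// layout is inferred from its use below.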
0x00: decode BS {
0x0: add({{
Rd = rvSext(Rs1_sd + Rs2_sd);
}});
0x1: sub({{
Rd = rvSext(Rs1_sd - Rs2_sd);
}});
}
0x01: decode BS {
0x0: mul({{
Rd = rvSext(Rs1_sd * Rs2_sd);
}}, IntMultOp);
}
0x08: decode BS {
0x1: decode RVTYPE {
0x0: sha512sum0r({{
Rd_sw = _rvk_emu_sha512sum0r(Rs1_sw, Rs2_sw);
}});
}
}
0x09: decode BS {
0x1: decode RVTYPE {
0x0: sha512sum1r({{
Rd_sw = _rvk_emu_sha512sum1r(Rs1_sw, Rs2_sw);
}});
}
}
0x0a: decode BS {
0x1: decode RVTYPE {
0x0: sha512sig0l({{
Rd_sw = _rvk_emu_sha512sig0l(Rs1_sw, Rs2_sw);
}});
}
}
0x0b: decode BS {
0x1: decode RVTYPE {
0x0: sha512sig1l({{
Rd_sw = _rvk_emu_sha512sig1l(Rs1_sw, Rs2_sw);
}});
}
}
0x0e: decode BS {
0x1: decode RVTYPE {
0x0: sha512sig0h({{
Rd_sw = _rvk_emu_sha512sig0h(Rs1_sw, Rs2_sw);
}});
}
}
0x0f: decode BS {
0x1: decode RVTYPE {
0x0: sha512sig1h({{
Rd_sw = _rvk_emu_sha512sig1h(Rs1_sw, Rs2_sw);
}});
}
}
0x11: decode RVTYPE {
0x0: BSOp::aes32esi({{
Rd_sw = _rvk_emu_aes32esi(Rs1_sw, Rs2_sw, bs);
}});
}
0x13: decode RVTYPE {
0x0: BSOp::aes32esmi({{
Rd_sw = _rvk_emu_aes32esmi(Rs1_sw, Rs2_sw, bs);
}});
}
0x15: decode RVTYPE {
0x0: BSOp::aes32dsi({{
Rd_sw = _rvk_emu_aes32dsi(Rs1_sw, Rs2_sw, bs);
}});
}
0x17: decode RVTYPE {
0x0: BSOp::aes32dsmi({{
Rd_sw = _rvk_emu_aes32dsmi(Rs1_sw, Rs2_sw, bs);
}});
}
0x18: BSOp::sm4ed({{
Rd_sw = _rvk_emu_sm4ed(Rs1_sw, Rs2_sw, bs);
}});
0x19: decode BS {
0x0: decode RVTYPE {
0x1: aes64es({{
Rd_sd = _rvk_emu_aes64es(Rs1_sd, Rs2_sd);
}});
}
}
0x1a: BSOp::sm4ks({{
Rd_sw = _rvk_emu_sm4ks(Rs1_sw, Rs2_sw, bs);
}});
0x1b: decode BS {
0x0: decode RVTYPE {
0x1: aes64esm({{
Rd_sd = _rvk_emu_aes64esm(Rs1_sd, Rs2_sd);
}});
}
}
0x1d: decode BS {
0x0: decode RVTYPE {
0x1: aes64ds({{
Rd_sd = _rvk_emu_aes64ds(Rs1_sd, Rs2_sd);
}});
}
}
0x1f: decode BS {
0x0: decode RVTYPE {
0x1: aes64dsm({{
Rd_sd = _rvk_emu_aes64dsm(Rs1_sd, Rs2_sd);
}});
}
0x1: decode RVTYPE {
0x1: aes64ks2({{
Rd_sd = _rvk_emu_aes64ks2(Rs1_sd, Rs2_sd);
}});
}
}
}
0x1: decode FUNCT7 {
0x0: sll({{
Rd = rvSext(Rs1 << rvSelect(Rs2<4:0>, Rs2<5:0>));
}});
0x1: mulh({{
if (machInst.rv_type == RV32) {
Rd_sd = mulh_32(Rs1_sd, Rs2_sd);
} else {
Rd_sd = mulh_64(Rs1_sd, Rs2_sd);
}
}}, IntMultOp);
0x5: clmul({{
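// Carry-less multiply: XOR together the copies of Rs1 shifted
// by the positions of the set bits in Rs2.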
uint64_t result = 0;
for (int i = 0; i < rvSelect(32, 64); i++) {
if ((Rs2 >> i) & 1) {
result ^= Rs1 << i;
}
}
Rd = rvSext(result);
}});
0x14: bset({{
Rs2 &= rvSelect(32 - 1, 64 - 1);
Rd = rvSext(Rs1 | (UINT64_C(1) << Rs2));
}});
0x24: bclr({{
Rs2 &= rvSelect(32 - 1, 64 - 1);
Rd = rvSext(Rs1 & (~(UINT64_C(1) << Rs2)));
}});
0x30: rol({{
uint64_t xlen = rvSelect(32, 64);
int shamt = Rs2 & (xlen - 1);
Rd = rvSext((Rs1 << shamt)
| (rvZext(Rs1) >> ((xlen - shamt) & (xlen - 1))));
}});
0x34: binv({{
Rs2 &= rvSelect(32 - 1, 64 - 1);
Rd = rvSext(Rs1 ^ (UINT64_C(1) << Rs2));
}});
}
0x2: decode FUNCT7 {
0x0: slt({{
Rd = (rvSext(Rs1_sd) < rvSext(Rs2_sd)) ? 1 : 0;
}});
0x1: mulhsu({{
if (machInst.rv_type == RV32) {
Rd_sd = mulhsu_32(Rs1_sd, Rs2);
} else {
Rd_sd = mulhsu_64(Rs1_sd, Rs2);
}
}}, IntMultOp);
0x5: clmulr({{
uint64_t result = 0;
uint64_t xlen = rvSelect(32, 64);
uint64_t zextRs1 = rvZext(Rs1);
for (int i = 0; i < xlen; i++) {
if ((Rs2 >> i) & 1) {
result ^= zextRs1 >> (xlen-i-1);
}
}
Rd = rvSext(result);
}});
0x10: sh1add({{
Rd = rvSext((Rs1 << 1) + Rs2);
}});
0x14: xperm4({{
if (machInst.rv_type == RV32) {
Rd_sd = _rvk_emu_xperm4_32(Rs1_sd, Rs2_sd);
} else {
Rd_sd = _rvk_emu_xperm4_64(Rs1_sd, Rs2_sd);
}
}});
}
0x3: decode FUNCT7 {
0x0: sltu({{
Rd = (rvZext(Rs1) < rvZext(Rs2)) ? 1 : 0;
}});
0x1: mulhu({{
if (machInst.rv_type == RV32) {
Rd = (int32_t)mulhu_32(Rs1, Rs2);
} else {
Rd = mulhu_64(Rs1, Rs2);
}
}}, IntMultOp);
0x5: clmulh({{
uint64_t result = 0;
uint64_t xlen = rvSelect(32, 64);
uint64_t zextRs1 = rvZext(Rs1);
for (int i = 1; i < xlen; i++) {
if ((Rs2 >> i) & 1) {
result ^= zextRs1 >> (xlen-i);
}
}
// The MSB can never be 1, no need to sign extend.
Rd = result;
}});
}
0x4: decode FUNCT7 {
0x0: xor({{
Rd = rvSext(Rs1 ^ Rs2);
}});
0x1: div({{
if (machInst.rv_type == RV32) {
Rd_sd = div<int32_t>(Rs1, Rs2);
} else {
Rd_sd = div<int64_t>(Rs1, Rs2);
}
}}, IntDivOp);
0x4: pack({{
int xlen = rvSelect(32, 64);
Rd = rvSext(
(bits(Rs2, xlen/2-1, 0) << (xlen / 2)) |
bits(Rs1, xlen/2-1, 0)
);
}});
0x5: min({{
Rd_sd = std::min(rvSext(Rs1_sd), rvSext(Rs2_sd));
}});
0x10: sh2add({{
Rd = rvSext((Rs1 << 2) + Rs2);
}});
0x14: xperm8({{
if (machInst.rv_type == RV32) {
Rd_sd = _rvk_emu_xperm8_32(Rs1_sd, Rs2_sd);
} else {
Rd_sd = _rvk_emu_xperm8_64(Rs1_sd, Rs2_sd);
}
}});
0x20: xnor({{
Rd = rvSext(~(Rs1 ^ Rs2));
}});
}
0x5: decode FUNCT7 {
0x0: srl({{
Rd = rvSext(rvZext(Rs1) >>
rvSelect(Rs2<4:0>, Rs2<5:0>));
}});
0x1: divu({{
if (machInst.rv_type == RV32) {
Rd = (int32_t)divu<uint32_t>(Rs1, Rs2);
} else {
Rd = divu<uint64_t>(Rs1, Rs2);
}
}}, IntDivOp);
0x20: sra({{
Rd = rvSext(Rs1_sd) >> rvSelect(Rs2<4:0>, Rs2<5:0>);
}});
0x5: minu({{
Rd = rvSext(std::min(rvZext(Rs1), rvZext(Rs2)));
}});
0x24: bext({{
Rs2 &= (rvSelect(32, 64) - 1);
// No sign extension is needed because the MSB is always 0.
Rd = (Rs1 >> Rs2) & 0x1;
}});
0x30: ror({{
uint64_t xlen = rvSelect(32, 64);
int shamt = Rs2 & (xlen - 1);
Rd = rvSext((rvZext(Rs1) >> shamt)
| (Rs1 << ((xlen - shamt) & (xlen - 1))));
}});
}
0x6: decode FUNCT7 {
0x0: or({{
Rd = rvSext(Rs1 | Rs2);
}});
0x1: rem({{
if (machInst.rv_type == RV32) {
Rd_sd = rem<int32_t>(Rs1, Rs2);
} else {
Rd_sd = rem<int64_t>(Rs1, Rs2);
}
}}, IntDivOp);
0x5: max({{
Rd_sd = std::max(rvSext(Rs1_sd), rvSext(Rs2_sd));
}});
0x10: sh3add({{
Rd = rvSext((Rs1 << 3) + Rs2);
}});
0x20: orn({{
Rd = rvSext(Rs1 | (~Rs2));
}});
}
0x7: decode FUNCT7 {
0x0: and({{
Rd = rvSext(Rs1 & Rs2);
}});
0x1: remu({{
if (machInst.rv_type == RV32) {
Rd = (int32_t)remu<uint32_t>(Rs1, Rs2);
} else {
Rd = remu<uint64_t>(Rs1, Rs2);
}
}}, IntDivOp);
0x4: packh({{
// No sign extension is needed because the MSB is always 0.
Rd = (Rs2_ub << 8) | Rs1_ub;
}});
0x5: maxu({{
Rd = rvSext(std::max(rvZext(Rs1), rvZext(Rs2)));
}});
0x20: andn({{
Rd = rvSext(Rs1 & (~Rs2));
}});
}
}
}
0x0d: UOp::lui({{
Rd = (sext<20>(imm) << 12);
}});
0x0e: decode RVTYPE {
0x1: decode FUNCT3 {
format ROp {
0x0: decode FUNCT7 {
0x0: addw({{
Rd_sd = Rs1_sw + Rs2_sw;
}});
0x1: mulw({{
Rd_sd = (int32_t)(Rs1_sw*Rs2_sw);
}}, IntMultOp);
0x4: add_uw({{
Rd = Rs1_uw + Rs2;
}});
0x20: subw({{
Rd_sd = Rs1_sw - Rs2_sw;
}});
}
0x1: decode FUNCT7 {
0x0: sllw({{
Rd_sd = Rs1_sw << Rs2<4:0>;
}});
0x30: rolw({{
int shamt = Rs2 & (32 - 1);
Rd = (int32_t) ((Rs1_uw << shamt) | (Rs1_uw >> ((32 - shamt) & (32 - 1))));
}});
}
0x2: decode FUNCT7 {
0x10: sh1add_uw({{
Rd = (((uint64_t)Rs1_uw) << 1) + Rs2;
}});
}
0x4: decode FUNCT7 {
0x1: divw({{
Rd_sd = div<int32_t>(Rs1, Rs2);
}}, IntDivOp);
0x4: packw({{
Rd_sd = sext<32>((Rs2_uh << 16) | Rs1_uh);
}});
0x10: sh2add_uw({{
Rd = (((uint64_t)Rs1_uw) << 2) + Rs2;
}});
}
0x5: decode FUNCT7 {
0x0: srlw({{
Rd_sd = (int32_t)(Rs1_uw >> Rs2<4:0>);
}});
0x1: divuw({{
Rd = sext<32>(divu<uint32_t>(Rs1, Rs2));
}}, IntDivOp);
0x20: sraw({{
Rd_sd = Rs1_sw >> Rs2<4:0>;
}});
0x30: rorw({{
int shamt = Rs2 & (32 - 1);
Rd = (int32_t) ((Rs1_uw >> shamt) | (Rs1_uw << ((32 - shamt) & (32 - 1))));
}});
}
0x6: decode FUNCT7 {
0x1: remw({{
Rd_sd = rem<int32_t>(Rs1, Rs2);
}}, IntDivOp);
0x10: sh3add_uw({{
Rd = (((uint64_t)Rs1_uw) << 3) + Rs2;
}});
}
0x7: remuw({{
Rd = sext<32>(remu<uint32_t>(Rs1, Rs2));
}}, IntDivOp);
}
}
}
format FPROp {
0x10: decode FUNCT2 {
0x0: fmadd_s({{
RM_REQUIRED;
freg_t fd;
fd = freg(f32_mulAdd(f32(freg(Fs1_bits)),
f32(freg(Fs2_bits)),
f32(freg(Fs3_bits))));
Fd_bits = fd.v;
}}, FloatMultAccOp);
0x1: fmadd_d({{
RM_REQUIRED;
freg_t fd;
fd = freg(f64_mulAdd(f64(freg(Fs1_bits)),
f64(freg(Fs2_bits)),
f64(freg(Fs3_bits))));
Fd_bits = fd.v;
}}, FloatMultAccOp);
0x2: fmadd_h({{
RM_REQUIRED;
freg_t fd;
fd = freg(f16_mulAdd(f16(freg(Fs1_bits)),
f16(freg(Fs2_bits)),
f16(freg(Fs3_bits))));
Fd_bits = fd.v;
}}, FloatMultAccOp);
}
0x11: decode FUNCT2 {
0x0: fmsub_s({{
RM_REQUIRED;
freg_t fd;
fd = freg(f32_mulAdd(f32(freg(Fs1_bits)),
f32(freg(Fs2_bits)),
f32(f32(freg(Fs3_bits)).v ^
mask(31, 31))));
Fd_bits = fd.v;
}}, FloatMultAccOp);
0x1: fmsub_d({{
RM_REQUIRED;
freg_t fd;
fd = freg(f64_mulAdd(f64(freg(Fs1_bits)),
f64(freg(Fs2_bits)),
f64(f64(freg(Fs3_bits)).v ^
mask(63, 63))));
Fd_bits = fd.v;
}}, FloatMultAccOp);
0x2: fmsub_h({{
RM_REQUIRED;
freg_t fd;
fd = freg(f16_mulAdd(f16(freg(Fs1_bits)),
f16(freg(Fs2_bits)),
f16(f16(freg(Fs3_bits)).v ^
mask(15, 15))));
Fd_bits = fd.v;
}}, FloatMultAccOp);
}
0x12: decode FUNCT2 {
0x0: fnmsub_s({{
RM_REQUIRED;
freg_t fd;
fd = freg(f32_mulAdd(f32(f32(freg(Fs1_bits)).v ^
mask(31, 31)),
f32(freg(Fs2_bits)),
f32(freg(Fs3_bits))));
Fd_bits = fd.v;
}}, FloatMultAccOp);
0x1: fnmsub_d({{
RM_REQUIRED;
freg_t fd;
fd = freg(f64_mulAdd(f64(f64(freg(Fs1_bits)).v ^
mask(63, 63)),
f64(freg(Fs2_bits)),
f64(freg(Fs3_bits))));
Fd_bits = fd.v;
}}, FloatMultAccOp);
0x2: fnmsub_h({{
RM_REQUIRED;
freg_t fd;
fd = freg(f16_mulAdd(f16(f16(freg(Fs1_bits)).v ^
mask(15, 15)),
f16(freg(Fs2_bits)),
f16(freg(Fs3_bits))));
Fd_bits = fd.v;
}}, FloatMultAccOp);
}
0x13: decode FUNCT2 {
0x0: fnmadd_s({{
RM_REQUIRED;
freg_t fd;
fd = freg(f32_mulAdd(f32(f32(freg(Fs1_bits)).v ^
mask(31, 31)),
f32(freg(Fs2_bits)),
f32(f32(freg(Fs3_bits)).v ^
mask(31, 31))));
Fd_bits = fd.v;
}}, FloatMultAccOp);
0x1: fnmadd_d({{
RM_REQUIRED;
freg_t fd;
fd = freg(f64_mulAdd(f64(f64(freg(Fs1_bits)).v ^
mask(63, 63)),
f64(freg(Fs2_bits)),
f64(f64(freg(Fs3_bits)).v ^
mask(63, 63))));
Fd_bits = fd.v;
}}, FloatMultAccOp);
0x2: fnmadd_h({{
RM_REQUIRED;
freg_t fd;
fd = freg(f16_mulAdd(f16(f16(freg(Fs1_bits)).v ^
mask(15, 15)),
f16(freg(Fs2_bits)),
f16(f16(freg(Fs3_bits)).v ^
mask(15, 15))));
Fd_bits = fd.v;
}}, FloatMultAccOp);
}
0x14: decode FUNCT7 {
0x0: fadd_s({{
RM_REQUIRED;
freg_t fd;
fd = freg(f32_add(f32(freg(Fs1_bits)),
f32(freg(Fs2_bits))));
Fd_bits = fd.v;
}}, FloatAddOp);
0x1: fadd_d({{
RM_REQUIRED;
freg_t fd;
fd = freg(f64_add(f64(freg(Fs1_bits)),
f64(freg(Fs2_bits))));
Fd_bits = fd.v;
}}, FloatAddOp);
0x2: fadd_h({{
RM_REQUIRED;
freg_t fd;
fd = freg(f16_add(f16(freg(Fs1_bits)),
f16(freg(Fs2_bits))));
Fd_bits = fd.v;
}}, FloatAddOp);
0x4: fsub_s({{
RM_REQUIRED;
freg_t fd;
fd = freg(f32_sub(f32(freg(Fs1_bits)),
f32(freg(Fs2_bits))));
Fd_bits = fd.v;
}}, FloatAddOp);
0x5: fsub_d({{
RM_REQUIRED;
freg_t fd;
fd = freg(f64_sub(f64(freg(Fs1_bits)),
f64(freg(Fs2_bits))));
Fd_bits = fd.v;
}}, FloatAddOp);
0x6: fsub_h({{
RM_REQUIRED;
freg_t fd;
fd = freg(f16_sub(f16(freg(Fs1_bits)),
f16(freg(Fs2_bits))));
Fd_bits = fd.v;
}}, FloatAddOp);
0x8: fmul_s({{
RM_REQUIRED;
freg_t fd;
fd = freg(f32_mul(f32(freg(Fs1_bits)),
f32(freg(Fs2_bits))));
Fd_bits = fd.v;
}}, FloatMultOp);
0x9: fmul_d({{
RM_REQUIRED;
freg_t fd;
fd = freg(f64_mul(f64(freg(Fs1_bits)),
f64(freg(Fs2_bits))));
Fd_bits = fd.v;
}}, FloatMultOp);
0xa: fmul_h({{
RM_REQUIRED;
freg_t fd;
fd = freg(f16_mul(f16(freg(Fs1_bits)),
f16(freg(Fs2_bits))));
Fd_bits = fd.v;
}}, FloatMultOp);
0xc: fdiv_s({{
RM_REQUIRED;
freg_t fd;
fd = freg(f32_div(f32(freg(Fs1_bits)),
f32(freg(Fs2_bits))));
Fd_bits = fd.v;
}}, FloatDivOp);
0xd: fdiv_d({{
RM_REQUIRED;
freg_t fd;
fd = freg(f64_div(f64(freg(Fs1_bits)),
f64(freg(Fs2_bits))));
Fd_bits = fd.v;
}}, FloatDivOp);
0xe: fdiv_h({{
RM_REQUIRED;
freg_t fd;
fd = freg(f16_div(f16(freg(Fs1_bits)),
f16(freg(Fs2_bits))));
Fd_bits = fd.v;
}}, FloatDivOp);
0x10: decode ROUND_MODE {
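// Single-precision sign-injection operates on the unboxed f32
// value and NaN-boxes the result back into the 64-bit register.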
0x0: fsgnj_s({{
auto sign = bits(unboxF32(Fs2_bits), 31);
Fd_bits = boxF32(insertBits(unboxF32(Fs1_bits), 31,
sign));
}}, FloatMiscOp);
0x1: fsgnjn_s({{
auto sign = ~bits(unboxF32(Fs2_bits), 31);
Fd_bits = boxF32(insertBits(unboxF32(Fs1_bits), 31,
sign));
}}, FloatMiscOp);
0x2: fsgnjx_s({{
auto sign = bits(
unboxF32(Fs1_bits) ^ unboxF32(Fs2_bits), 31);
Fd_bits = boxF32(insertBits(unboxF32(Fs1_bits), 31,
sign));
}}, FloatMiscOp);
}
0x11: decode ROUND_MODE {
0x0: fsgnj_d({{
Fd_bits = insertBits(Fs2_bits, 62, 0, Fs1_bits);
}}, FloatMiscOp);
0x1: fsgnjn_d({{
Fd_bits = insertBits(~Fs2_bits, 62, 0, Fs1_bits);
}}, FloatMiscOp);
0x2: fsgnjx_d({{
Fd_bits = insertBits(
Fs1_bits ^ Fs2_bits, 62, 0, Fs1_bits);
}}, FloatMiscOp);
}
0x12: decode ROUND_MODE {
0x0: fsgnj_h({{
auto sign = bits(unboxF16(Fs2_bits), 15);
Fd_bits = boxF16(insertBits(unboxF16(Fs1_bits), 15,
sign));
}}, FloatMiscOp);
0x1: fsgnjn_h({{
auto sign = ~bits(unboxF16(Fs2_bits), 15);
Fd_bits = boxF16(insertBits(unboxF16(Fs1_bits), 15,
sign));
}}, FloatMiscOp);
0x2: fsgnjx_h({{
auto sign = bits(
unboxF16(Fs1_bits) ^ unboxF16(Fs2_bits), 15);
Fd_bits = boxF16(insertBits(unboxF16(Fs1_bits), 15,
sign));
}}, FloatMiscOp);
}
0x14: decode ROUND_MODE {
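// fmin/fmax order -0.0 below +0.0 and produce the canonical NaN
// only when both inputs are NaN, matching IEEE 754-2019
// minimumNumber/maximumNumber.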
0x0: fmin_s({{
bool less = f32_lt_quiet(f32(freg(Fs1_bits)),
f32(freg(Fs2_bits))) ||
(f32_eq(f32(freg(Fs1_bits)),
f32(freg(Fs2_bits))) &&
bits(f32(freg(Fs1_bits)).v, 31));
Fd_bits = less ||
isNaNF32UI(f32(freg(Fs2_bits)).v) ?
freg(Fs1_bits).v : freg(Fs2_bits).v;
if (isNaNF32UI(f32(freg(Fs1_bits)).v) &&
isNaNF32UI(f32(freg(Fs2_bits)).v))
Fd_bits = f32(defaultNaNF32UI).v;
}}, FloatCmpOp);
0x1: fmax_s({{
bool greater = f32_lt_quiet(f32(freg(Fs2_bits)),
f32(freg(Fs1_bits))) ||
(f32_eq(f32(freg(Fs2_bits)),
f32(freg(Fs1_bits))) &&
bits(f32(freg(Fs2_bits)).v, 31));
Fd_bits = greater ||
isNaNF32UI(f32(freg(Fs2_bits)).v) ?
freg(Fs1_bits).v : freg(Fs2_bits).v;
if (isNaNF32UI(f32(freg(Fs1_bits)).v) &&
isNaNF32UI(f32(freg(Fs2_bits)).v))
Fd_bits = f32(defaultNaNF32UI).v;
}}, FloatCmpOp);
}
0x15: decode ROUND_MODE {
0x0: fmin_d({{
bool less = f64_lt_quiet(f64(freg(Fs1_bits)),
f64(freg(Fs2_bits))) ||
(f64_eq(f64(freg(Fs1_bits)),
f64(freg(Fs2_bits))) &&
bits(f64(freg(Fs1_bits)).v, 63));
Fd_bits = less ||
isNaNF64UI(f64(freg(Fs2_bits)).v) ?
freg(Fs1_bits).v : freg(Fs2_bits).v;
if (isNaNF64UI(f64(freg(Fs1_bits)).v) &&
isNaNF64UI(f64(freg(Fs2_bits)).v))
Fd_bits = f64(defaultNaNF64UI).v;
}}, FloatCmpOp);
0x1: fmax_d({{
bool greater =
f64_lt_quiet(f64(freg(Fs2_bits)),
f64(freg(Fs1_bits))) ||
(f64_eq(f64(freg(Fs2_bits)),
f64(freg(Fs1_bits))) &&
bits(f64(freg(Fs2_bits)).v, 63));
Fd_bits = greater ||
isNaNF64UI(f64(freg(Fs2_bits)).v) ?
freg(Fs1_bits).v : freg(Fs2_bits).v;
if (isNaNF64UI(f64(freg(Fs1_bits)).v) &&
isNaNF64UI(f64(freg(Fs2_bits)).v))
Fd_bits = f64(defaultNaNF64UI).v;
}}, FloatCmpOp);
}
0x16: decode ROUND_MODE {
0x0: fmin_h({{
bool less = f16_lt_quiet(f16(freg(Fs1_bits)),
f16(freg(Fs2_bits))) ||
(f16_eq(f16(freg(Fs1_bits)),
f16(freg(Fs2_bits))) &&
bits(f16(freg(Fs1_bits)).v, 15));
Fd_bits = less ||
isNaNF16UI(f16(freg(Fs2_bits)).v) ?
freg(Fs1_bits).v : freg(Fs2_bits).v;
if (isNaNF16UI(f16(freg(Fs1_bits)).v) &&
isNaNF16UI(f16(freg(Fs2_bits)).v))
Fd_bits = f16(defaultNaNF16UI).v;
}}, FloatCmpOp);
0x1: fmax_h({{
bool greater = f16_lt_quiet(f16(freg(Fs2_bits)),
f16(freg(Fs1_bits))) ||
(f16_eq(f16(freg(Fs2_bits)),
f16(freg(Fs1_bits))) &&
bits(f16(freg(Fs2_bits)).v, 15));
Fd_bits = greater ||
isNaNF16UI(f16(freg(Fs2_bits)).v) ?
freg(Fs1_bits).v : freg(Fs2_bits).v;
if (isNaNF16UI(f16(freg(Fs1_bits)).v) &&
isNaNF16UI(f16(freg(Fs2_bits)).v))
Fd_bits = f16(defaultNaNF16UI).v;
}}, FloatCmpOp);
}
0x20: decode CONV_SGN {
0x1: fcvt_s_d({{
RM_REQUIRED;
freg_t fd;
fd = freg(f64_to_f32(f64(freg(Fs1_bits))));
Fd_bits = fd.v;
}}, FloatCvtOp);
0x2: fcvt_s_h({{
RM_REQUIRED;
freg_t fd;
fd = freg(f16_to_f32(f16(freg(Fs1_bits))));
Fd_bits = fd.v;
}}, FloatCvtOp);
}
0x21: decode CONV_SGN {
0x0: fcvt_d_s({{
RM_REQUIRED;
freg_t fd;
fd = freg(f32_to_f64(f32(freg(Fs1_bits))));
Fd_bits = fd.v;
}}, FloatCvtOp);
0x2: fcvt_d_h({{
RM_REQUIRED;
freg_t fd;
fd = freg(f16_to_f64(f16(freg(Fs1_bits))));
Fd_bits = fd.v;
}}, FloatCvtOp);
}
0x22: decode CONV_SGN {
0x0: fcvt_h_s({{
RM_REQUIRED;
freg_t fd;
fd = freg(f32_to_f16(f32(freg(Fs1_bits))));
Fd_bits = fd.v;
}}, FloatCvtOp);
0x1: fcvt_h_d({{
RM_REQUIRED;
freg_t fd;
fd = freg(f64_to_f16(f64(freg(Fs1_bits))));
Fd_bits = fd.v;
}}, FloatCvtOp);
}
0x2c: fsqrt_s({{
if (RS2 != 0) {
return std::make_shared<IllegalInstFault>(
"source reg x1", machInst);
}
freg_t fd;
RM_REQUIRED;
fd = freg(f32_sqrt(f32(freg(Fs1_bits))));
Fd_bits = fd.v;
}}, FloatSqrtOp);
0x2d: fsqrt_d({{
if (RS2 != 0) {
return std::make_shared<IllegalInstFault>(
"source reg x1", machInst);
}
freg_t fd;
RM_REQUIRED;
fd = freg(f64_sqrt(f64(freg(Fs1_bits))));
Fd_bits = fd.v;
}}, FloatSqrtOp);
0x2e: fsqrt_h({{
if (RS2 != 0) {
return std::make_shared<IllegalInstFault>(
"source reg x1", machInst);
}
freg_t fd;
RM_REQUIRED;
fd = freg(f16_sqrt(f16(freg(Fs1_bits))));
Fd_bits = fd.v;
}}, FloatSqrtOp);
0x50: decode ROUND_MODE {
0x0: fle_s({{
Rd = f32_le(f32(freg(Fs1_bits)), f32(freg(Fs2_bits)));
}}, FloatCmpOp);
0x1: flt_s({{
Rd = f32_lt(f32(freg(Fs1_bits)), f32(freg(Fs2_bits)));
}}, FloatCmpOp);
0x2: feq_s({{
Rd = f32_eq(f32(freg(Fs1_bits)), f32(freg(Fs2_bits)));
}}, FloatCmpOp);
}
0x51: decode ROUND_MODE {
0x0: fle_d({{
Rd = f64_le(f64(freg(Fs1_bits)), f64(freg(Fs2_bits)));
}}, FloatCmpOp);
0x1: flt_d({{
Rd = f64_lt(f64(freg(Fs1_bits)), f64(freg(Fs2_bits)));
}}, FloatCmpOp);
0x2: feq_d({{
Rd = f64_eq(f64(freg(Fs1_bits)), f64(freg(Fs2_bits)));
}}, FloatCmpOp);
}
0x52: decode ROUND_MODE {
0x0: fle_h({{
Rd = f16_le(f16(freg(Fs1_bits)), f16(freg(Fs2_bits)));
}}, FloatCmpOp);
0x1: flt_h({{
Rd = f16_lt(f16(freg(Fs1_bits)), f16(freg(Fs2_bits)));
}}, FloatCmpOp);
0x2: feq_h({{
Rd = f16_eq(f16(freg(Fs1_bits)), f16(freg(Fs2_bits)));
}}, FloatCmpOp);
}
0x60: decode CONV_SGN {
0x0: fcvt_w_s({{
RM_REQUIRED;
Rd_sd = sext<32>(f32_to_i32(f32(freg(Fs1_bits)), rm,
true));
}}, FloatCvtOp);
0x1: fcvt_wu_s({{
RM_REQUIRED;
Rd = sext<32>(f32_to_ui32(f32(freg(Fs1_bits)), rm,
true));
}}, FloatCvtOp);
0x2: decode RVTYPE {
0x1: fcvt_l_s({{
RM_REQUIRED;
Rd_sd = f32_to_i64(f32(freg(Fs1_bits)), rm, true);
}}, FloatCvtOp);
}
0x3: decode RVTYPE {
0x1: fcvt_lu_s({{
RM_REQUIRED;
Rd = f32_to_ui64(f32(freg(Fs1_bits)), rm, true);
}}, FloatCvtOp);
}
}
0x61: decode CONV_SGN {
0x0: fcvt_w_d({{
RM_REQUIRED;
Rd_sd = sext<32>(f64_to_i32(f64(freg(Fs1_bits)), rm,
true));
}}, FloatCvtOp);
0x1: fcvt_wu_d({{
RM_REQUIRED;
Rd = sext<32>(f64_to_ui32(f64(freg(Fs1_bits)), rm,
true));
}}, FloatCvtOp);
0x2: decode RVTYPE {
0x1: fcvt_l_d({{
RM_REQUIRED;
Rd_sd = f64_to_i64(f64(freg(Fs1_bits)), rm, true);
}}, FloatCvtOp);
}
0x3: decode RVTYPE {
0x1: fcvt_lu_d({{
RM_REQUIRED;
Rd = f64_to_ui64(f64(freg(Fs1_bits)), rm, true);
}}, FloatCvtOp);
}
}
0x62: decode CONV_SGN {
0x0: fcvt_w_h({{
RM_REQUIRED;
Rd_sd = sext<32>(f16_to_i32(f16(freg(Fs1_bits)), rm,
true));
}}, FloatCvtOp);
0x1: fcvt_wu_h({{
RM_REQUIRED;
Rd = sext<32>(f16_to_ui32(f16(freg(Fs1_bits)), rm,
true));
}}, FloatCvtOp);
0x2: decode RVTYPE {
0x1: fcvt_l_h({{
RM_REQUIRED;
Rd_sd = f16_to_i64(f16(freg(Fs1_bits)), rm, true);
}}, FloatCvtOp);
}
0x3: decode RVTYPE {
0x1: fcvt_lu_h({{
RM_REQUIRED;
Rd = f16_to_ui64(f16(freg(Fs1_bits)), rm, true);
}}, FloatCvtOp);
}
}
0x68: decode CONV_SGN {
0x0: fcvt_s_w({{
RM_REQUIRED;
freg_t fd;
fd = freg(i32_to_f32(Rs1_sw));
Fd_bits = fd.v;
}}, FloatCvtOp);
0x1: fcvt_s_wu({{
RM_REQUIRED;
freg_t fd;
fd = freg(ui32_to_f32(Rs1_uw));
Fd_bits = fd.v;
}}, FloatCvtOp);
0x2: decode RVTYPE {
0x1: fcvt_s_l({{
RM_REQUIRED;
freg_t fd;
fd = freg(i64_to_f32(Rs1_sd));
Fd_bits = fd.v;
}}, FloatCvtOp);
}
0x3: decode RVTYPE {
0x1: fcvt_s_lu({{
RM_REQUIRED;
freg_t fd;
fd = freg(ui64_to_f32(Rs1));
Fd_bits = fd.v;
}}, FloatCvtOp);
}
}
0x69: decode CONV_SGN {
0x0: fcvt_d_w({{
RM_REQUIRED;
freg_t fd;
fd = freg(i32_to_f64(Rs1_sw));
Fd_bits = fd.v;
}}, FloatCvtOp);
0x1: fcvt_d_wu({{
RM_REQUIRED;
freg_t fd;
fd = freg(ui32_to_f64(Rs1_uw));
Fd_bits = fd.v;
}}, FloatCvtOp);
0x2: decode RVTYPE {
0x1: fcvt_d_l({{
RM_REQUIRED;
freg_t fd;
fd = freg(i64_to_f64(Rs1_sd));
Fd_bits = fd.v;
}}, FloatCvtOp);
}
0x3: decode RVTYPE {
0x1: fcvt_d_lu({{
RM_REQUIRED;
freg_t fd;
fd = freg(ui64_to_f64(Rs1));
Fd_bits = fd.v;
}}, FloatCvtOp);
}
}
0x6a: decode CONV_SGN {
0x0: fcvt_h_w({{
RM_REQUIRED;
freg_t fd;
fd = freg(i32_to_f16(Rs1_sw));
Fd_bits = fd.v;
}}, FloatCvtOp);
0x1: fcvt_h_wu({{
RM_REQUIRED;
freg_t fd;
fd = freg(ui32_to_f16(Rs1_uw));
Fd_bits = fd.v;
}}, FloatCvtOp);
0x2: decode RVTYPE {
0x1: fcvt_h_l({{
RM_REQUIRED;
freg_t fd;
fd = freg(i64_to_f16(Rs1_sd));
Fd_bits = fd.v;
}}, FloatCvtOp);
}
0x3: decode RVTYPE {
0x1: fcvt_h_lu({{
RM_REQUIRED;
freg_t fd;
fd = freg(ui64_to_f16(Rs1));
Fd_bits = fd.v;
}}, FloatCvtOp);
}
}
0x70: decode ROUND_MODE {
0x0: fmv_x_w({{
uint64_t result = (uint32_t)Fs1_bits;
if ((result&0x80000000) != 0) {
result |= (0xFFFFFFFFULL << 32);
}
Rd = result;
}}, FloatCvtOp);
0x1: fclass_s({{
Rd = rvSext(f32_classify(f32(freg(Fs1_bits))));
}}, FloatMiscOp);
}
0x71: decode ROUND_MODE {
0x0: decode RVTYPE {
0x1: fmv_x_d({{
Rd = freg(Fs1_bits).v;
}}, FloatCvtOp);
}
0x1: fclass_d({{
Rd = f64_classify(f64(freg(Fs1_bits)));
}}, FloatMiscOp);
}
0x72: decode ROUND_MODE {
0x0: fmv_x_h({{
uint64_t result = (uint16_t)Fs1_bits;
if ((result&0x8000) != 0) {
result |= (0xFFFFFFFFFFFFULL << 16);
}
Rd = result;
}}, FloatCvtOp);
0x1: fclass_h({{
Rd = f16_classify(f16(freg(Fs1_bits)));
}}, FloatMiscOp);
}
0x78: fmv_w_x({{
freg_t fd;
fd = freg(f32(Rs1_uw));
Fd_bits = fd.v;
}}, FloatCvtOp);
0x79: decode RVTYPE {
0x1: fmv_d_x({{
freg_t fd;
fd = freg(f64(Rs1));
Fd_bits = fd.v;
}}, FloatCvtOp);
}
0x7a: fmv_h_x({{
freg_t fd;
fd = freg(f16(Rs1_uh));
Fd_bits = fd.v;
}}, FloatCvtOp);
}
}
0x18: decode FUNCT3 {
format BOp {
0x0: beq({{
if (rvSext(Rs1) == rvSext(Rs2)) {
NPC = rvZext(PC + imm);
} else {
NPC = rvZext(NPC);
}
}}, IsDirectControl, IsCondControl);
0x1: bne({{
if (rvSext(Rs1) != rvSext(Rs2)) {
NPC = rvZext(PC + imm);
} else {
NPC = rvZext(NPC);
}
}}, IsDirectControl, IsCondControl);
0x4: blt({{
if (rvSext(Rs1_sd) < rvSext(Rs2_sd)) {
NPC = rvZext(PC + imm);
} else {
NPC = rvZext(NPC);
}
}}, IsDirectControl, IsCondControl);
0x5: bge({{
if (rvSext(Rs1_sd) >= rvSext(Rs2_sd)) {
NPC = rvZext(PC + imm);
} else {
NPC = rvZext(NPC);
}
}}, IsDirectControl, IsCondControl);
0x6: bltu({{
if (rvZext(Rs1) < rvZext(Rs2)) {
NPC = rvZext(PC + imm);
} else {
NPC = rvZext(NPC);
}
}}, IsDirectControl, IsCondControl);
0x7: bgeu({{
if (rvZext(Rs1) >= rvZext(Rs2)) {
NPC = rvZext(PC + imm);
} else {
NPC = rvZext(NPC);
}
}}, IsDirectControl, IsCondControl);
}
}
0x19: decode FUNCT3 {
0x0: Jump::jalr({{
Rd = rvSext(NPC);
NPC = rvZext((imm + Rs1) & (~0x1));
}}, IsIndirectControl, IsUncondControl);
}
0x1b: JOp::jal({{
Rd = rvSext(NPC);
NPC = rvZext(PC + imm);
}}, IsDirectControl, IsUncondControl);
0x1c: decode FUNCT3 {
format SystemOp {
0x0: decode FUNCT7 {
0x0: decode RS2 {
0x0: ecall({{
return std::make_shared<SyscallFault>(
(PrivilegeMode)xc->readMiscReg(MISCREG_PRV));
}}, IsSerializeAfter, IsNonSpeculative, IsSyscall,
No_OpClass);
0x1: ebreak({{
return std::make_shared<BreakpointFault>(
xc->pcState());
}}, IsSerializeAfter, IsNonSpeculative, No_OpClass);
0x2: uret({{
STATUS status = xc->readMiscReg(MISCREG_STATUS);
status.uie = status.upie;
status.upie = 1;
xc->setMiscReg(MISCREG_STATUS, status);
NPC = xc->readMiscReg(MISCREG_UEPC);
}}, IsSerializeAfter, IsNonSpeculative, IsReturn);
}
0x8: decode RS2 {
0x2: sret({{
STATUS status = xc->readMiscReg(MISCREG_STATUS);
auto pm = (PrivilegeMode)xc->readMiscReg(
MISCREG_PRV);
if (pm == PRV_U ||
(pm == PRV_S && status.tsr == 1)) {
return std::make_shared<IllegalInstFault>(
"sret in user mode or TSR enabled",
machInst);
} else {
xc->setMiscReg(MISCREG_PRV, status.spp);
status.sie = status.spie;
status.spie = 1;
status.spp = PRV_U;
xc->setMiscReg(MISCREG_STATUS, status);
NPC = xc->readMiscReg(MISCREG_SEPC);
}
}}, IsSerializeAfter, IsNonSpeculative, IsReturn);
0x5: wfi({{
STATUS status = xc->readMiscReg(MISCREG_STATUS);
auto pm = (PrivilegeMode)xc->readMiscReg(
MISCREG_PRV);
if (pm == PRV_U ||
(pm == PRV_S && status.tw == 1)) {
return std::make_shared<IllegalInstFault>(
"wfi in user mode or TW enabled",
machInst);
}
// Go to sleep only if there's no pending interrupt
// at all, including masked interrupts.
auto tc = xc->tcBase();
auto cpu = tc->getCpuPtr();
auto ic = dynamic_cast<RiscvISA::Interrupts*>(
cpu->getInterruptController(tc->threadId()));
panic_if(!ic, "Invalid Interrupt Controller.");
if (ic->readIP() == 0
&& xc->readMiscReg(MISCREG_NMIP) == 0) {
tc->quiesce();
}
}}, IsNonSpeculative, IsQuiesce,
IsSerializeAfter, No_OpClass);
}
0x9: sfence_vma({{
STATUS status = xc->readMiscReg(MISCREG_STATUS);
auto pm = (PrivilegeMode)xc->readMiscReg(MISCREG_PRV);
if (pm == PRV_U || (pm == PRV_S && status.tvm == 1)) {
return std::make_shared<IllegalInstFault>(
"sfence in user mode or TVM enabled",
machInst);
}
xc->tcBase()->getMMUPtr()->demapPage(Rs1, Rs2);
}}, IsNonSpeculative, IsSerializeAfter, No_OpClass);
0x18: mret({{
if (xc->readMiscReg(MISCREG_PRV) != PRV_M) {
return std::make_shared<IllegalInstFault>(
"mret at lower privilege", machInst);
} else {
STATUS status = xc->readMiscReg(MISCREG_STATUS);
xc->setMiscReg(MISCREG_PRV, status.mpp);
xc->setMiscReg(MISCREG_NMIE, 1);
status.mie = status.mpie;
status.mpie = 1;
status.mpp = PRV_U;
xc->setMiscReg(MISCREG_STATUS, status);
NPC = xc->readMiscReg(MISCREG_MEPC);
}
}}, IsSerializeAfter, IsNonSpeculative, IsReturn);
}
}
format CSROp {
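// In the CSROp format as used here, 'data' holds the CSR's current
// value on entry; whatever 'data' holds afterward is written back
// to the CSR.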
0x1: csrrw({{
Rd = rvSext(data);
data = rvZext(Rs1);
}}, IsSerializeAfter, IsNonSpeculative, No_OpClass);
0x2: csrrs({{
Rd = rvSext(data);
data = rvZext(data | Rs1);
}}, IsSerializeAfter, IsNonSpeculative, No_OpClass);
0x3: csrrc({{
Rd = rvSext(data);
data = rvZext(data & ~Rs1);
}}, IsSerializeAfter, IsNonSpeculative, No_OpClass);
0x5: csrrwi({{
Rd = rvSext(data);
data = rvZext(uimm);
}}, IsSerializeAfter, IsNonSpeculative, No_OpClass);
0x6: csrrsi({{
Rd = rvSext(data);
data = rvZext(data | uimm);
}}, IsSerializeAfter, IsNonSpeculative, No_OpClass);
0x7: csrrci({{
Rd = rvSext(data);
data = rvZext(data & ~uimm);
}}, IsSerializeAfter, IsNonSpeculative, No_OpClass);
}
}
0x1e: M5Op::M5Op();
}
}