blob: 5b16d72b89b84c9ec20e360567432734c888d9e8 [file] [log] [blame]
// -*- mode:c++ -*-
// Copyright (c) 2009 The University of Edinburgh
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met: redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer;
// redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution;
// neither the name of the copyright holders nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
////////////////////////////////////////////////////////////////////
//
// The actual Power ISA decoder
// ------------------------------
//
// Power ISA v3.0B has been used for instruction formats, opcode numbers,
// opcode field names, register names, etc.
//
// Top-level decode on the primary opcode (PO). Any encoding not matched
// below decays to Unknown::unknown().
decode PO default Unknown::unknown() {
    // PO 4: VA-form 128-bit multiply-add instructions, sub-decoded on VA_XO.
    4: decode VA_XO {
        // Arithmetic instructions that use source registers Ra, Rb and Rc,
        // with destination register Rt.
        format IntArithOp {
            // maddhd: Rt = high doubleword of (Ra * Rb) + Rc, signed.
            // multiplyAdd returns a (low, high) pair; only the high half
            // is kept here.
            48: maddhd({{
                int64_t res;
                std::tie(std::ignore, res) = multiplyAdd(Ra_sd, Rb_sd, Rc_sd);
                Rt = res;
            }});
            // maddhdu: unsigned variant of maddhd.
            49: maddhdu({{
                uint64_t res;
                std::tie(std::ignore, res) = multiplyAdd(Ra, Rb, Rc);
                Rt = res;
            }});
            // maddld: low doubleword of the 128-bit (Ra * Rb) + Rc.
            51: maddld({{
                uint64_t res;
                std::tie(res, std::ignore) = multiplyAdd(Ra_sd, Rb_sd, Rc_sd);
                Rt = res;
            }});
        }
    }
// PO 7/8: D-form multiply/subtract-from with a signed immediate.
format IntImmArithOp {
    // mulli: low 64 bits of Ra * SI, signed.
    7: mulli({{
        int64_t res = Ra_sd * si;
        Rt = res;
    }});
    // subfic: Rt = SI - Ra, computed as ~Ra + SI + 1; the trailing
    // 'true' asks the format to record the carry in XER.CA.
    8: subfic({{
        uint64_t src = ~Ra;
        Rt = src + si + 1;
    }}, true);
}
// PO 10/11: D-form compares against an immediate; the 4-bit result
// (with XER.SO folded in) lands in CR field BF.
format IntImmOp {
    // cmpli: logical (unsigned) compare with UI.
    10: cmpli({{
        Xer xer = XER;
        uint32_t cr = makeCRFieldUnsigned(Ra_uw, ui, xer.so);
        CR = insertCRField(CR, BF, cr);
    }});
    // cmpi: algebraic (signed) compare with SI.
    11: cmpi({{
        Xer xer = XER;
        uint32_t cr = makeCRFieldSigned(Ra_sw, si, xer.so);
        CR = insertCRField(CR, BF, cr);
    }});
}
// PO 12/13: add immediate carrying; the first 'true' enables CA
// computation, and addic_ (addic.) passes a second 'true' so CR0 is
// also updated from the result.
format IntImmArithOp {
    12: addic({{
        uint64_t src = Ra;
        Rt = src + si;
    }}, true);
    13: addic_({{
        uint64_t src = Ra;
        Rt = src + si;
    }}, true, true);
}
// PO 14/15: D-form add immediate. The format emits the second snippet
// when RA == 0, in which case the operand is the literal value 0 rather
// than the contents of R0 (the li/lis idiom).
format IntImmArithCheckRaOp {
    14: addi({{ Rt = Ra + si; }},
             // Fixed: the snippet was missing its terminating ';',
             // unlike every sibling snippet in this format.
             {{ Rt = si; }});
    15: addis({{ Rt = Ra + (si << 16); }},
              {{ Rt = si << 16; }});
}
// Conditionally branch to a PC-relative or absolute address based
// on CR and CTR.
16: BranchDispCondOp::bc({{ NIA = CIA + bd; }},
                         {{ NIA = bd; }});
// System call: raise a syscall fault handled by SE-mode emulation.
17: IntOp::sc({{ return std::make_shared<SESyscallFault>(); }});
// Unconditionally branch to a PC-relative or absolute address.
18: BranchOp::b({{ NIA = CIA + li; }},
                {{ NIA = li; }});
// PO 19: XL-form CR logical operations, CR-field move, CR-based
// register branches and isync, sub-decoded on XL_XO. The DX-form
// addpcis shares PO 19 and lives in the default case.
19: decode XL_XO {
    // mcrf: copy 4-bit CR field BFA into field BF.
    0: CondMoveOp::mcrf({{
        uint32_t crBfa = bits(CR, 31 - bfa*4, 28 - bfa*4);
        CR = insertBits(CR, 31 - bf*4, 28 - bf*4, crBfa);
    }});
    // Conditionally branch to address in LR based on CR and CTR.
    16: BranchRegCondOp::bclr({{ NIA = LR & -4ULL; }}, true, [ IsReturn ]);
    // CR bit logicals: CR bit BT := op(CR bit BA, CR bit BB). Bit
    // numbering is big-endian, hence the 31 - n index flips.
    format CondLogicOp {
        33: crnor({{
            uint32_t crBa = bits(CR, 31 - ba);
            uint32_t crBb = bits(CR, 31 - bb);
            CR = insertBits(CR, 31 - bt, !(crBa | crBb));
        }});
        129: crandc({{
            uint32_t crBa = bits(CR, 31 - ba);
            uint32_t crBb = bits(CR, 31 - bb);
            CR = insertBits(CR, 31 - bt, crBa & !crBb);
        }});
    }
    // isync: modeled as a serializing no-op.
    150: MiscOp::isync({{ }}, [ IsSerializeAfter ]);
    format CondLogicOp {
        193: crxor({{
            uint32_t crBa = bits(CR, 31 - ba);
            uint32_t crBb = bits(CR, 31 - bb);
            CR = insertBits(CR, 31 - bt, crBa ^ crBb);
        }});
        255: crnand({{
            uint32_t crBa = bits(CR, 31 - ba);
            uint32_t crBb = bits(CR, 31 - bb);
            CR = insertBits(CR, 31 - bt, !(crBa & crBb));
        }});
        257: crand({{
            uint32_t crBa = bits(CR, 31 - ba);
            uint32_t crBb = bits(CR, 31 - bb);
            CR = insertBits(CR, 31 - bt, crBa & crBb);
        }});
        289: creqv({{
            uint32_t crBa = bits(CR, 31 - ba);
            uint32_t crBb = bits(CR, 31 - bb);
            CR = insertBits(CR, 31 - bt, crBa == crBb);
        }});
        417: crorc({{
            uint32_t crBa = bits(CR, 31 - ba);
            uint32_t crBb = bits(CR, 31 - bb);
            CR = insertBits(CR, 31 - bt, crBa | !crBb);
        }});
        449: cror({{
            uint32_t crBa = bits(CR, 31 - ba);
            uint32_t crBb = bits(CR, 31 - bb);
            CR = insertBits(CR, 31 - bt, crBa | crBb);
        }});
    }
    // Conditionally branch to an address in a register based on
    // either CR only or both CR and CTR.
    format BranchRegCondOp {
        528: bcctr({{ NIA = CTR & -4ULL; }});
        560: bctar({{ NIA = TAR & -4ULL; }}, true);
    }
    // DX-form: addpcis adds a shifted immediate to the next
    // instruction address.
    default: decode DX_XO {
        2: IntDispArithOp::addpcis({{ Rt = NIA + (d << 16); }});
    }
}
// PO 20/21/23: M-form rotate-left-word instructions. fullMask is built
// by the IntRotateOp format from the MB/ME fields.
format IntRotateOp {
    // rlwimi inserts the rotated value under the mask, keeping the
    // rest of Ra.
    20: rlwimi({{ Ra = (rotateValue(Rs, sh) & fullMask) |
                       (Ra & ~fullMask); }});
    21: rlwinm({{ Ra = rotateValue(Rs, sh) & fullMask; }});
    // rlwnm takes its rotate count from Rb instead of an immediate.
    23: rlwnm({{ Ra = rotateValue(Rs, Rb) & fullMask; }});
}
// PO 24-29: D-form logical-immediate ops; the 'true' flag on the
// andi./andis. variants requests the CR0 (Rc-style) update.
format IntImmLogicOp {
    24: ori({{ Ra = Rs | ui; }});
    25: oris({{ Ra = Rs | (ui << 16); }});
    26: xori({{ Ra = Rs ^ ui; }});
    27: xoris({{ Ra = Rs ^ (ui << 16); }});
    28: andi_({{ Ra = Rs & ui; }}, true);
    29: andis_({{ Ra = Rs & (ui << 16); }}, true);
}
// There are a large number of instructions that have the same primary
// opcode (PO) of 31. In this case, the instructions are of different
// forms. For every form, the XO fields may vary in position and width.
// The X, XFL, XFX and XL form instructions use bits 21 - 30 and the
// XO form instructions use bits 22 - 30 as extended opcode (XO). To
// avoid conflicts, instructions of each form have to be defined under
// separate decode blocks. However, only a single decode block can be
// associated with a particular PO and it will recognize only one type
// of XO field. A solution for associating decode blocks for the other
// types of XO fields with the same PO is to have the other blocks as
// nested default cases.
31: decode X_XO {
    // cmp: X-form algebraic compare of Ra and Rb into CR field BF.
    0: IntOp::cmp({{
        Xer xer = XER;
        uint32_t cr = makeCRFieldSigned(Ra_sw, Rb_sw, xer.so);
        CR = insertCRField(CR, BF, cr);
    }});
    // Indexed loads; lwarx additionally establishes a 4-byte load
    // reservation used by stwcx.
    format LoadIndexOp {
        20: lwarx({{
            Rt = Mem_uw;
            Rsv = 1; RsvLen = 4; RsvAddr = EA;
        }});
        21: ldx({{ Rt = Mem; }});
        23: lwzx({{ Rt = Mem_uw; }});
    }
    format IntLogicOp {
        // slw: 32-bit shift left; shift counts >= 32 (bit 0x20 of Rb
        // set) yield zero per the architecture.
        24: slw({{
            if (Rb & 0x20) {
                Ra = 0;
            } else {
                Ra = Rs << (Rb & 0x1f);
            }
        }});
        // cntlzw: count leading zeros in the low word.
        26: cntlzw({{ Ra = Rs == 0 ? 32 : 31 - findMsbSet(Rs); }});
        28: and({{ Ra = Rs & Rb; }});
    }
    // cmpl: X-form logical (unsigned) compare into CR field BF.
    32: IntOp::cmpl({{
        Xer xer = XER;
        uint32_t cr = makeCRFieldUnsigned(Ra_uw, Rb_uw, xer.so);
        CR = insertCRField(CR, BF, cr);
    }});
    // lbarx: load byte and establish a 1-byte reservation.
    52: LoadIndexOp::lbarx({{
        Rt = Mem_ub;
        Rsv = 1; RsvLen = 1; RsvAddr = EA;
    }});
    53: LoadIndexUpdateOp::ldux({{ Rt = Mem; }});
    55: LoadIndexUpdateOp::lwzux({{ Rt = Mem_uw; }});
    60: IntLogicOp::andc({{ Ra = Rs & ~Rb; }});
    // More reserving/plain indexed loads.
    format LoadIndexOp {
        84: ldarx({{
            Rt = Mem_ud;
            Rsv = 1; RsvLen = 8; RsvAddr = EA;
        }});
        87: lbzx({{ Rt = Mem_ub; }});
        116: lharx({{
            Rt = Mem_uh;
            Rsv = 1; RsvLen = 2; RsvAddr = EA;
        }});
    }
    119: LoadIndexUpdateOp::lbzux({{ Rt = Mem_ub; }});
    124: IntLogicOp::nor({{ Ra = ~(Rs | Rb); }});
format StoreIndexOp {
149: stdx({{ Mem = Rs }});
150: stwcx({{
bool store_performed = false;
Mem_uw = Rs_uw;
if (Rsv) {
if (RsvLen == 4) {
if (RsvAddr == EA) {
store_performed = true;
}
}
}
Xer xer = XER;
PowerISA::Cr cr = CR;
cr.cr0 = ((store_performed ? 0x2 : 0x0) | xer.so);
CR = cr;
Rsv = 0;
}});
151: stwx({{ Mem_uw = Rs_uw; }});
}
    // Indexed stores with EA write-back to Ra.
    format StoreIndexUpdateOp {
        181: stdux({{ Mem = Rs; }});
        183: stwux({{ Mem_uw = Rs_uw; }});
    }
    format StoreIndexOp {
        // stdcx.: store-doubleword-conditional; succeeds only if the
        // 8-byte reservation from ldarx still matches, result in CR0.
        214: stdcx({{
            bool store_performed = false;
            Mem = Rs;
            if (Rsv) {
                if (RsvLen == 8) {
                    if (RsvAddr == EA) {
                        store_performed = true;
                    }
                }
            }
            Xer xer = XER;
            Cr cr = CR;
            cr.cr0 = ((store_performed ? 0x2 : 0x0) | xer.so);
            CR = cr;
            Rsv = 0;
        }});
        215: stbx({{ Mem_ub = Rs_ub; }});
    }
    // Cache touch hints are modeled as no-ops.
    246: MiscOp::dcbtst({{ }});
    247: StoreIndexUpdateOp::stbux({{ Mem_ub = Rs_ub; }});
    // moduw: unsigned word modulo; divide-by-zero yields 0 (boundary
    // case result is undefined architecturally).
    267: IntArithOp::moduw({{
        uint64_t src1 = Ra_uw;
        uint64_t src2 = Rb_uw;
        if (src2 != 0) {
            Rt = src1 % src2;
        } else {
            Rt = 0;
        }
    }});
    278: MiscOp::dcbt({{ }});
    279: LoadIndexOp::lhzx({{ Rt = Mem_uh; }});
    284: IntLogicOp::eqv({{ Ra = ~(Rs ^ Rb); }});
    311: LoadIndexUpdateOp::lhzux({{ Rt = Mem_uh; }});
    316: IntLogicOp::xor({{ Ra = Rs ^ Rb; }});
    // Algebraic (sign-extending) indexed loads.
    format LoadIndexOp {
        341: lwax({{ Rt = Mem_sw; }});
        343: lhax({{ Rt = Mem_sh; }});
    }
    format LoadIndexUpdateOp {
        373: lwaux({{ Rt = Mem_sw; }});
        375: lhaux({{ Rt = Mem_sh; }});
    }
    407: StoreIndexOp::sthx({{ Mem_uh = Rs_uh; }});
    412: IntLogicOp::orc({{ Ra = Rs | ~Rb; }});
    439: StoreIndexUpdateOp::sthux({{ Mem_uh = Rs_uh; }});
    format IntLogicOp {
        444: or({{ Ra = Rs | Rb; }});
        476: nand({{ Ra = ~(Rs & Rb); }});
        // cmpb: byte-wise compare; each result byte is 0xff where the
        // corresponding bytes of Rs and Rb match, else 0x00.
        508: cmpb({{
            uint32_t val = 0;
            for (int n = 0; n < 32; n += 8) {
                if (bits(Rs, n+7, n) == bits(Rb, n+7, n)) {
                    val = insertBits(val, n+7, n, 0xff);
                }
            }
            Ra = val;
        }});
    }
    // Byte-reversed loads and single-precision FP load.
    format LoadIndexOp {
        532: ldbrx({{ Rt = swap_byte(Mem); }});
        534: lwbrx({{ Rt = swap_byte(Mem_uw); }});
        535: lfsx({{ Ft_sf = Mem_sf; }});
    }
    // srw: 32-bit logical shift right; counts >= 32 give zero.
    536: IntLogicOp::srw({{
        if (Rb & 0x20) {
            Ra = 0;
        } else {
            Ra = Rs >> (Rb & 0x1f);
        }
    }});
    567: LoadIndexUpdateOp::lfsux({{ Ft_sf = Mem_sf; }});
    // sync: full memory barrier.
    598: MiscOp::sync({{ }}, [ IsReadBarrier, IsWriteBarrier ]);
    599: LoadIndexOp::lfdx({{ Ft = Mem_df; }});
    631: LoadIndexUpdateOp::lfdux({{ Ft = Mem_df; }});
    format StoreIndexOp {
        660: stdbrx({{ Mem = swap_byte(Rs); }});
        662: stwbrx({{ Mem_uw = swap_byte(Rs_uw); }});
        663: stfsx({{ Mem_sf = Fs_sf; }});
        // stbcx.: store-byte-conditional against the lbarx reservation;
        // outcome recorded in CR0, reservation cleared.
        694: stbcx({{
            bool store_performed = false;
            Mem_ub = Rs_ub;
            if (Rsv) {
                if (RsvLen == 1) {
                    if (RsvAddr == EA) {
                        store_performed = true;
                    }
                }
            }
            Xer xer = XER;
            Cr cr = CR;
            cr.cr0 = ((store_performed ? 0x2 : 0x0) | xer.so);
            CR = cr;
            Rsv = 0;
        }});
    }
    695: StoreIndexUpdateOp::stfsux({{ Mem_sf = Fs_sf; }});
    format StoreIndexOp {
        // sthcx.: store-halfword-conditional against the lharx
        // reservation; outcome recorded in CR0, reservation cleared.
        726: sthcx({{
            bool store_performed = false;
            Mem_uh = Rs_uh;
            if (Rsv) {
                if (RsvLen == 2) {
                    if (RsvAddr == EA) {
                        store_performed = true;
                    }
                }
            }
            Xer xer = XER;
            Cr cr = CR;
            cr.cr0 = ((store_performed ? 0x2 : 0x0) | xer.so);
            CR = cr;
            Rsv = 0;
        }});
        727: stfdx({{ Mem_df = Fs; }});
    }
    759: StoreIndexUpdateOp::stfdux({{ Mem_df = Fs; }});
    // modsw: signed word modulo; overflow (INT32_MIN % -1) and
    // divide-by-zero both yield 0 (architecturally undefined).
    779: IntArithOp::modsw({{
        int64_t src1 = Ra_sw;
        int64_t src2 = Rb_sw;
        if ((src1 != INT32_MIN || src2 != -1) && src2 != 0) {
            Rt = src1 % src2;
        } else {
            Rt = 0;
        }
    }});
    790: LoadIndexOp::lhbrx({{ Rt = swap_byte(Mem_uh); }});
    // sraw: 32-bit arithmetic shift right by Rb. XER.CA is set iff the
    // source is negative and any 1-bits are shifted out.
    792: IntLogicOp::sraw({{
        bool shiftSetCA = false;
        int32_t s = Rs;
        if (Rb == 0) {
            Ra = Rs;
            shiftSetCA = true;
        } else if (Rb & 0x20) {
            // Shift count >= 32: result is all sign bits.
            if (s < 0) {
                Ra = (uint32_t)-1;
                if (s & 0x7fffffff) {
                    shiftSetCA = true;
                } else {
                    shiftSetCA = false;
                }
            } else {
                Ra = 0;
                shiftSetCA = false;
            }
        } else {
            Ra = s >> (Rb & 0x1f);
            if (s < 0 && (s << (32 - (Rb & 0x1f))) != 0) {
                shiftSetCA = true;
            } else {
                shiftSetCA = false;
            }
        }
        Xer xer1 = XER;
        if (shiftSetCA) {
            xer1.ca = 1;
        } else {
            xer1.ca = 0;
        }
        XER = xer1;
    }});
    // srawi: arithmetic shift right by immediate, same CA rule.
    824: IntShiftOp::srawi({{
        bool shiftSetCA = false;
        if (sh == 0) {
            Ra = Rs;
            shiftSetCA = false;
        } else {
            int32_t s = Rs;
            Ra = s >> sh;
            if (s < 0 && (s << (32 - sh)) != 0) {
                shiftSetCA = true;
            } else {
                shiftSetCA = false;
            }
        }
        Xer xer1 = XER;
        if (shiftSetCA) {
            xer1.ca = 1;
        } else {
            xer1.ca = 0;
        }
        XER = xer1;
    }});
    // eieio: modeled as a full barrier here.
    854: MiscOp::eieio({{ }}, [ IsReadBarrier, IsWriteBarrier ]);
    855: LoadIndexOp::lfiwax({{ Ft_uw = Mem; }});
    918: StoreIndexOp::sthbrx({{ Mem_uh = swap_byte(Rs_uh); }});
    // Sign extensions.
    format IntLogicOp {
        922: extsh({{ Ra = sext<16>(Rs); }});
        954: extsb({{ Ra = sext<8>(Rs); }});
    }
    983: StoreIndexOp::stfiwx({{ Mem = Fs_uw; }});
    // These instructions are of XO form with bit 21 as the OE bit.
    default: decode XO_XO {
        // subfc: Rt = Rb - Ra via ~Ra + Rb + 1; 'true' enables CA.
        8: IntSumOp::subfc({{ ~Ra }}, {{ Rb }}, {{ 1 }}, true);
        // mulhdu: high 64 bits of the unsigned 128-bit product.
        9: IntArithCheckRcOp::mulhdu({{
            uint64_t res;
            std::tie(std::ignore, res) = multiply(Ra, Rb);
            Rt = res;
        }});
        10: IntSumOp::addc({{ Ra }}, {{ Rb }}, computeCA = true);
        // mulhwu: high word of the unsigned 64-bit word product.
        11: IntArithCheckRcOp::mulhwu({{
            uint64_t res = (uint64_t)Ra_uw * Rb_uw;
            res = res >> 32;
            Rt = res;
        }});
        40: IntSumOp::subf({{ ~Ra }}, {{ Rb }}, {{ 1 }});
        format IntArithCheckRcOp {
            // mulhd: high 64 bits of the signed 128-bit product.
            73: mulhd({{
                int64_t res;
                std::tie(std::ignore, res) = multiply(Ra_sd, Rb_sd);
                Rt = res;
            }});
            // mulhw: high word of the signed word product.
            75: mulhw({{
                uint64_t res = (int64_t)Ra_sw * Rb_sw;
                res = res >> 32;
                Rt = res;
            }});
        }
        // Add/subtract family expressed as sums of up to three terms;
        // xer.ca feeds the extended (carrying) variants.
        format IntSumOp {
            104: neg({{ ~Ra }}, {{ 1 }});
            136: subfe({{ ~Ra }}, {{ Rb }}, {{ xer.ca }}, true);
            138: adde({{ Ra }}, {{ Rb }}, {{ xer.ca }}, true);
            200: subfze({{ ~Ra }}, {{ xer.ca }}, computeCA = true);
            202: addze({{ Ra }}, {{ xer.ca }}, computeCA = true);
            232: subfme({{ ~Ra }}, {{ -1ULL }}, {{ xer.ca }}, true);
            234: addme({{ Ra }}, {{ -1ULL }}, {{ xer.ca }}, true);
        }
        format IntArithCheckRcOp {
            // mulld: low 64 bits of the signed product; OV set when the
            // product does not fit in 64 bits ('true' enables OE).
            233: mulld({{
                int64_t src1 = Ra_sd;
                int64_t src2 = Rb_sd;
                uint64_t res = src1 * src2;
                std::tie(res, std::ignore) = multiply(src1, src2);
                if (src1 != 0 && (int64_t)res / src1 != src2) {
                    setOV = true;
                }
                Rt = res;
            }}, true);
            // mullw: signed word multiply; OV on 32-bit overflow.
            235: mullw({{
                int64_t res = (int64_t)Ra_sw * Rb_sw;
                if (res != (int32_t)res) {
                    setOV = true;
                }
                Rt = res;
            }}, true);
        }
        266: IntSumOp::add({{ Ra }}, {{ Rb }});
        // Divide family: all variants return 0 and set OV on invalid
        // cases (divide by zero, signed overflow, result out of range).
        format IntArithCheckRcOp {
            // divdeu: unsigned divide extended (Ra:0 / Rb); helper
            // reports overflow via setOV.
            393: divdeu({{
                uint64_t src1 = Ra;
                uint64_t src2 = Rb;
                uint64_t res;
                std::tie(setOV, res, std::ignore) = divide(0, src1, src2);
                if (!setOV) {
                    Rt = res;
                } else {
                    Rt = 0;
                }
            }}, true);
            // divweu: unsigned word divide extended ((src1 << 32) / src2).
            // NOTE(review): src1/src2 truncate the _ud operands to 32
            // bits, equivalent to the _uw accessors used elsewhere.
            395: divweu({{
                uint32_t src1 = Ra_ud;
                uint32_t src2 = Rb_ud;
                uint64_t res;
                if (src2 != 0) {
                    res = ((uint64_t)src1 << 32) / src2;
                    if (res <= UINT32_MAX) {
                        Rt = (uint32_t)res;
                    } else {
                        Rt = 0;
                        setOV = true;
                    }
                } else {
                    Rt = 0;
                    setOV = true;
                }
            }}, true);
            // divde: signed divide extended.
            425: divde({{
                int64_t src1 = Ra_sd;
                int64_t src2 = Rb_sd;
                int64_t res;
                std::tie(setOV, res, std::ignore) = divide(0, src1, src2);
                if (!setOV) {
                    Rt = res;
                } else {
                    Rt = 0;
                }
            }}, true);
            // divwe: signed word divide extended; guards INT32_MIN / -1.
            427: divwe({{
                int32_t src1 = Ra_sw;
                int32_t src2 = Rb_sw;
                int64_t res;
                if ((src1 != INT32_MIN || src2 != -1) && src2 != 0) {
                    res = ((int64_t)src1 << 32) / src2;
                    if (res == (int32_t)res) {
                        Rt = (uint32_t)res;
                    } else {
                        Rt = 0;
                        setOV = true;
                    }
                } else {
                    Rt = 0;
                    setOV = true;
                }
            }}, true);
            457: divdu({{
                uint64_t src1 = Ra;
                uint64_t src2 = Rb;
                if (src2 != 0) {
                    Rt = src1 / src2;
                } else {
                    Rt = 0;
                    setOV = true;
                }
            }}, true);
            459: divwu({{
                uint32_t src1 = Ra_uw;
                uint32_t src2 = Rb_uw;
                if (src2 != 0) {
                    Rt = src1 / src2;
                } else {
                    Rt = 0;
                    setOV = true;
                }
            }}, true);
            // divd: signed doubleword divide; guards INT64_MIN / -1.
            489: divd({{
                int64_t src1 = Ra_sd;
                int64_t src2 = Rb_sd;
                if ((src1 != INT64_MIN || src2 != -1) && src2 != 0) {
                    Rt = src1 / src2;
                } else {
                    Rt = 0;
                    setOV = true;
                }
            }}, true);
            // divw: signed word divide; guards INT32_MIN / -1.
            491: divw({{
                int32_t src1 = Ra_sw;
                int32_t src2 = Rb_sw;
                if ((src1 != INT32_MIN || src2 != -1) && src2 != 0) {
                    Rt = (uint32_t)(src1 / src2);
                } else {
                    Rt = 0;
                    setOV = true;
                }
            }}, true);
        }
        // XFX-form CR and SPR move instructions.
        default: decode XFX_XO {
            format IntOp {
                19: mfcr({{ Rt = CR; }});
                // mtcrf: update only the CR fields selected by FXM.
                144: mtcrf({{
                    uint32_t mask = 0;
                    for (int i = 0; i < 8; ++i) {
                        if (((FXM >> i) & 0x1) == 0x1) {
                            mask |= 0xf << (4 * i);
                        }
                    }
                    CR = (Rs & mask) | (CR & ~mask);
                }});
                // mfspr: only the SPRs modeled here are decoded.
                339: decode SPR {
                    0x20: mfxer({{ Rt = XER; }});
                    0x100: mflr({{ Rt = LR; }});
                    0x120: mfctr({{ Rt = CTR; }});
                    0x1f9: mftar({{ Rt = TAR; }});
                }
                // mtspr counterparts.
                467: decode SPR {
                    0x20: mtxer({{ XER = Rs; }});
                    0x100: mtlr({{ LR = Rs; }});
                    0x120: mtctr({{ CTR = Rs; }});
                    0x1f9: mttar({{ TAR = Rs; }});
                }
                // mcrxr: move XER[32:35] into CR field BF and clear them.
                512: mcrxr({{
                    CR = insertCRField(CR, BF, XER<31:28>);
                    XER = XER<27:0>;
                }});
            }
        }
    }
}
// PO 32-55: D-form loads and stores with a displacement; the *u
// variants write the effective address back to Ra.
32: LoadDispOp::lwz({{ Rt = Mem_uw; }});
33: LoadDispUpdateOp::lwzu({{ Rt = Mem_uw; }});
34: LoadDispOp::lbz({{ Rt = Mem_ub; }});
35: LoadDispUpdateOp::lbzu({{ Rt = Mem_ub; }});
36: StoreDispOp::stw({{ Mem_uw = Rs_uw; }});
37: StoreDispUpdateOp::stwu({{ Mem_uw = Rs_uw; }});
38: StoreDispOp::stb({{ Mem_ub = Rs_ub; }});
39: StoreDispUpdateOp::stbu({{ Mem_ub = Rs_ub; }});
40: LoadDispOp::lhz({{ Rt = Mem_uh; }});
41: LoadDispUpdateOp::lhzu({{ Rt = Mem_uh; }});
42: LoadDispOp::lha({{ Rt = Mem_sh; }});
43: LoadDispUpdateOp::lhau({{ Rt = Mem_sh; }});
44: StoreDispOp::sth({{ Mem_uh = Rs_uh; }});
45: StoreDispUpdateOp::sthu({{ Mem_uh = Rs_uh; }});
// Floating-point loads/stores with displacement.
48: LoadDispOp::lfs({{ Ft_sf = Mem_sf; }});
49: LoadDispUpdateOp::lfsu({{ Ft_sf = Mem_sf; }});
50: LoadDispOp::lfd({{ Ft = Mem_df; }});
51: LoadDispUpdateOp::lfdu({{ Ft = Mem_df; }});
52: StoreDispOp::stfs({{ Mem_sf = Fs_sf; }});
53: StoreDispUpdateOp::stfsu({{ Mem_sf = Fs_sf; }});
54: StoreDispOp::stfd({{ Mem_df = Fs; }});
55: StoreDispUpdateOp::stfdu({{ Mem_df = Fs; }});
// PO 58: DS-form doubleword loads, sub-decoded on the low XO bits.
58: decode DS_XO {
    0: LoadDispShiftOp::ld({{ Rt = Mem; }});
    1: LoadDispShiftUpdateOp::ldu({{ Rt = Mem; }});
    2: LoadDispShiftOp::lwa({{ Rt = Mem_sw; }});
}
// PO 59: A-form single-precision floating-point arithmetic.
format FloatArithOp {
    59: decode A_XO {
        18: fdivs({{ Ft = Fa / Fb; }});
        20: fsubs({{ Ft = Fa - Fb; }});
        21: fadds({{ Ft = Fa + Fb; }});
        25: fmuls({{ Ft = Fa * Fc; }});
        28: fmsubs({{ Ft = (Fa * Fc) - Fb; }});
        29: fmadds({{ Ft = (Fa * Fc) + Fb; }});
        30: fnmsubs({{ Ft = -((Fa * Fc) - Fb); }});
        31: fnmadds({{ Ft = -((Fa * Fc) + Fb); }});
    }
}
// PO 62: DS-form doubleword stores.
62: decode DS_XO {
    0: StoreDispShiftOp::std({{ Mem = Rs; }});
    1: StoreDispShiftUpdateOp::stdu({{ Mem = Rs; }});
}
// PO 63: double-precision FP arithmetic (A-form), with X-form FP
// compare/move/FPSCR ops in the default case, and XFL-form mtfsf
// nested one level deeper.
63: decode A_XO {
    format FloatArithOp {
        20: fsub({{ Ft = Fa - Fb; }});
        21: fadd({{ Ft = Fa + Fb; }});
        25: fmul({{ Ft = Fa * Fc; }});
        18: fdiv({{ Ft = Fa / Fb; }});
        29: fmadd({{ Ft = (Fa * Fc) + Fb; }});
        28: fmsub({{ Ft = (Fa * Fc) - Fb; }});
        31: fnmadd({{ Ft = -((Fa * Fc) + Fb); }});
        30: fnmsub({{ Ft = -((Fa * Fc) - Fb); }});
    }
    default: decode X_XO {
        // fcmpu: unordered FP compare; result goes to both FPSCR.FPCC
        // and CR field BF.
        0: FloatOp::fcmpu({{
            uint32_t c = makeCRField(Fa, Fb);
            Fpscr fpscr = FPSCR;
            fpscr.fprf.fpcc = c;
            FPSCR = fpscr;
            CR = insertCRField(CR, BF, c);
        }});
        // fcpsgn: copy Fb with the sign bit taken from Fa.
        8: FloatRCCheckOp::fcpsgn({{
            Ft_ud = Fb_ud;
            Ft_ud = insertBits(Ft_ud, 63, Fa_ud<63:63>);
        }});
        format FloatConvertOp {
            // frsp: round to single precision.
            12: frsp({{ Ft_sf = Fb; }});
            // fctiwz: convert to integer word, round toward zero.
            15: fctiwz({{ Ft_sw = (int32_t)trunc(Fb); }});
        }
        format FloatRCCheckOp {
            // FPSCR bit set/clear (big-endian bit numbering).
            38: mtfsb1({{ FPSCR = insertBits(FPSCR, 31 - BT, 1); }});
            40: fneg({{ Ft = -Fb; }});
            70: mtfsb0({{ FPSCR = insertBits(FPSCR, 31 - BT, 0); }});
            72: fmr({{ Ft = Fb; }});
            // mtfsfi: write immediate U into FPSCR field BF; W selects
            // the upper or lower word of FPSCR.
            134: mtfsfi({{
                FPSCR = insertCRField(FPSCR, BF + (8 * (1 - W_FIELD)),
                                      U_FIELD);
            }});
            // fnabs/fabs: force the sign bit set/clear.
            136: fnabs({{
                Ft_ud = Fb_ud;
                Ft_ud = insertBits(Ft_ud, 63, 1);
            }});
            264: fabs({{
                Ft_ud = Fb_ud;
                Ft_ud = insertBits(Ft_ud, 63, 0);
            }});
            583: mffs({{ Ft_ud = FPSCR; }});
            // mtfsf: copy FPSCR fields from Fb; L=1 replaces the whole
            // register, otherwise FLM selects 4-bit fields (W picks
            // the word, as in mtfsfi).
            default: decode XFL_XO {
                711: mtfsf({{
                    if (L_FIELD == 1) { FPSCR = Fb_ud; }
                    else {
                        for (int i = 0; i < 8; ++i) {
                            if (bits(FLM, i) == 1) {
                                int k = 4 * (i + (8 * (1 - W_FIELD)));
                                FPSCR = insertBits(FPSCR, k + 3, k,
                                                   bits(Fb_ud, k + 3, k));
                            }
                        }
                    }
                }});
            }
        }
    }
}
}