| // -*- mode:c++ -*- |
| |
| // Copyright (c) 2013 ARM Limited |
| // All rights reserved |
| // |
| // The license below extends only to copyright in the software and shall |
| // not be construed as granting a license to any other intellectual |
| // property including but not limited to intellectual property relating |
| // to a hardware implementation of the functionality of the software |
| // licensed hereunder. You may use the software subject to the license |
| // terms below provided that you ensure that this notice is replicated |
| // unmodified and in its entirety in all distributions of the software, |
| // modified or unmodified, in source code or in binary form. |
| // |
| // Copyright (c) 2003-2006 The Regents of The University of Michigan |
| // All rights reserved. |
| // |
| // Redistribution and use in source and binary forms, with or without |
| // modification, are permitted provided that the following conditions are |
| // met: redistributions of source code must retain the above copyright |
| // notice, this list of conditions and the following disclaimer; |
| // redistributions in binary form must reproduce the above copyright |
| // notice, this list of conditions and the following disclaimer in the |
| // documentation and/or other materials provided with the distribution; |
| // neither the name of the copyright holders nor the names of its |
| // contributors may be used to endorse or promote products derived from |
| // this software without specific prior written permission. |
| // |
| // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS |
| // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT |
| // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR |
| // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT |
| // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, |
| // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT |
| // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
| // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
| // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
| // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE |
| // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
| // |
| // Authors: Steve Reinhardt |
| |
| //////////////////////////////////////////////////////////////////// |
| // |
| // The actual decoder specification |
| // |
| |
| decode OPCODE default Unknown::unknown() { |
| |
    // Load-effective-address instructions: compute an address into Ra
    // without accessing memory.
    format LoadAddress {
        0x08: lda({{ Ra = Rb + disp; }});
        // "load address high": the 16-bit displacement is shifted into the
        // upper half-word, so an ldah/lda pair can build a 32-bit constant.
        0x09: ldah({{ Ra = Rb + (disp << 16); }});
    }
| |
    // Ordinary loads (the format turns them into nops when the destination
    // is the zero register).
    format LoadOrNop {
        0x0a: ldbu({{ Ra_uq = Mem_ub; }});
        0x0c: ldwu({{ Ra_uq = Mem_uw; }});
        // ldq_u is the unaligned quadword load: the low three EA bits
        // are cleared before the access.
        0x0b: ldq_u({{ Ra = Mem_uq; }}, ea_code = {{ EA = (Rb + disp) & ~7; }});
        0x23: ldt({{ Fa = Mem_df; }});
        // Load-locked variants carry the LLSC memory flag so the memory
        // system tracks the lock for a following store-conditional.
        0x2a: ldl_l({{ Ra_sl = Mem_sl; }}, mem_flags = LLSC);
        0x2b: ldq_l({{ Ra_uq = Mem_uq; }}, mem_flags = LLSC);
    }
| |
    // Loads that become prefetch hints (with the given pf_flags) when the
    // destination is the zero register.
    format LoadOrPrefetch {
        0x28: ldl({{ Ra_sl = Mem_sl; }});
        0x29: ldq({{ Ra_uq = Mem_uq; }}, pf_flags = EVICT_NEXT);
        // IsFloating flag on lds gets the prefetch to disassemble
        // using f31 instead of r31... functionally it's unnecessary
        0x22: lds({{ Fa_uq = s_to_t(Mem_ul); }},
                  pf_flags = PF_EXCLUSIVE, inst_flags = IsFloating);
    }
| |
    // Ordinary stores (integer and floating point).
    format Store {
        0x0e: stb({{ Mem_ub = Ra<7:0>; }});
        0x0d: stw({{ Mem_uw = Ra<15:0>; }});
        0x2c: stl({{ Mem_ul = Ra<31:0>; }});
        0x2d: stq({{ Mem_uq = Ra_uq; }});
        // stq_u is the unaligned quadword store: low three EA bits cleared.
        0x0f: stq_u({{ Mem_uq = Ra_uq; }}, {{ EA = (Rb + disp) & ~7; }});
        // sts converts the in-register T-format value back to S (single)
        // memory format before storing.
        0x26: sts({{ Mem_ul = t_to_s(Fa_uq); }});
        0x27: stt({{ Mem_df = Fa; }});
    }
| |
    // Store-conditional instructions: the second code block runs after the
    // write completes and turns the write result into the architectural
    // success/failure value in Ra.
    format StoreCond {
        0x2e: stl_c({{ Mem_ul = Ra<31:0>; }},
                    {{
                        uint64_t tmp = write_result;
                        // see stq_c
                        Ra = (tmp == 0 || tmp == 1) ? tmp : Ra;
                        if (tmp == 1) {
                            xc->setStCondFailures(0);
                        }
                    }}, mem_flags = LLSC, inst_flags = IsStoreConditional);
        0x2f: stq_c({{ Mem_uq = Ra; }},
                    {{
                        uint64_t tmp = write_result;
                        // If the write operation returns 0 or 1, then
                        // this was a conventional store conditional,
                        // and the value indicates the success/failure
                        // of the operation.  If another value is
                        // returned, then this was a Turbolaser
                        // mailbox access, and we don't update the
                        // result register at all.
                        Ra = (tmp == 0 || tmp == 1) ? tmp : Ra;
                        if (tmp == 1) {
                            // clear failure counter... this is
                            // non-architectural and for debugging
                            // only.
                            xc->setStCondFailures(0);
                        }
                    }}, mem_flags = LLSC, inst_flags = IsStoreConditional);
    }
| |
| format IntegerOperate { |
| |
        // Opcode 0x10: add/sub/compare group.  The *v variants trap on
        // signed overflow; s4*/s8* scale the first operand by 4 or 8.
        0x10: decode INTFUNC {      // integer arithmetic operations

            0x00: addl({{ Rc_sl = Ra_sl + Rb_or_imm_sl; }});
            0x40: addlv({{
                int32_t tmp = Ra_sl + Rb_or_imm_sl;
                // signed overflow occurs when operands have same sign
                // and sign of result does not match.
                if (Ra_sl<31:> == Rb_or_imm_sl<31:> && tmp<31:> != Ra_sl<31:>)
                    fault = std::make_shared<IntegerOverflowFault>();
                Rc_sl = tmp;
            }});
            0x02: s4addl({{ Rc_sl = (Ra_sl << 2) + Rb_or_imm_sl; }});
            0x12: s8addl({{ Rc_sl = (Ra_sl << 3) + Rb_or_imm_sl; }});

            0x20: addq({{ Rc = Ra + Rb_or_imm; }});
            0x60: addqv({{
                uint64_t tmp = Ra + Rb_or_imm;
                // signed overflow occurs when operands have same sign
                // and sign of result does not match.
                if (Ra<63:> == Rb_or_imm<63:> && tmp<63:> != Ra<63:>)
                    fault = std::make_shared<IntegerOverflowFault>();
                Rc = tmp;
            }});
            0x22: s4addq({{ Rc = (Ra << 2) + Rb_or_imm; }});
            0x32: s8addq({{ Rc = (Ra << 3) + Rb_or_imm; }});

            0x09: subl({{ Rc_sl = Ra_sl - Rb_or_imm_sl; }});
            0x49: sublv({{
                int32_t tmp = Ra_sl - Rb_or_imm_sl;
                // signed overflow detection is same as for add,
                // except we need to look at the *complemented*
                // sign bit of the subtrahend (Rb), i.e., if the initial
                // signs are the *same* then no overflow can occur
                if (Ra_sl<31:> != Rb_or_imm_sl<31:> && tmp<31:> != Ra_sl<31:>)
                    fault = std::make_shared<IntegerOverflowFault>();
                Rc_sl = tmp;
            }});
            0x0b: s4subl({{ Rc_sl = (Ra_sl << 2) - Rb_or_imm_sl; }});
            0x1b: s8subl({{ Rc_sl = (Ra_sl << 3) - Rb_or_imm_sl; }});

            0x29: subq({{ Rc = Ra - Rb_or_imm; }});
            0x69: subqv({{
                uint64_t tmp = Ra - Rb_or_imm;
                // signed overflow detection is same as for add,
                // except we need to look at the *complemented*
                // sign bit of the subtrahend (Rb), i.e., if the initial
                // signs are the *same* then no overflow can occur
                if (Ra<63:> != Rb_or_imm<63:> && tmp<63:> != Ra<63:>)
                    fault = std::make_shared<IntegerOverflowFault>();
                Rc = tmp;
            }});
            0x2b: s4subq({{ Rc = (Ra << 2) - Rb_or_imm; }});
            0x3b: s8subq({{ Rc = (Ra << 3) - Rb_or_imm; }});

            // Comparisons write 0/1 into Rc.
            0x2d: cmpeq({{ Rc = (Ra == Rb_or_imm); }});
            0x6d: cmple({{ Rc = (Ra_sq <= Rb_or_imm_sq); }});
            0x4d: cmplt({{ Rc = (Ra_sq < Rb_or_imm_sq); }});
            0x3d: cmpule({{ Rc = (Ra_uq <= Rb_or_imm_uq); }});
            0x1d: cmpult({{ Rc = (Ra_uq < Rb_or_imm_uq); }});

            // cmpbge: per-byte unsigned >= comparison; bit i of the result
            // reflects byte i of the operands.
            0x0f: cmpbge({{
                int hi = 7;
                int lo = 0;
                uint64_t tmp = 0;
                for (int i = 0; i < 8; ++i) {
                    tmp |= (Ra_uq<hi:lo> >= Rb_or_imm_uq<hi:lo>) << i;
                    hi += 8;
                    lo += 8;
                }
                Rc = tmp;
            }});
        }
| |
        // Opcode 0x11: bitwise logical operations and conditional moves.
        0x11: decode INTFUNC {      // integer logical operations

            0x00: and({{ Rc = Ra & Rb_or_imm; }});
            0x08: bic({{ Rc = Ra & ~Rb_or_imm; }});
            0x20: bis({{ Rc = Ra | Rb_or_imm; }});
            0x28: ornot({{ Rc = Ra | ~Rb_or_imm; }});
            0x40: xor({{ Rc = Ra ^ Rb_or_imm; }});
            0x48: eqv({{ Rc = Ra ^ ~Rb_or_imm; }});

            // conditional moves: Rc keeps its old value when the condition
            // on Ra is false (hence Rc appears on both sides).
            0x14: cmovlbs({{ Rc = ((Ra & 1) == 1) ? Rb_or_imm : Rc; }});
            0x16: cmovlbc({{ Rc = ((Ra & 1) == 0) ? Rb_or_imm : Rc; }});
            0x24: cmoveq({{ Rc = (Ra == 0) ? Rb_or_imm : Rc; }});
            0x26: cmovne({{ Rc = (Ra != 0) ? Rb_or_imm : Rc; }});
            0x44: cmovlt({{ Rc = (Ra_sq < 0) ? Rb_or_imm : Rc; }});
            0x46: cmovge({{ Rc = (Ra_sq >= 0) ? Rb_or_imm : Rc; }});
            0x64: cmovle({{ Rc = (Ra_sq <= 0) ? Rb_or_imm : Rc; }});
            0x66: cmovgt({{ Rc = (Ra_sq > 0) ? Rb_or_imm : Rc; }});

            // For AMASK, RA must be R31.
            // The cleared mask bits advertise which architecture
            // extensions this implementation supports.
            0x61: decode RA {
                31: amask({{ Rc = Rb_or_imm & ~ULL(0x17); }});
            }

            // For IMPLVER, RA must be R31 and the B operand
            // must be the immediate value 1.
            0x6c: decode RA {
                31: decode IMM {
                    1: decode INTIMM {
                        // return EV5 for FullSystem and EV6 otherwise
                        1: implver({{ Rc = FullSystem ? 1 : 2 }});
                    }
                }
            }

            // The mysterious 11.25...
            0x25: WarnUnimpl::eleven25();
        }
| |
        // Opcode 0x12: shifts and the byte-manipulation family
        // (mask/extract/insert low/high, zap/zapnot).  The byte offset is
        // always taken from the low three bits of Rb (Rb_or_imm<2:0>).
        0x12: decode INTFUNC {
            0x39: sll({{ Rc = Ra << Rb_or_imm<5:0>; }});
            0x34: srl({{ Rc = Ra_uq >> Rb_or_imm<5:0>; }});
            // sra uses the signed view of Ra so the shift is arithmetic.
            0x3c: sra({{ Rc = Ra_sq >> Rb_or_imm<5:0>; }});

            // msk*l: clear a (8/16/32/64)-bit field starting at the byte
            // offset in Rb.
            0x02: mskbl({{ Rc = Ra & ~(mask( 8) << (Rb_or_imm<2:0> * 8)); }});
            0x12: mskwl({{ Rc = Ra & ~(mask(16) << (Rb_or_imm<2:0> * 8)); }});
            0x22: mskll({{ Rc = Ra & ~(mask(32) << (Rb_or_imm<2:0> * 8)); }});
            0x32: mskql({{ Rc = Ra & ~(mask(64) << (Rb_or_imm<2:0> * 8)); }});

            // msk*h: clear the portion of the field that spills into the
            // next quadword; a zero byte offset clears nothing.
            0x52: mskwh({{
                int bv = Rb_or_imm<2:0>;
                Rc = bv ? (Ra & ~(mask(16) >> (64 - 8 * bv))) : Ra;
            }});
            0x62: msklh({{
                int bv = Rb_or_imm<2:0>;
                Rc = bv ? (Ra & ~(mask(32) >> (64 - 8 * bv))) : Ra;
            }});
            0x72: mskqh({{
                int bv = Rb_or_imm<2:0>;
                Rc = bv ? (Ra & ~(mask(64) >> (64 - 8 * bv))) : Ra;
            }});

            // ext*l: shift right by the byte offset, keep the low field.
            0x06: extbl({{ Rc = (Ra_uq >> (Rb_or_imm<2:0> * 8))< 7:0>; }});
            0x16: extwl({{ Rc = (Ra_uq >> (Rb_or_imm<2:0> * 8))<15:0>; }});
            0x26: extll({{ Rc = (Ra_uq >> (Rb_or_imm<2:0> * 8))<31:0>; }});
            0x36: extql({{ Rc = (Ra_uq >> (Rb_or_imm<2:0> * 8)); }});

            // ext*h: shift left so the high part of a spanning field lines
            // up at bit 0, then keep the low field.
            0x5a: extwh({{
                Rc = (Ra << (64 - (Rb_or_imm<2:0> * 8))<5:0>)<15:0>; }});
            0x6a: extlh({{
                Rc = (Ra << (64 - (Rb_or_imm<2:0> * 8))<5:0>)<31:0>; }});
            0x7a: extqh({{
                Rc = (Ra << (64 - (Rb_or_imm<2:0> * 8))<5:0>); }});

            // ins*l: place the low field of Ra at the byte offset.
            0x0b: insbl({{ Rc = Ra< 7:0> << (Rb_or_imm<2:0> * 8); }});
            0x1b: inswl({{ Rc = Ra<15:0> << (Rb_or_imm<2:0> * 8); }});
            0x2b: insll({{ Rc = Ra<31:0> << (Rb_or_imm<2:0> * 8); }});
            0x3b: insql({{ Rc = Ra << (Rb_or_imm<2:0> * 8); }});

            // ins*h: produce the part of the inserted field that spills
            // into the next quadword; zero offset produces 0.
            0x57: inswh({{
                int bv = Rb_or_imm<2:0>;
                Rc = bv ? (Ra_uq<15:0> >> (64 - 8 * bv)) : 0;
            }});
            0x67: inslh({{
                int bv = Rb_or_imm<2:0>;
                Rc = bv ? (Ra_uq<31:0> >> (64 - 8 * bv)) : 0;
            }});
            0x77: insqh({{
                int bv = Rb_or_imm<2:0>;
                Rc = bv ? (Ra_uq >> (64 - 8 * bv)) : 0;
            }});

            // zap/zapnot: each low bit of Rb selects a byte of Ra to clear
            // (zap) or to keep (zapnot).
            0x30: zap({{
                uint64_t zapmask = 0;
                for (int i = 0; i < 8; ++i) {
                    if (Rb_or_imm<i:>)
                        zapmask |= (mask(8) << (i * 8));
                }
                Rc = Ra & ~zapmask;
            }});
            0x31: zapnot({{
                uint64_t zapmask = 0;
                for (int i = 0; i < 8; ++i) {
                    if (!Rb_or_imm<i:>)
                        zapmask |= (mask(8) << (i * 8));
                }
                Rc = Ra & ~zapmask;
            }});
        }
| |
        // Opcode 0x13: integer multiplies.  umulh returns the high 64 bits
        // of the 128-bit product; the *v variants trap on signed overflow.
        0x13: decode INTFUNC {      // integer multiplies
            0x00: mull({{ Rc_sl = Ra_sl * Rb_or_imm_sl; }}, IntMultOp);
            0x20: mulq({{ Rc = Ra * Rb_or_imm; }}, IntMultOp);
            0x30: umulh({{
                uint64_t hi, lo;
                mul128(Ra, Rb_or_imm, hi, lo);
                Rc = hi;
            }}, IntMultOp);
            0x40: mullv({{
                // 32-bit multiply with trap on overflow
                int64_t Rax = Ra_sl;    // sign extended version of Ra_sl
                int64_t Rbx = Rb_or_imm_sl;
                int64_t tmp = Rax * Rbx;
                // To avoid overflow, all the upper 32 bits must match
                // the sign bit of the lower 32.  We code this as
                // checking the upper 33 bits for all 0s or all 1s.
                uint64_t sign_bits = tmp<63:31>;
                if (sign_bits != 0 && sign_bits != mask(33))
                    fault = std::make_shared<IntegerOverflowFault>();
                Rc_sl = tmp<31:0>;
            }}, IntMultOp);
            0x60: mulqv({{
                // 64-bit multiply with trap on overflow
                uint64_t hi, lo;
                mul128(Ra, Rb_or_imm, hi, lo);
                // all the upper 64 bits must match the sign bit of
                // the lower 64
                if (!((hi == 0 && lo<63:> == 0) ||
                      (hi == mask(64) && lo<63:> == 1)))
                    fault = std::make_shared<IntegerOverflowFault>();
                Rc = lo;
            }}, IntMultOp);
        }
| |
        // Opcode 0x1c: sign extends, count instructions, and the MVI
        // (motion video) pack/unpack/min/max byte and word operations.
        0x1c: decode INTFUNC {
            // Sign-extension ops require RA == 31.
            0x00: decode RA { 31: sextb({{ Rc_sb = Rb_or_imm< 7:0>; }}); }
            0x01: decode RA { 31: sextw({{ Rc_sw = Rb_or_imm<15:0>; }}); }

            // ctpop: population count of Rb.
            0x30: ctpop({{
                uint64_t count = 0;
                for (int i = 0; Rb<63:i>; ++i) {
                    if (Rb<i:i> == 0x1)
                        ++count;
                }
                Rc = count;
            }}, IntAluOp);

            // perr: sum of absolute per-byte differences of Ra and Rb.
            0x31: perr({{
                uint64_t temp = 0;
                int hi = 7;
                int lo = 0;
                for (int i = 0; i < 8; ++i) {
                    uint8_t ra_ub = Ra_uq<hi:lo>;
                    uint8_t rb_ub = Rb_uq<hi:lo>;
                    temp += (ra_ub >= rb_ub) ?
                        (ra_ub - rb_ub) : (rb_ub - ra_ub);
                    hi += 8;
                    lo += 8;
                }
                Rc = temp;
            }});

            // ctlz: count leading zeros via binary reduction.
            0x32: ctlz({{
                uint64_t count = 0;
                uint64_t temp = Rb;
                if (temp<63:32>) temp >>= 32; else count += 32;
                if (temp<31:16>) temp >>= 16; else count += 16;
                if (temp<15:8>) temp >>= 8; else count += 8;
                if (temp<7:4>) temp >>= 4; else count += 4;
                if (temp<3:2>) temp >>= 2; else count += 2;
                if (temp<1:1>) temp >>= 1; else count += 1;
                if ((temp<0:0>) != 0x1) count += 1;
                Rc = count;
            }}, IntAluOp);

            // cttz: count trailing zeros via binary reduction.
            0x33: cttz({{
                uint64_t count = 0;
                uint64_t temp = Rb;
                if (!(temp<31:0>)) { temp >>= 32; count += 32; }
                if (!(temp<15:0>)) { temp >>= 16; count += 16; }
                if (!(temp<7:0>)) { temp >>= 8; count += 8; }
                if (!(temp<3:0>)) { temp >>= 4; count += 4; }
                if (!(temp<1:0>)) { temp >>= 2; count += 2; }
                if (!(temp<0:0> & ULL(0x1))) {
                    temp >>= 1; count += 1;
                }
                if (!(temp<0:0> & ULL(0x1))) count += 1;
                Rc = count;
            }}, IntAluOp);


            // unpkbw: spread the four low bytes of Rb into the four words
            // of the result (zero-extended).
            0x34: unpkbw({{
                Rc = (Rb_uq<7:0>
                      | (Rb_uq<15:8> << 16)
                      | (Rb_uq<23:16> << 32)
                      | (Rb_uq<31:24> << 48));
            }}, IntAluOp);

            // unpkbl: spread the two low bytes of Rb into two longwords.
            0x35: unpkbl({{
                Rc = (Rb_uq<7:0> | (Rb_uq<15:8> << 32));
            }}, IntAluOp);

            // pkwb: pack the low byte of each word of Rb into four bytes.
            0x36: pkwb({{
                Rc = (Rb_uq<7:0>
                      | (Rb_uq<23:16> << 8)
                      | (Rb_uq<39:32> << 16)
                      | (Rb_uq<55:48> << 24));
            }}, IntAluOp);

            // pklb: pack the low byte of each longword of Rb into two bytes.
            0x37: pklb({{
                Rc = (Rb_uq<7:0> | (Rb_uq<39:32> << 8));
            }}, IntAluOp);

            // min/max families: element-wise signed/unsigned byte or word
            // minimum/maximum of Ra and Rb, assembled MSB-first.
            0x38: minsb8({{
                uint64_t temp = 0;
                int hi = 63;
                int lo = 56;
                for (int i = 7; i >= 0; --i) {
                    int8_t ra_sb = Ra_uq<hi:lo>;
                    int8_t rb_sb = Rb_uq<hi:lo>;
                    temp = ((temp << 8)
                            | ((ra_sb < rb_sb) ? Ra_uq<hi:lo>
                                               : Rb_uq<hi:lo>));
                    hi -= 8;
                    lo -= 8;
                }
                Rc = temp;
            }});

            0x39: minsw4({{
                uint64_t temp = 0;
                int hi = 63;
                int lo = 48;
                for (int i = 3; i >= 0; --i) {
                    int16_t ra_sw = Ra_uq<hi:lo>;
                    int16_t rb_sw = Rb_uq<hi:lo>;
                    temp = ((temp << 16)
                            | ((ra_sw < rb_sw) ? Ra_uq<hi:lo>
                                               : Rb_uq<hi:lo>));
                    hi -= 16;
                    lo -= 16;
                }
                Rc = temp;
            }});

            0x3a: minub8({{
                uint64_t temp = 0;
                int hi = 63;
                int lo = 56;
                for (int i = 7; i >= 0; --i) {
                    uint8_t ra_ub = Ra_uq<hi:lo>;
                    uint8_t rb_ub = Rb_uq<hi:lo>;
                    temp = ((temp << 8)
                            | ((ra_ub < rb_ub) ? Ra_uq<hi:lo>
                                               : Rb_uq<hi:lo>));
                    hi -= 8;
                    lo -= 8;
                }
                Rc = temp;
            }});

            0x3b: minuw4({{
                uint64_t temp = 0;
                int hi = 63;
                int lo = 48;
                for (int i = 3; i >= 0; --i) {
                    uint16_t ra_sw = Ra_uq<hi:lo>;
                    uint16_t rb_sw = Rb_uq<hi:lo>;
                    temp = ((temp << 16)
                            | ((ra_sw < rb_sw) ? Ra_uq<hi:lo>
                                               : Rb_uq<hi:lo>));
                    hi -= 16;
                    lo -= 16;
                }
                Rc = temp;
            }});

            0x3c: maxub8({{
                uint64_t temp = 0;
                int hi = 63;
                int lo = 56;
                for (int i = 7; i >= 0; --i) {
                    uint8_t ra_ub = Ra_uq<hi:lo>;
                    uint8_t rb_ub = Rb_uq<hi:lo>;
                    temp = ((temp << 8)
                            | ((ra_ub > rb_ub) ? Ra_uq<hi:lo>
                                               : Rb_uq<hi:lo>));
                    hi -= 8;
                    lo -= 8;
                }
                Rc = temp;
            }});

            0x3d: maxuw4({{
                uint64_t temp = 0;
                int hi = 63;
                int lo = 48;
                for (int i = 3; i >= 0; --i) {
                    uint16_t ra_uw = Ra_uq<hi:lo>;
                    uint16_t rb_uw = Rb_uq<hi:lo>;
                    temp = ((temp << 16)
                            | ((ra_uw > rb_uw) ? Ra_uq<hi:lo>
                                               : Rb_uq<hi:lo>));
                    hi -= 16;
                    lo -= 16;
                }
                Rc = temp;
            }});

            0x3e: maxsb8({{
                uint64_t temp = 0;
                int hi = 63;
                int lo = 56;
                for (int i = 7; i >= 0; --i) {
                    int8_t ra_sb = Ra_uq<hi:lo>;
                    int8_t rb_sb = Rb_uq<hi:lo>;
                    temp = ((temp << 8)
                            | ((ra_sb > rb_sb) ? Ra_uq<hi:lo>
                                               : Rb_uq<hi:lo>));
                    hi -= 8;
                    lo -= 8;
                }
                Rc = temp;
            }});

            0x3f: maxsw4({{
                uint64_t temp = 0;
                int hi = 63;
                int lo = 48;
                for (int i = 3; i >= 0; --i) {
                    int16_t ra_sw = Ra_uq<hi:lo>;
                    int16_t rb_sw = Rb_uq<hi:lo>;
                    temp = ((temp << 16)
                            | ((ra_sw > rb_sw) ? Ra_uq<hi:lo>
                                               : Rb_uq<hi:lo>));
                    hi -= 16;
                    lo -= 16;
                }
                Rc = temp;
            }});

            // FP-to-integer register moves (bit pattern copied, no
            // conversion); RB must be 31.
            format BasicOperateWithNopCheck {
                0x70: decode RB {
                    31: ftoit({{ Rc = Fa_uq; }}, FloatCvtOp);
                }
                0x78: decode RB {
                    31: ftois({{ Rc_sl = t_to_s(Fa_uq); }},
                              FloatCvtOp);
                }
            }
        }
| } |
| |
| // Conditional branches. |
    // Conditional branches: the code block sets 'cond'; the format
    // supplies the PC-relative branch itself.
    format CondBranch {
        0x39: beq({{ cond = (Ra == 0); }});
        0x3d: bne({{ cond = (Ra != 0); }});
        0x3e: bge({{ cond = (Ra_sq >= 0); }});
        0x3f: bgt({{ cond = (Ra_sq > 0); }});
        0x3b: ble({{ cond = (Ra_sq <= 0); }});
        0x3a: blt({{ cond = (Ra_sq < 0); }});
        // blbc/blbs test only the low bit of Ra.
        0x38: blbc({{ cond = ((Ra & 1) == 0); }});
        0x3c: blbs({{ cond = ((Ra & 1) == 1); }});

        // Floating-point conditional branches test Fa against zero.
        0x31: fbeq({{ cond = (Fa == 0); }});
        0x35: fbne({{ cond = (Fa != 0); }});
        0x36: fbge({{ cond = (Fa >= 0); }});
        0x37: fbgt({{ cond = (Fa > 0); }});
        0x33: fble({{ cond = (Fa <= 0); }});
        0x32: fblt({{ cond = (Fa < 0); }});
    }
| |
| // unconditional branches |
    // Unconditional branches; bsr additionally saves the return address
    // (IsCall flag).
    format UncondBranch {
        0x30: br();
        0x34: bsr(IsCall);
    }
| |
| // indirect branches |
    // Opcode 0x1a: indirect (register) branches; the two function bits
    // act only as branch-prediction hints, distinguished here so the
    // right IsCall/IsReturn flags are attached.
    0x1a: decode JMPFUNC {
        format Jump {
            0: jmp();
            1: jsr(IsCall);
            2: ret(IsReturn);
            3: jsr_coroutine(IsCall, IsReturn);
        }
    }
| |
| // Square root and integer-to-FP moves |
    // Opcode 0x14: square roots and integer-to-FP register moves.
    0x14: decode FP_SHORTFUNC {
        // Integer to FP register moves must have RB == 31
        0x4: decode RB {
            31: decode FP_FULLFUNC {
                format BasicOperateWithNopCheck {
                    // itofs converts S (single) memory format to the
                    // in-register T format; itoft copies the bits as-is.
                    0x004: itofs({{ Fc_uq = s_to_t(Ra_ul); }}, FloatCvtOp);
                    0x024: itoft({{ Fc_uq = Ra_uq; }}, FloatCvtOp);
                    0x014: FailUnimpl::itoff();    // VAX-format conversion
                }
            }
        }

        // Square root instructions must have FA == 31
        0xb: decode FA {
            31: decode FP_TYPEFUNC {
                format FloatingPointOperate {
#if SS_COMPATIBLE_FP
                    // SimpleScalar-compatible build: sqrts computed in
                    // double precision.
                    0x0b: sqrts({{
                        if (Fb < 0.0)
                            fault = std::make_shared<ArithmeticFault>();
                        Fc = sqrt(Fb);
                    }}, FloatSqrtOp);
#else
                    0x0b: sqrts({{
                        if (Fb_sf < 0.0)
                            fault = std::make_shared<ArithmeticFault>();
                        Fc_sf = sqrt(Fb_sf);
                    }}, FloatSqrtOp);
#endif
                    0x2b: sqrtt({{
                        if (Fb < 0.0)
                            fault = std::make_shared<ArithmeticFault>();
                        Fc = sqrt(Fb);
                    }}, FloatSqrtOp);
                }
            }
        }

        // VAX-format sqrtf and sqrtg are not implemented
        0xa: FailUnimpl::sqrtfg();
    }
| |
| // IEEE floating point |
    // IEEE floating point
    0x16: decode FP_SHORTFUNC_TOP2 {
        // The top two bits of the short function code break this
        // space into four groups: binary ops, compares, reserved, and
        // conversions.  See Table 4-12 of AHB.  There are different
        // special cases in these different groups, so we decode on
        // these top two bits first just to select a decode strategy.
        // Most of these instructions may have various trapping and
        // rounding mode flags set; these are decoded in the
        // FloatingPointDecode template used by the
        // FloatingPointOperate format.

        // add/sub/mul/div: just decode on the short function code
        // and source type.  All valid trapping and rounding modes apply.
        0: decode FP_TRAPMODE {
            // check for valid trapping modes here
            0,1,5,7: decode FP_TYPEFUNC {
                format FloatingPointOperate {
#if SS_COMPATIBLE_FP
                    // SimpleScalar-compatible build: single-precision ops
                    // are performed in double precision.
                    0x00: adds({{ Fc = Fa + Fb; }});
                    0x01: subs({{ Fc = Fa - Fb; }});
                    0x02: muls({{ Fc = Fa * Fb; }}, FloatMultOp);
                    0x03: divs({{ Fc = Fa / Fb; }}, FloatDivOp);
#else
                    0x00: adds({{ Fc_sf = Fa_sf + Fb_sf; }});
                    0x01: subs({{ Fc_sf = Fa_sf - Fb_sf; }});
                    0x02: muls({{ Fc_sf = Fa_sf * Fb_sf; }}, FloatMultOp);
                    0x03: divs({{ Fc_sf = Fa_sf / Fb_sf; }}, FloatDivOp);
#endif

                    0x20: addt({{ Fc = Fa + Fb; }});
                    0x21: subt({{ Fc = Fa - Fb; }});
                    0x22: mult({{ Fc = Fa * Fb; }}, FloatMultOp);
                    0x23: divt({{ Fc = Fa / Fb; }}, FloatDivOp);
                }
            }
        }

        // Floating-point compare instructions must have the default
        // rounding mode, and may use the default trapping mode or
        // /SU.  Both trapping modes are treated the same by M5; the
        // only difference on the real hardware (as far as I can tell)
        // is that without /SU you'd get an imprecise trap if you
        // tried to compare a NaN with something else (instead of an
        // "unordered" result).
        // Alpha compares produce 2.0 for true, 0.0 for false.
        1: decode FP_FULLFUNC {
            format BasicOperateWithNopCheck {
                0x0a5, 0x5a5: cmpteq({{ Fc = (Fa == Fb) ? 2.0 : 0.0; }},
                                     FloatCmpOp);
                0x0a7, 0x5a7: cmptle({{ Fc = (Fa <= Fb) ? 2.0 : 0.0; }},
                                     FloatCmpOp);
                0x0a6, 0x5a6: cmptlt({{ Fc = (Fa < Fb) ? 2.0 : 0.0; }},
                                     FloatCmpOp);
                0x0a4, 0x5a4: cmptun({{ // unordered
                    Fc = (!(Fa < Fb) && !(Fa == Fb) && !(Fa > Fb)) ? 2.0 : 0.0;
                }}, FloatCmpOp);
            }
        }

        // The FP-to-integer and integer-to-FP conversion insts
        // require that FA be 31.
        3: decode FA {
            31: decode FP_TYPEFUNC {
                format FloatingPointOperate {
                    0x2f: decode FP_ROUNDMODE {
                        format FPFixedRounding {
                            // "chopped" i.e. round toward zero
                            0: cvttq({{ Fc_sq = (int64_t)trunc(Fb); }},
                                     Chopped);
                            // round to minus infinity
                            1: cvttq({{ Fc_sq = (int64_t)floor(Fb); }},
                                     MinusInfinity);
                        }
                        default: cvttq({{ Fc_sq = (int64_t)nearbyint(Fb); }});
                    }

                    // The cvtts opcode is overloaded to be cvtst if the trap
                    // mode is 2 or 6 (which are not valid otherwise)
                    0x2c: decode FP_FULLFUNC {
                        format BasicOperateWithNopCheck {
                            // trap on denorm version "cvtst/s" is
                            // simulated same as cvtst
                            0x2ac, 0x6ac: cvtst({{ Fc = Fb_sf; }});
                        }
                        default: cvtts({{ Fc_sf = Fb; }});
                    }

                    // The trapping mode for integer-to-FP conversions
                    // must be /SUI or nothing; /U and /SU are not
                    // allowed.  The full set of rounding modes are
                    // supported though.
                    0x3c: decode FP_TRAPMODE {
                        0,7: cvtqs({{ Fc_sf = Fb_sq; }});
                    }
                    0x3e: decode FP_TRAPMODE {
                        0,7: cvtqt({{ Fc = Fb_sq; }});
                    }
                }
            }
        }
    }
| |
| // misc FP operate |
    // Opcode 0x17: miscellaneous FP operations — longword/quadword bit
    // rearrangements, sign-copy ops, FP conditional moves, and FPCR access.
    0x17: decode FP_FULLFUNC {
        format BasicOperateWithNopCheck {
            // cvtlq/cvtql shuffle the bits between the in-register
            // longword and quadword FP layouts; no arithmetic conversion.
            0x010: cvtlq({{
                Fc_sl = (Fb_uq<63:62> << 30) | Fb_uq<58:29>;
            }});
            0x030: cvtql({{
                Fc_uq = (Fb_uq<31:30> << 62) | (Fb_uq<29:0> << 29);
            }});

            // We treat the precise & imprecise trapping versions of
            // cvtql identically.
            0x130, 0x530: cvtqlv({{
                // To avoid overflow, all the upper 32 bits must match
                // the sign bit of the lower 32.  We code this as
                // checking the upper 33 bits for all 0s or all 1s.
                uint64_t sign_bits = Fb_uq<63:31>;
                if (sign_bits != 0 && sign_bits != mask(33))
                    fault = std::make_shared<IntegerOverflowFault>();
                Fc_uq = (Fb_uq<31:30> << 62) | (Fb_uq<29:0> << 29);
            }});

            0x020: cpys({{  // copy sign
                Fc_uq = (Fa_uq<63:> << 63) | Fb_uq<62:0>;
            }});
            0x021: cpysn({{ // copy sign negated
                Fc_uq = (~Fa_uq<63:> << 63) | Fb_uq<62:0>;
            }});
            0x022: cpyse({{ // copy sign and exponent
                Fc_uq = (Fa_uq<63:52> << 52) | Fb_uq<51:0>;
            }});

            // FP conditional moves: Fc keeps its old value when the
            // condition on Fa is false.
            0x02a: fcmoveq({{ Fc = (Fa == 0) ? Fb : Fc; }});
            0x02b: fcmovne({{ Fc = (Fa != 0) ? Fb : Fc; }});
            0x02c: fcmovlt({{ Fc = (Fa < 0) ? Fb : Fc; }});
            0x02d: fcmovge({{ Fc = (Fa >= 0) ? Fb : Fc; }});
            0x02e: fcmovle({{ Fc = (Fa <= 0) ? Fb : Fc; }});
            0x02f: fcmovgt({{ Fc = (Fa > 0) ? Fb : Fc; }});

            // Move to/from the floating-point control register.
            0x024: mt_fpcr({{ FPCR = Fa_uq; }}, IsIprAccess);
            0x025: mf_fpcr({{ Fa_uq = FPCR; }}, IsIprAccess);
        }
    }
| |
| // miscellaneous mem-format ops |
    // Opcode 0x18: miscellaneous memory-format operations — prefetch
    // hints, cycle counter read, memory barriers, and rc/rs.
    0x18: decode MEMFUNC {
        format WarnUnimpl {
            0x8000: fetch();
            0xa000: fetch_m();
            0xe800: ecb();
        }

        format MiscPrefetch {
            // wh64 ("write hint 64"): treated as a prefetch of the
            // 64-byte-aligned block containing Rb.
            0xf800: wh64({{ EA = Rb & ~ULL(63); }},
                         {{ ; }},
                         mem_flags = PREFETCH);
        }

        format BasicOperate {
            // rpcc: read the processor cycle counter.  In full-system
            // mode this comes from the CC IPR; in SE mode curTick() is
            // used instead.
            0xc000: rpcc({{
                /* Rb is a fake dependency so here is a fun way to get
                 * the parser to understand that.
                 */
                uint64_t unused_var M5_VAR_USED = Rb;
                Ra = FullSystem ? xc->readMiscReg(IPR_CC) : curTick();
            }}, IsUnverifiable);

            // All of the barrier instructions below do nothing in
            // their execute() methods (hence the empty code blocks).
            // All of their functionality is hard-coded in the
            // pipeline based on the flags IsSerializing,
            // IsMemBarrier, and IsWriteBarrier.  In the current
            // detailed CPU model, the execute() function only gets
            // called at fetch, so there's no way to generate pipeline
            // behavior at any other stage.  Once we go to an
            // exec-in-exec CPU model we should be able to get rid of
            // these flags and implement this behavior via the
            // execute() methods.

            // trapb is just a barrier on integer traps, where excb is
            // a barrier on integer and FP traps.  "EXCB is thus a
            // superset of TRAPB." (Alpha ARM, Sec 4.11.4) We treat
            // them the same though.
            0x0000: trapb({{ }}, IsSerializing, IsSerializeBefore, No_OpClass);
            0x0400: excb({{ }}, IsSerializing, IsSerializeBefore, No_OpClass);
            0x4000: mb({{ }}, IsMemBarrier, MemReadOp);
            0x4400: wmb({{ }}, IsWriteBarrier, MemWriteOp);
        }

        // rc/rs: read-and-clear / read-and-set the interrupt flag.
        // Only implemented for full-system mode.
        0xe000: decode FullSystemInt {
            0: FailUnimpl::rc_se();
            default: BasicOperate::rc({{
                Ra = IntrFlag;
                IntrFlag = 0;
            }}, IsNonSpeculative, IsUnverifiable);
        }
        0xf000: decode FullSystemInt {
            0: FailUnimpl::rs_se();
            default: BasicOperate::rs({{
                Ra = IntrFlag;
                IntrFlag = 1;
            }}, IsNonSpeculative, IsUnverifiable);
        }
    }
| |
    // Opcode 0x00: PAL calls.  In syscall-emulation mode (FullSystemInt
    // == 0) a few PAL functions are emulated directly; in full-system
    // mode the call vectors into PALcode.
    0x00: decode FullSystemInt {
        0: decode PALFUNC {
            format EmulatedCallPal {
                0x00: halt ({{
                    exitSimLoop("halt instruction encountered");
                }}, IsNonSpeculative);
                0x83: callsys({{
                    xc->syscall(R0, &fault);
                }}, IsSerializeAfter, IsNonSpeculative, IsSyscall);
                // Read uniq reg into ABI return value register (r0)
                0x9e: rduniq({{ R0 = Runiq; }}, IsIprAccess);
                // Write uniq reg with value from ABI arg register (r16)
                0x9f: wruniq({{ Runiq = R16; }}, IsIprAccess);
            }
        }
        default: CallPal::call_pal({{
            if (!palValid ||
                (palPriv
                 && xc->readMiscReg(IPR_ICM) != mode_kernel)) {
                // invalid pal function code, or attempt to do privileged
                // PAL call in non-kernel mode
                fault = std::make_shared<UnimplementedOpcodeFault>();
            } else {
                // check to see if simulator wants to do something special
                // on this PAL call (including maybe suppress it)
                bool dopal = true;
                ThreadContext *tc = xc->tcBase();
                // Kernel stats are optional; only the Alpha-specific
                // subclass knows about PAL calls.
                auto *base_stats = tc->getKernelStats();
                auto *stats = dynamic_cast<AlphaISA::Kernel::Statistics *>(
                    base_stats);
                assert(stats || !base_stats);
                if (stats)
                    stats->callpal(palFunc, tc);

                System *sys = tc->getSystemPtr();

                switch (palFunc) {
                  case PAL::halt:
                    // Halt this CPU; end simulation when the last
                    // running system halts.
                    xc->tcBase()->halt();
                    if (--System::numSystemsRunning == 0)
                        exitSimLoop("all cpus halted");
                    break;

                  case PAL::bpt:
                  case PAL::bugchk:
                    // Give the simulator's debugger a chance to absorb
                    // the breakpoint; if it does, suppress the PAL call.
                    if (sys->breakpoint())
                        dopal = false;
                    break;
                }

                if (dopal) {
                    // Save the return address and vector into PALcode.
                    xc->setMiscReg(IPR_EXC_ADDR, NPC);
                    NPC = xc->readMiscReg(IPR_PAL_BASE) + palOffset;
                }
            }
        }}, IsNonSpeculative);
    }
| |
    // Opcode 0x1b: hw_ld — PAL-mode-only physical/virtual load.
    // Decoding it outside PAL mode raises an opcdec fault.
    0x1b: decode PALMODE {
        0: OpcdecFault::hw_st_quad();
        1: decode HW_LDST_QUAD {
            format HwLoad {
                0: hw_ld({{ EA = (Rb + disp) & ~3; }}, {{ Ra = Mem_ul; }},
                         L, IsSerializing, IsSerializeBefore);
                1: hw_ld({{ EA = (Rb + disp) & ~7; }}, {{ Ra = Mem_uq; }},
                         Q, IsSerializing, IsSerializeBefore);
            }
        }
    }
| |
    // Opcode 0x1f: hw_st — PAL-mode-only store (longword or quadword).
    // The conditional variant is not implemented.
    0x1f: decode PALMODE {
        0: OpcdecFault::hw_st_cond();
        format HwStore {
            1: decode HW_LDST_COND {
                0: decode HW_LDST_QUAD {
                    0: hw_st({{ EA = (Rb + disp) & ~3; }},
                        {{ Mem_ul = Ra<31:0>; }}, L, IsSerializing, IsSerializeBefore);
                    1: hw_st({{ EA = (Rb + disp) & ~7; }},
                        {{ Mem_uq = Ra_uq; }}, Q, IsSerializing, IsSerializeBefore);
                }

                1: FailUnimpl::hw_st_cond();
            }
        }
    }
| |
    // Opcode 0x19: hw_mfpr — read an internal processor register
    // (PAL mode only).  The IPR index is mapped to a misc-reg index;
    // unmapped or unreadable IPRs raise an opcdec fault.
    0x19: decode PALMODE {
        0: OpcdecFault::hw_mfpr();
        format HwMoveIPR {
            1: hw_mfpr({{
                int miscRegIndex = (ipr_index < MaxInternalProcRegs) ?
                    IprToMiscRegIndex[ipr_index] : -1;
                if(miscRegIndex < 0 || !IprIsReadable(miscRegIndex) ||
                   miscRegIndex >= NumInternalProcRegs)
                    fault = std::make_shared<UnimplementedOpcodeFault>();
                else
                    Ra = xc->readMiscReg(miscRegIndex);
            }}, IsIprAccess);
        }
    }
| |
    // Opcode 0x1d: hw_mtpr — write an internal processor register
    // (PAL mode only).  Mirrors hw_mfpr's index validation.
    0x1d: decode PALMODE {
        0: OpcdecFault::hw_mtpr();
        format HwMoveIPR {
            1: hw_mtpr({{
                int miscRegIndex = (ipr_index < MaxInternalProcRegs) ?
                    IprToMiscRegIndex[ipr_index] : -1;
                if(miscRegIndex < 0 || !IprIsWritable(miscRegIndex) ||
                   miscRegIndex >= NumInternalProcRegs)
                    fault = std::make_shared<UnimplementedOpcodeFault>();
                else
                    xc->setMiscReg(miscRegIndex, Ra);
                // Record the written value in the exec trace, if any.
                if (traceData) { traceData->setData(Ra); }
            }}, IsIprAccess);
        }
    }
| |
    // Opcode 0x1e: hw_rei — return from PALcode to the interrupted
    // stream (PAL mode only).  Clears the lock flag and resumes at the
    // saved exception address.
    0x1e: decode PALMODE {
        0: OpcdecFault::hw_rei();
        format BasicOperate {
            1: hw_rei({{
                Addr pc = PC;
                // NOTE(review): faults when the low PC bits are clear —
                // presumably a sanity check that we really are executing
                // from a PAL-mode PC; confirm against the PAL-mode PC
                // encoding used elsewhere.
                if (!(pc & 0x3))
                    return std::make_shared<UnimplementedOpcodeFault>();

                LockFlag = false;
                NPC = IprExcAddr;

                ThreadContext *tc = xc->tcBase();
                // Kernel stats are optional; only the Alpha-specific
                // subclass tracks hwrei counts.
                auto *base_stats = tc->getKernelStats();
                auto *stats = dynamic_cast<AlphaISA::Kernel::Statistics *>(
                    base_stats);
                assert(stats || !base_stats);
                if (stats)
                    stats->hwrei();

                CPA::cpa()->swAutoBegin(tc, IprExcAddr);
            }}, IsSerializing, IsSerializeBefore);
        }
    }
| |
| format BasicOperate { |
| // M5 special opcodes use the reserved 0x01 opcode space |
| 0x01: decode M5FUNC { |
| 0x00: arm({{ |
| PseudoInst::arm(xc->tcBase()); |
| }}, IsNonSpeculative); |
| 0x01: quiesce({{ |
| // Don't sleep if (unmasked) interrupts are pending |
| Interrupts* interrupts = |
| xc->tcBase()->getCpuPtr()->getInterruptController(0); |
| if (interrupts->checkInterrupts(xc->tcBase())) { |
| PseudoInst::quiesceSkip(xc->tcBase()); |
| } else { |
| PseudoInst::quiesce(xc->tcBase()); |
| } |
| }}, IsNonSpeculative, IsQuiesce); |
| 0x02: quiesceNs({{ |
| PseudoInst::quiesceNs(xc->tcBase(), R16); |
| }}, IsNonSpeculative, IsQuiesce); |
| 0x03: quiesceCycles({{ |
| PseudoInst::quiesceCycles(xc->tcBase(), R16); |
| }}, IsNonSpeculative, IsQuiesce, IsUnverifiable); |
| 0x04: quiesceTime({{ |
| R0 = PseudoInst::quiesceTime(xc->tcBase()); |
| }}, IsNonSpeculative, IsUnverifiable); |
| 0x07: rpns({{ |
| R0 = PseudoInst::rpns(xc->tcBase()); |
| }}, IsNonSpeculative, IsUnverifiable); |
| 0x09: wakeCPU({{ |
| PseudoInst::wakeCPU(xc->tcBase(), R16); |
| }}, IsNonSpeculative, IsUnverifiable); |
| 0x10: deprecated_ivlb({{ |
| warn_once("Obsolete M5 ivlb instruction encountered.\n"); |
| }}); |
| 0x11: deprecated_ivle({{ |
| warn_once("Obsolete M5 ivlb instruction encountered.\n"); |
| }}); |
| 0x20: deprecated_exit ({{ |
| warn_once("deprecated M5 exit instruction encountered.\n"); |
| PseudoInst::m5exit(xc->tcBase(), 0); |
| }}, No_OpClass, IsNonSpeculative); |
| 0x21: m5exit({{ |
| PseudoInst::m5exit(xc->tcBase(), R16); |
| }}, No_OpClass, IsNonSpeculative); |
| 0x31: loadsymbol({{ |
| PseudoInst::loadsymbol(xc->tcBase()); |
| }}, No_OpClass, IsNonSpeculative); |
| 0x30: initparam({{ |
| Ra = PseudoInst::initParam(xc->tcBase(), R16, R17); |
| }}); |
| 0x40: resetstats({{ |
| PseudoInst::resetstats(xc->tcBase(), R16, R17); |
| }}, IsNonSpeculative); |
| 0x41: dumpstats({{ |
| PseudoInst::dumpstats(xc->tcBase(), R16, R17); |
| }}, IsNonSpeculative); |
| 0x42: dumpresetstats({{ |
| PseudoInst::dumpresetstats(xc->tcBase(), R16, R17); |
| }}, IsNonSpeculative); |
| 0x43: m5checkpoint({{ |
| PseudoInst::m5checkpoint(xc->tcBase(), R16, R17); |
| }}, IsNonSpeculative); |
| 0x50: m5readfile({{ |
| R0 = PseudoInst::readfile(xc->tcBase(), R16, R17, R18); |
| }}, IsNonSpeculative); |
| 0x51: m5break({{ |
| PseudoInst::debugbreak(xc->tcBase()); |
| }}, IsNonSpeculative); |
| 0x52: m5switchcpu({{ |
| PseudoInst::switchcpu(xc->tcBase()); |
| }}, IsNonSpeculative); |
| 0x53: m5addsymbol({{ |
| PseudoInst::addsymbol(xc->tcBase(), R16, R17); |
| }}, IsNonSpeculative); |
| 0x54: m5panic({{ |
| panic("M5 panic instruction called at pc = %#x.", PC); |
| }}, IsNonSpeculative); |
| #define CPANN(lbl) CPA::cpa()->lbl(xc->tcBase()) |
| 0x55: decode RA { |
| 0x00: m5a_old({{ |
| panic("Deprecated M5 annotate instruction executed " |
| "at pc = %#x\n", PC); |
| }}, IsNonSpeculative); |
| 0x01: m5a_bsm({{ |
| CPANN(swSmBegin); |
| }}, IsNonSpeculative); |
| 0x02: m5a_esm({{ |
| CPANN(swSmEnd); |
| }}, IsNonSpeculative); |
| 0x03: m5a_begin({{ |
| CPANN(swExplictBegin); |
| }}, IsNonSpeculative); |
| 0x04: m5a_end({{ |
| CPANN(swEnd); |
| }}, IsNonSpeculative); |
| 0x06: m5a_q({{ |
| CPANN(swQ); |
| }}, IsNonSpeculative); |
| 0x07: m5a_dq({{ |
| CPANN(swDq); |
| }}, IsNonSpeculative); |
| 0x08: m5a_wf({{ |
| CPANN(swWf); |
| }}, IsNonSpeculative); |
| 0x09: m5a_we({{ |
| CPANN(swWe); |
| }}, IsNonSpeculative); |
| 0x0C: m5a_sq({{ |
| CPANN(swSq); |
| }}, IsNonSpeculative); |
| 0x0D: m5a_aq({{ |
| CPANN(swAq); |
| }}, IsNonSpeculative); |
| 0x0E: m5a_pq({{ |
| CPANN(swPq); |
| }}, IsNonSpeculative); |
| 0x0F: m5a_l({{ |
| CPANN(swLink); |
| }}, IsNonSpeculative); |
| 0x10: m5a_identify({{ |
| CPANN(swIdentify); |
| }}, IsNonSpeculative); |
| 0x11: m5a_getid({{ |
| R0 = CPANN(swGetId); |
| }}, IsNonSpeculative); |
| 0x13: m5a_scl({{ |
| CPANN(swSyscallLink); |
| }}, IsNonSpeculative); |
| 0x14: m5a_rq({{ |
| CPANN(swRq); |
| }}, IsNonSpeculative); |
| } // M5 Annotate Operations |
| #undef CPANN |
| 0x56: m5reserved2({{ |
| warn("M5 reserved opcode ignored"); |
| }}, IsNonSpeculative); |
| 0x57: m5reserved3({{ |
| warn("M5 reserved opcode ignored"); |
| }}, IsNonSpeculative); |
| 0x58: m5reserved4({{ |
| warn("M5 reserved opcode ignored"); |
| }}, IsNonSpeculative); |
| 0x59: m5reserved5({{ |
| warn("M5 reserved opcode ignored"); |
| }}, IsNonSpeculative); |
| } |
| } |
| } |