| // -*- mode:c++ -*- |
| |
| // Copyright (c) 2010-2011, 2016-2019 ARM Limited |
| // All rights reserved |
| // |
| // The license below extends only to copyright in the software and shall |
| // not be construed as granting a license to any other intellectual |
| // property including but not limited to intellectual property relating |
| // to a hardware implementation of the functionality of the software |
| // licensed hereunder. You may use the software subject to the license |
| // terms below provided that you ensure that this notice is replicated |
| // unmodified and in its entirety in all distributions of the software, |
| // modified or unmodified, in source code or in binary form. |
| // |
| // Copyright (c) 2007-2008 The Florida State University |
| // All rights reserved. |
| // |
| // Redistribution and use in source and binary forms, with or without |
| // modification, are permitted provided that the following conditions are |
| // met: redistributions of source code must retain the above copyright |
| // notice, this list of conditions and the following disclaimer; |
| // redistributions in binary form must reproduce the above copyright |
| // notice, this list of conditions and the following disclaimer in the |
| // documentation and/or other materials provided with the distribution; |
| // neither the name of the copyright holders nor the names of its |
| // contributors may be used to endorse or promote products derived from |
| // this software without specific prior written permission. |
| // |
| // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS |
| // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT |
| // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR |
| // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT |
| // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, |
| // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT |
| // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
| // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
| // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
| // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE |
| // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
| |
| //////////////////////////////////////////////////////////////////// |
| // |
| // Floating point and Advanced SIMD (Neon) instructions |
| // |
| |
| output header {{ |
| |
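| // Helper: instantiate a Neon memory instruction templated on the |
| // element type selected by the log2 element size (0 -> uint8_t, |
| // 1 -> uint16_t, 2 -> uint32_t, 3 -> uint64_t). |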
| template<template <typename T> class Base> |
| StaticInstPtr |
| newNeonMemInst(const unsigned size, |
| const ExtMachInst &machInst, |
| const RegIndex dest, const RegIndex ra, |
| const uint32_t imm, const unsigned extraMemFlags) |
| { |
| switch (size) { |
| case 0: |
| return new Base<uint8_t>(machInst, dest, ra, imm, extraMemFlags); |
| case 1: |
| return new Base<uint16_t>(machInst, dest, ra, imm, extraMemFlags); |
| case 2: |
| return new Base<uint32_t>(machInst, dest, ra, imm, extraMemFlags); |
| case 3: |
| return new Base<uint64_t>(machInst, dest, ra, imm, extraMemFlags); |
| default: |
| panic("Unrecognized width %d for Neon mem inst.\n", (1 << size)); |
| } |
| } |
| |
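| // Helper: the same element-type selection for Neon mix instructions; |
| // the extra 'step' argument is forwarded to the constructor. |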
| template<template <typename T> class Base> |
| StaticInstPtr |
| newNeonMixInst(const unsigned size, |
| const ExtMachInst &machInst, |
| const RegIndex dest, const RegIndex op1, |
| const uint32_t step) |
| { |
| switch (size) { |
| case 0: |
| return new Base<uint8_t>(machInst, dest, op1, step); |
| case 1: |
| return new Base<uint16_t>(machInst, dest, op1, step); |
| case 2: |
| return new Base<uint32_t>(machInst, dest, op1, step); |
| case 3: |
| return new Base<uint64_t>(machInst, dest, op1, step); |
| default: |
| panic("Unrecognized width %d for Neon mix inst.\n", (1 << size)); |
| } |
| } |
| |
| }}; |
| |
| let {{ |
| header_output = ''' |
| StaticInstPtr |
| decodeNeonMem(ExtMachInst machInst); |
| |
| StaticInstPtr |
| decodeNeonData(ExtMachInst machInst); |
| |
| StaticInstPtr |
| decodeAdvancedSIMD(ExtMachInst machInst); |
| ''' |
| |
| decoder_output = ''' |
| StaticInstPtr |
| decodeNeonMem(ExtMachInst machInst) |
| { |
| const uint32_t b = bits(machInst, 11, 8); |
| const bool single = bits(machInst, 23); |
| const bool singleAll = single && (bits(b, 3, 2) == 3); |
| const bool load = bits(machInst, 21); |
| |
| unsigned width = 0; |
| |
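| // width is the number of elements per structure (the N in VLDN/VSTN), |
| // taken from bits(b, 1, 0) for the single-element forms and from the |
| // type field for the register-list forms. |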
| if (single) { |
| width = bits(b, 1, 0) + 1; |
| } else { |
| switch (bits(b, 3, 1)) { |
| case 0x0: width = 4; |
| break; |
| case 0x1: width = (b & 0x1) ? 2 : 1; |
| break; |
| case 0x2: width = 3; |
| break; |
| case 0x3: width = 1; |
| break; |
| case 0x4: width = 2; |
| break; |
| case 0x5: |
| if ((b & 0x1) == 0) { |
| width = 1; |
| break; |
| } |
| [[fallthrough]]; |
| default: |
| return new Unknown(machInst); |
| } |
| } |
| assert(width > 0 && width <= 4); |
| |
| const RegIndex rm = (RegIndex)(uint32_t)bits(machInst, 3, 0); |
| const RegIndex rn = (RegIndex)(uint32_t)bits(machInst, 19, 16); |
| const RegIndex vd = (RegIndex)(uint32_t)(bits(machInst, 15, 12) | |
| bits(machInst, 22) << 4); |
| const uint32_t type = bits(machInst, 11, 8); |
| uint32_t size = 0; |
| uint32_t align = 0; |
| unsigned inc = 1; |
| unsigned regs = 1; |
| unsigned lane = 0; |
| if (single) { |
| if (singleAll) { |
| size = bits(machInst, 7, 6); |
| bool t = bits(machInst, 5); |
| align = size | MMU::AllowUnaligned; |
| if (width == 1) { |
| regs = t ? 2 : 1; |
| inc = 1; |
| } else { |
| regs = width; |
| inc = t ? 2 : 1; |
| } |
| switch (width) { |
| case 1: |
| case 2: |
| if (bits(machInst, 4)) |
| align = size + width - 1; |
| break; |
| case 3: |
| break; |
| case 4: |
| if (size == 3) { |
| if (bits(machInst, 4) == 0) |
| return new Unknown(machInst); |
| size = 2; |
| align = 0x4; |
| } else if (size == 2) { |
| if (bits(machInst, 4)) |
| align = 0x3; |
| } else { |
| if (bits(machInst, 4)) |
| align = size + 2; |
| } |
| break; |
| } |
| } else { |
| size = bits(machInst, 11, 10); |
| align = size | MMU::AllowUnaligned; |
| regs = width; |
| unsigned indexAlign = bits(machInst, 7, 4); |
| // inc is set from the index/align field in the switch below; if |
| // width is 1 it is forced back to 1 afterwards. |
| switch (size) { |
| case 0: |
| inc = 1; |
| lane = bits(indexAlign, 3, 1); |
| break; |
| case 1: |
| inc = bits(indexAlign, 1) ? 2 : 1; |
| lane = bits(indexAlign, 3, 2); |
| break; |
| case 2: |
| inc = bits(indexAlign, 2) ? 2 : 1; |
| lane = bits(indexAlign, 3); |
| break; |
| } |
| // Override inc for width of 1. |
| if (width == 1) { |
| inc = 1; |
| } |
| switch (width) { |
| case 1: |
| switch (size) { |
| case 0: |
| break; |
| case 1: |
| if (bits(indexAlign, 0)) |
| align = 1; |
| break; |
| case 2: |
| if (bits(indexAlign, 1, 0)) |
| align = 2; |
| break; |
| } |
| break; |
| case 2: |
| if (bits(indexAlign, 0)) |
| align = size + 1; |
| break; |
| case 3: |
| break; |
| case 4: |
| switch (size) { |
| case 0: |
| case 1: |
| if (bits(indexAlign, 0)) |
| align = size + 2; |
| break; |
| case 2: |
| if (bits(indexAlign, 0)) |
| align = bits(indexAlign, 1, 0) + 2; |
| break; |
| } |
| break; |
| } |
| } |
| if (size == 0x3) { |
| return new Unknown(machInst); |
| } |
| } else { |
| size = bits(machInst, 7, 6); |
| align = bits(machInst, 5, 4); |
| if (align == 0) { |
| // @align wasn't specified, so alignment can be turned off. |
| align = size | MMU::AllowUnaligned; |
| } else { |
| align = align + 2; |
| } |
| switch (width) { |
| case 1: |
| switch (type) { |
| case 0x7: regs = 1; |
| break; |
| case 0xa: regs = 2; |
| break; |
| case 0x6: regs = 3; |
| break; |
| case 0x2: regs = 4; |
| break; |
| default: |
| return new Unknown(machInst); |
| } |
| break; |
| case 2: |
| // The regs count doesn't match the manual exactly: the manual |
| // loops over the register list twice, and we break that down |
| // in the macroop. |
| switch (type) { |
| case 0x8: regs = 2; inc = 1; |
| break; |
| case 0x9: regs = 2; inc = 2; |
| break; |
| case 0x3: regs = 4; inc = 2; |
| break; |
| default: |
| return new Unknown(machInst); |
| } |
| break; |
| case 3: |
| regs = 3; |
| switch (type) { |
| case 0x4: inc = 1; |
| break; |
| case 0x5: inc = 2; |
| break; |
| default: |
| return new Unknown(machInst); |
| } |
| break; |
| case 4: |
| regs = 4; |
| switch (type) { |
| case 0: inc = 1; |
| break; |
| case 1: inc = 2; |
| break; |
| default: |
| return new Unknown(machInst); |
| } |
| break; |
| } |
| } |
| |
| if (load) { |
| // Load instructions. |
| if (single) { |
| return new VldSingle(machInst, singleAll, width, rn, vd, |
| regs, inc, size, align, rm, lane); |
| } else { |
| return new VldMult(machInst, width, rn, vd, |
| regs, inc, size, align, rm); |
| } |
| } else { |
| // Store instructions. |
| if (single) { |
| if (singleAll) { |
| return new Unknown(machInst); |
| } else { |
| return new VstSingle(machInst, false, width, rn, vd, |
| regs, inc, size, align, rm, lane); |
| } |
| } else { |
| return new VstMult(machInst, width, rn, vd, |
| regs, inc, size, align, rm); |
| } |
| } |
| return new Unknown(machInst); |
| } |
| ''' |
| decoder_output += ''' |
| StaticInstPtr |
| decodeAdvancedSIMD(ExtMachInst machInst) |
| { |
| uint8_t op_code = (bits(machInst, 25) << 1) |
| | bits(machInst, 21); |
| |
| IntRegIndex vd = (IntRegIndex)(2 * (bits(machInst, 15, 12) | |
| (bits(machInst, 22) << 4))); |
| IntRegIndex vn = (IntRegIndex)(2 * (bits(machInst, 19, 16) | |
| (bits(machInst, 7) << 4))); |
| IntRegIndex vm = (IntRegIndex)(2 * (bits(machInst, 3, 0) | |
| (bits(machInst, 5) << 4))); |
| bool q = bits(machInst, 6); |
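| // op_code picks among the complex-number instructions decoded here: |
| // 0 -> VCADD, 1 -> VCMLA, 2/3 -> VCMLA (by element). |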
| switch (op_code) { |
| case 0x0: |
| { |
| // VCADD |
| bool s = bits(machInst, 20); |
| if (s) { |
| if (q) |
| return new VcaddQ<uint32_t>(machInst, vd, vn, vm); |
| else |
| return new VcaddD<uint32_t>(machInst, vd, vn, vm); |
| } else { |
| if (q) |
| return new VcaddQ<uint16_t>(machInst, vd, vn, vm); |
| else |
| return new VcaddD<uint16_t>(machInst, vd, vn, vm); |
| } |
| } |
| case 0x1: |
| { |
| // VCMLA |
| bool s = bits(machInst, 20); |
| if (s) { |
| if (q) |
| return new VcmlaQ<uint32_t>(machInst, vd, vn, vm); |
| else |
| return new VcmlaD<uint32_t>(machInst, vd, vn, vm); |
| } else { |
| if (q) |
| return new VcmlaQ<uint16_t>(machInst, vd, vn, vm); |
| else |
| return new VcmlaD<uint16_t>(machInst, vd, vn, vm); |
| } |
| } |
| case 0x2: |
| case 0x3: |
| { |
| // VCMLA by element |
| bool s = bits(machInst, 23); |
| if (s) { |
| uint8_t index_fp = 0; |
| if (q) |
| return new VcmlaElemQ<uint32_t>(machInst, vd, vn, vm, |
| index_fp); |
| else |
| return new VcmlaElemD<uint32_t>(machInst, vd, vn, vm, |
| index_fp); |
| } else { |
| vm = (IntRegIndex)(uint8_t)(2 * bits(machInst, 3, 0)); |
| uint8_t index_fp = bits(machInst, 5); |
| if (q) |
| return new VcmlaElemQ<uint16_t>(machInst, vd, vn, vm, |
| index_fp); |
| else |
| return new VcmlaElemD<uint16_t>(machInst, vd, vn, vm, |
| index_fp); |
| } |
| } |
| default: |
| return new Unknown64(machInst); |
| } |
| |
| } |
| ''' |
| |
| |
| decoder_output += ''' |
| static StaticInstPtr |
| decodeNeonThreeRegistersSameLength(ExtMachInst machInst) |
| { |
| const bool u = THUMB ? bits(machInst, 28) : bits(machInst, 24); |
| const uint32_t opc = bits(machInst, 11, 8); |
| const bool o1 = bits(machInst, 4); |
| const uint32_t size = bits(machInst, 21, 20); |
| const IntRegIndex vd = |
| (IntRegIndex)(2 * (bits(machInst, 15, 12) | |
| (bits(machInst, 22) << 4))); |
| const IntRegIndex vn = |
| (IntRegIndex)(2 * (bits(machInst, 19, 16) | |
| (bits(machInst, 7) << 4))); |
| const IntRegIndex vm = |
| (IntRegIndex)(2 * (bits(machInst, 3, 0) | |
| (bits(machInst, 5) << 4))); |
| const bool q = bits(machInst, 6); |
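| // Q-register operands must map to even D-register indices, so the |
| // quadword forms reject odd encodings below. |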
| if (q && ((vd & 0x1) || (vn & 0x1) || (vm & 0x1))) |
| return new Unknown(machInst); |
| switch (opc) { |
| case 0x0: |
| if (o1) { |
| if (u) { |
| return decodeNeonUThreeReg<VqaddUD, VqaddUQ>( |
| q, size, machInst, vd, vn, vm); |
| } else { |
| return decodeNeonSThreeReg<VqaddSD, VqaddSQ>( |
| q, size, machInst, vd, vn, vm); |
| } |
| } else { |
| if (size == 3) |
| return new Unknown(machInst); |
| return decodeNeonUSThreeReg<VhaddD, VhaddQ>( |
| q, u, size, machInst, vd, vn, vm); |
| } |
| case 0x1: |
| if (!o1) { |
| return decodeNeonUSThreeReg<VrhaddD, VrhaddQ>( |
| q, u, size, machInst, vd, vn, vm); |
| } else { |
| if (u) { |
| switch (size) { |
| case 0: |
| if (q) { |
| return new VeorQ<uint64_t>(machInst, vd, vn, vm); |
| } else { |
| return new VeorD<uint64_t>(machInst, vd, vn, vm); |
| } |
| case 1: |
| if (q) { |
| return new VbslQ<uint64_t>(machInst, vd, vn, vm); |
| } else { |
| return new VbslD<uint64_t>(machInst, vd, vn, vm); |
| } |
| case 2: |
| if (q) { |
| return new VbitQ<uint64_t>(machInst, vd, vn, vm); |
| } else { |
| return new VbitD<uint64_t>(machInst, vd, vn, vm); |
| } |
| case 3: |
| if (q) { |
| return new VbifQ<uint64_t>(machInst, vd, vn, vm); |
| } else { |
| return new VbifD<uint64_t>(machInst, vd, vn, vm); |
| } |
| default: |
| GEM5_UNREACHABLE; |
| } |
| } else { |
| switch (size) { |
| case 0: |
| if (q) { |
| return new VandQ<uint64_t>(machInst, vd, vn, vm); |
| } else { |
| return new VandD<uint64_t>(machInst, vd, vn, vm); |
| } |
| case 1: |
| if (q) { |
| return new VbicQ<uint64_t>(machInst, vd, vn, vm); |
| } else { |
| return new VbicD<uint64_t>(machInst, vd, vn, vm); |
| } |
| case 2: |
| if (vn == vm) { |
| if (q) { |
| return new VmovQ<uint64_t>( |
| machInst, vd, vn, vm); |
| } else { |
| return new VmovD<uint64_t>( |
| machInst, vd, vn, vm); |
| } |
| } else { |
| if (q) { |
| return new VorrQ<uint64_t>( |
| machInst, vd, vn, vm); |
| } else { |
| return new VorrD<uint64_t>( |
| machInst, vd, vn, vm); |
| } |
| } |
| case 3: |
| if (q) { |
| return new VornQ<uint64_t>( |
| machInst, vd, vn, vm); |
| } else { |
| return new VornD<uint64_t>( |
| machInst, vd, vn, vm); |
| } |
| default: |
| GEM5_UNREACHABLE; |
| } |
| } |
| } |
| case 0x2: |
| if (o1) { |
| if (u) { |
| return decodeNeonUThreeReg<VqsubUD, VqsubUQ>( |
| q, size, machInst, vd, vn, vm); |
| } else { |
| return decodeNeonSThreeReg<VqsubSD, VqsubSQ>( |
| q, size, machInst, vd, vn, vm); |
| } |
| } else { |
| if (size == 3) |
| return new Unknown(machInst); |
| return decodeNeonUSThreeReg<VhsubD, VhsubQ>( |
| q, u, size, machInst, vd, vn, vm); |
| } |
| case 0x3: |
| if (o1) { |
| return decodeNeonUSThreeReg<VcgeD, VcgeQ>( |
| q, u, size, machInst, vd, vn, vm); |
| } else { |
| return decodeNeonUSThreeReg<VcgtD, VcgtQ>( |
| q, u, size, machInst, vd, vn, vm); |
| } |
| case 0x4: |
| if (o1) { |
| if (u) { |
| return decodeNeonUThreeReg<VqshlUD, VqshlUQ>( |
| q, size, machInst, vd, vm, vn); |
| } else { |
| return decodeNeonSThreeReg<VqshlSD, VqshlSQ>( |
| q, size, machInst, vd, vm, vn); |
| } |
| } else { |
| return decodeNeonUSThreeReg<VshlD, VshlQ>( |
| q, u, size, machInst, vd, vm, vn); |
| } |
| case 0x5: |
| if (o1) { |
| if (u) { |
| return decodeNeonUThreeReg<VqrshlUD, VqrshlUQ>( |
| q, size, machInst, vd, vm, vn); |
| } else { |
| return decodeNeonSThreeReg<VqrshlSD, VqrshlSQ>( |
| q, size, machInst, vd, vm, vn); |
| } |
| } else { |
| return decodeNeonUSThreeReg<VrshlD, VrshlQ>( |
| q, u, size, machInst, vd, vm, vn); |
| } |
| case 0x6: |
| if (o1) { |
| return decodeNeonUSThreeReg<VminD, VminQ>( |
| q, u, size, machInst, vd, vn, vm); |
| } else { |
| return decodeNeonUSThreeReg<VmaxD, VmaxQ>( |
| q, u, size, machInst, vd, vn, vm); |
| } |
| case 0x7: |
| if (o1) { |
| return decodeNeonUSThreeReg<VabaD, VabaQ>( |
| q, u, size, machInst, vd, vn, vm); |
| } else { |
| if (bits(machInst, 23) == 1) { |
| if (q) { |
| return new Unknown(machInst); |
| } else { |
| return decodeNeonUSThreeUSReg<Vabdl>( |
| u, size, machInst, vd, vn, vm); |
| } |
| } else { |
| return decodeNeonUSThreeReg<VabdD, VabdQ>( |
| q, u, size, machInst, vd, vn, vm); |
| } |
| } |
| case 0x8: |
| if (o1) { |
| if (u) { |
| return decodeNeonUThreeReg<VceqD, VceqQ>( |
| q, size, machInst, vd, vn, vm); |
| } else { |
| return decodeNeonUThreeReg<VtstD, VtstQ>( |
| q, size, machInst, vd, vn, vm); |
| } |
| } else { |
| if (u) { |
| return decodeNeonUThreeReg<NVsubD, NVsubQ>( |
| q, size, machInst, vd, vn, vm); |
| } else { |
| return decodeNeonUThreeReg<NVaddD, NVaddQ>( |
| q, size, machInst, vd, vn, vm); |
| } |
| } |
| case 0x9: |
| if (o1) { |
| if (u) { |
| return decodeNeonUThreeReg<NVmulpD, NVmulpQ>( |
| q, size, machInst, vd, vn, vm); |
| } else { |
| return decodeNeonSThreeReg<NVmulD, NVmulQ>( |
| q, size, machInst, vd, vn, vm); |
| } |
| } else { |
| if (u) { |
| return decodeNeonUSThreeReg<NVmlsD, NVmlsQ>( |
| q, u, size, machInst, vd, vn, vm); |
| } else { |
| return decodeNeonUSThreeReg<NVmlaD, NVmlaQ>( |
| q, u, size, machInst, vd, vn, vm); |
| } |
| } |
| case 0xa: |
| if (q) |
| return new Unknown(machInst); |
| if (o1) { |
| return decodeNeonUSThreeUSReg<VpminD>( |
| u, size, machInst, vd, vn, vm); |
| } else { |
| return decodeNeonUSThreeUSReg<VpmaxD>( |
| u, size, machInst, vd, vn, vm); |
| } |
| case 0xb: |
| if (o1) { |
| if (u) { |
| return decodeNeonSThreeSReg<VqrdmlahD, VqrdmlahQ>( |
| q, size, machInst, vd, vn, vm); |
| } else if (q) { |
| return new Unknown(machInst); |
| } else { |
| return decodeNeonUThreeUSReg<NVpaddD>( |
| size, machInst, vd, vn, vm); |
| } |
| } else { |
| if (u) { |
| return decodeNeonSThreeSReg<VqrdmulhD, VqrdmulhQ>( |
| q, size, machInst, vd, vn, vm); |
| } else { |
| return decodeNeonSThreeSReg<VqdmulhD, VqdmulhQ>( |
| q, size, machInst, vd, vn, vm); |
| } |
| } |
| case 0xc: |
| if (o1) { |
| if (u) { |
| return decodeNeonSThreeSReg<VqrdmlshD, VqrdmlshQ>( |
| q, size, machInst, vd, vn, vm); |
| } else { |
| if (bits(size, 1) == 0) { |
| if (q) { |
| return new NVfmaQFp<float>(machInst, vd, vn, vm); |
| } else { |
| return new NVfmaDFp<float>(machInst, vd, vn, vm); |
| } |
| } else { |
| if (q) { |
| return new NVfmsQFp<float>(machInst, vd, vn, vm); |
| } else { |
| return new NVfmsDFp<float>(machInst, vd, vn, vm); |
| } |
| } |
| } |
| } else { |
| if (u) { |
| switch (size) { |
| case 0x0: |
| return new SHA256H(machInst, vd, vn, vm); |
| case 0x1: |
| return new SHA256H2(machInst, vd, vn, vm); |
| case 0x2: |
| return new SHA256SU1(machInst, vd, vn, vm); |
| case 0x3: |
| return new Unknown(machInst); |
| default: |
| GEM5_UNREACHABLE; |
| } |
| } else { |
| switch (size) { |
| case 0x0: |
| return new SHA1C(machInst, vd, vn, vm); |
| case 0x1: |
| return new SHA1P(machInst, vd, vn, vm); |
| case 0x2: |
| return new SHA1M(machInst, vd, vn, vm); |
| case 0x3: |
| return new SHA1SU0(machInst, vd, vn, vm); |
| default: |
| GEM5_UNREACHABLE; |
| } |
| } |
| } |
| return new Unknown(machInst); |
| case 0xd: |
| if (o1) { |
| if (u) { |
| if (bits(size, 1) == 0) { |
| if (q) { |
| return new NVmulQFp<float>(machInst, vd, vn, vm); |
| } else { |
| return new NVmulDFp<float>(machInst, vd, vn, vm); |
| } |
| } else { |
| return new Unknown(machInst); |
| } |
| } else { |
| if (bits(size, 1) == 0) { |
| if (q) { |
| return new NVmlaQFp<float>(machInst, vd, vn, vm); |
| } else { |
| return new NVmlaDFp<float>(machInst, vd, vn, vm); |
| } |
| } else { |
| if (q) { |
| return new NVmlsQFp<float>(machInst, vd, vn, vm); |
| } else { |
| return new NVmlsDFp<float>(machInst, vd, vn, vm); |
| } |
| } |
| } |
| } else { |
| if (u) { |
| if (bits(size, 1) == 0) { |
| if (q) { |
| return new VpaddQFp<float>(machInst, vd, vn, vm); |
| } else { |
| return new VpaddDFp<float>(machInst, vd, vn, vm); |
| } |
| } else { |
| if (q) { |
| return new VabdQFp<float>(machInst, vd, vn, vm); |
| } else { |
| return new VabdDFp<float>(machInst, vd, vn, vm); |
| } |
| } |
| } else { |
| if (bits(size, 1) == 0) { |
| if (q) { |
| return new VaddQFp<float>(machInst, vd, vn, vm); |
| } else { |
| return new VaddDFp<float>(machInst, vd, vn, vm); |
| } |
| } else { |
| if (q) { |
| return new VsubQFp<float>(machInst, vd, vn, vm); |
| } else { |
| return new VsubDFp<float>(machInst, vd, vn, vm); |
| } |
| } |
| } |
| } |
| case 0xe: |
| if (o1) { |
| if (u) { |
| if (bits(size, 1) == 0) { |
| if (q) { |
| return new VacgeQFp<float>(machInst, vd, vn, vm); |
| } else { |
| return new VacgeDFp<float>(machInst, vd, vn, vm); |
| } |
| } else { |
| if (q) { |
| return new VacgtQFp<float>(machInst, vd, vn, vm); |
| } else { |
| return new VacgtDFp<float>(machInst, vd, vn, vm); |
| } |
| } |
| } else { |
| return new Unknown(machInst); |
| } |
| } else { |
| if (u) { |
| if (bits(size, 1) == 0) { |
| if (q) { |
| return new VcgeQFp<float>(machInst, vd, vn, vm); |
| } else { |
| return new VcgeDFp<float>(machInst, vd, vn, vm); |
| } |
| } else { |
| if (q) { |
| return new VcgtQFp<float>(machInst, vd, vn, vm); |
| } else { |
| return new VcgtDFp<float>(machInst, vd, vn, vm); |
| } |
| } |
| } else { |
| if (bits(size, 1) == 0) { |
| if (q) { |
| return new VceqQFp<float>(machInst, vd, vn, vm); |
| } else { |
| return new VceqDFp<float>(machInst, vd, vn, vm); |
| } |
| } else { |
| return new Unknown(machInst); |
| } |
| } |
| } |
| case 0xf: |
| if (o1) { |
| if (u) { |
| if (bits(size, 1) == 0) { |
| if (q) { |
| return new VmaxnmQFp<uint32_t>( |
| machInst, vd, vn, vm); |
| } else { |
| return new VmaxnmDFp<uint32_t>( |
| machInst, vd, vn, vm); |
| } |
| } else { |
| if (q) { |
| return new VminnmQFp<uint32_t>( |
| machInst, vd, vn, vm); |
| } else { |
| return new VminnmDFp<uint32_t>( |
| machInst, vd, vn, vm); |
| } |
| } |
| } else { |
| if (bits(size, 1) == 0) { |
| if (q) { |
| return new VrecpsQFp<float>(machInst, vd, vn, vm); |
| } else { |
| return new VrecpsDFp<float>(machInst, vd, vn, vm); |
| } |
| } else { |
| if (q) { |
| return new VrsqrtsQFp<float>(machInst, vd, vn, vm); |
| } else { |
| return new VrsqrtsDFp<float>(machInst, vd, vn, vm); |
| } |
| } |
| } |
| } else { |
| if (u) { |
| if (bits(size, 1) == 0) { |
| if (q) { |
| return new VpmaxQFp<uint32_t>( |
| machInst, vd, vn, vm); |
| } else { |
| return new VpmaxDFp<uint32_t>( |
| machInst, vd, vn, vm); |
| } |
| } else { |
| if (q) { |
| return new VpminQFp<uint32_t>( |
| machInst, vd, vn, vm); |
| } else { |
| return new VpminDFp<uint32_t>( |
| machInst, vd, vn, vm); |
| } |
| } |
| } else { |
| if (bits(size, 1) == 0) { |
| if (q) { |
| return new VmaxQFp<uint32_t>( |
| machInst, vd, vn, vm); |
| } else { |
| return new VmaxDFp<uint32_t>( |
| machInst, vd, vn, vm); |
| } |
| } else { |
| if (q) { |
| return new VminQFp<uint32_t>( |
| machInst, vd, vn, vm); |
| } else { |
| return new VminDFp<uint32_t>( |
| machInst, vd, vn, vm); |
| } |
| } |
| } |
| } |
| } |
| return new Unknown(machInst); |
| } |
| |
| static StaticInstPtr |
| decodeNeonOneRegModImm(ExtMachInst machInst) |
| { |
| const IntRegIndex vd = |
| (IntRegIndex)(2 * (bits(machInst, 15, 12) | |
| (bits(machInst, 22) << 4))); |
| const bool q = bits(machInst, 6); |
| const bool op = bits(machInst, 5); |
| const uint8_t cmode = bits(machInst, 11, 8); |
| const uint8_t imm = ((THUMB ? bits(machInst, 28) : |
| bits(machInst, 24)) << 7) | |
| (bits(machInst, 18, 16) << 4) | |
| (bits(machInst, 3, 0) << 0); |
| |
| // Check for invalid immediate encodings and return an unknown op |
| // if it happens |
| bool immValid = true; |
| const uint64_t bigImm = simd_modified_imm(op, cmode, imm, immValid); |
| if (!immValid) { |
| return new Unknown(machInst); |
| } |
| |
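| // op and cmode select among the VMOV, VMVN, VORR and VBIC immediate |
| // forms; the nested tests below follow the cmode bit pattern. |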
| if (op) { |
| if (bits(cmode, 3) == 0) { |
| if (bits(cmode, 0) == 0) { |
| if (q) |
| return new NVmvniQ<uint64_t>(machInst, vd, bigImm); |
| else |
| return new NVmvniD<uint64_t>(machInst, vd, bigImm); |
| } else { |
| if (q) |
| return new NVbiciQ<uint64_t>(machInst, vd, bigImm); |
| else |
| return new NVbiciD<uint64_t>(machInst, vd, bigImm); |
| } |
| } else { |
| if (bits(cmode, 2) == 1) { |
| switch (bits(cmode, 1, 0)) { |
| case 0: |
| case 1: |
| if (q) |
| return new NVmvniQ<uint64_t>(machInst, vd, bigImm); |
| else |
| return new NVmvniD<uint64_t>(machInst, vd, bigImm); |
| case 2: |
| if (q) |
| return new NVmoviQ<uint64_t>(machInst, vd, bigImm); |
| else |
| return new NVmoviD<uint64_t>(machInst, vd, bigImm); |
| case 3: |
| return new Unknown(machInst); |
| } |
| } else { |
| if (bits(cmode, 0) == 0) { |
| if (q) |
| return new NVmvniQ<uint64_t>(machInst, vd, bigImm); |
| else |
| return new NVmvniD<uint64_t>(machInst, vd, bigImm); |
| } else { |
| if (q) |
| return new NVbiciQ<uint64_t>(machInst, vd, bigImm); |
| else |
| return new NVbiciD<uint64_t>(machInst, vd, bigImm); |
| } |
| } |
| } |
| } else { |
| if (bits(cmode, 3) == 0) { |
| if (bits(cmode, 0) == 0) { |
| if (q) |
| return new NVmoviQ<uint64_t>(machInst, vd, bigImm); |
| else |
| return new NVmoviD<uint64_t>(machInst, vd, bigImm); |
| } else { |
| if (q) |
| return new NVorriQ<uint64_t>(machInst, vd, bigImm); |
| else |
| return new NVorriD<uint64_t>(machInst, vd, bigImm); |
| } |
| } else { |
| if (bits(cmode, 2) == 1) { |
| if (q) |
| return new NVmoviQ<uint64_t>(machInst, vd, bigImm); |
| else |
| return new NVmoviD<uint64_t>(machInst, vd, bigImm); |
| } else { |
| if (bits(cmode, 0) == 0) { |
| if (q) |
| return new NVmoviQ<uint64_t>(machInst, vd, bigImm); |
| else |
| return new NVmoviD<uint64_t>(machInst, vd, bigImm); |
| } else { |
| if (q) |
| return new NVorriQ<uint64_t>(machInst, vd, bigImm); |
| else |
| return new NVorriD<uint64_t>(machInst, vd, bigImm); |
| } |
| } |
| } |
| } |
| return new Unknown(machInst); |
| } |
| |
| static StaticInstPtr |
| decodeNeonTwoRegAndShift(ExtMachInst machInst) |
| { |
| const uint32_t opc = bits(machInst, 11, 8); |
| const bool u = THUMB ? bits(machInst, 28) : bits(machInst, 24); |
| const bool q = bits(machInst, 6); |
| const bool l = bits(machInst, 7); |
| const IntRegIndex vd = |
| (IntRegIndex)(2 * (bits(machInst, 15, 12) | |
| (bits(machInst, 22) << 4))); |
| const IntRegIndex vm = |
| (IntRegIndex)(2 * (bits(machInst, 3, 0) | |
| (bits(machInst, 5) << 4))); |
| unsigned imm6 = bits(machInst, 21, 16); |
| unsigned imm = ((l ? 1 : 0) << 6) | imm6; |
| unsigned size = 3; |
| unsigned lShiftAmt = 0; |
| unsigned bitSel; |
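| // Scan {L, imm6} from the top bit down: the position of the highest |
| // set bit encodes the element size, and the bits below it form the |
| // left shift amount. |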
| for (bitSel = 1 << 6; true; bitSel >>= 1) { |
| if (bitSel & imm) |
| break; |
| else if (!size) |
| return new Unknown(machInst); |
| size--; |
| } |
| lShiftAmt = imm6 & ~bitSel; |
| unsigned rShiftAmt = 0; |
| if (opc != 0xe && opc != 0xf) { |
| if (size > 2) |
| rShiftAmt = 64 - imm6; |
| else |
| rShiftAmt = 2 * (8 << size) - imm6; |
| } |
| |
| switch (opc) { |
| case 0x0: |
| return decodeNeonUSTwoShiftReg<NVshrD, NVshrQ>( |
| q, u, size, machInst, vd, vm, rShiftAmt); |
| case 0x1: |
| return decodeNeonUSTwoShiftReg<NVsraD, NVsraQ>( |
| q, u, size, machInst, vd, vm, rShiftAmt); |
| case 0x2: |
| return decodeNeonUSTwoShiftReg<NVrshrD, NVrshrQ>( |
| q, u, size, machInst, vd, vm, rShiftAmt); |
| case 0x3: |
| return decodeNeonUSTwoShiftReg<NVrsraD, NVrsraQ>( |
| q, u, size, machInst, vd, vm, rShiftAmt); |
| case 0x4: |
| if (u) { |
| return decodeNeonUTwoShiftReg<NVsriD, NVsriQ>( |
| q, size, machInst, vd, vm, rShiftAmt); |
| } else { |
| return new Unknown(machInst); |
| } |
| case 0x5: |
| if (u) { |
| return decodeNeonUTwoShiftReg<NVsliD, NVsliQ>( |
| q, size, machInst, vd, vm, lShiftAmt); |
| } else { |
| return decodeNeonUTwoShiftReg<NVshlD, NVshlQ>( |
| q, size, machInst, vd, vm, lShiftAmt); |
| } |
| case 0x6: |
| case 0x7: |
| if (u) { |
| if (opc == 0x6) { |
| return decodeNeonSTwoShiftReg<NVqshlusD, NVqshlusQ>( |
| q, size, machInst, vd, vm, lShiftAmt); |
| } else { |
| return decodeNeonUTwoShiftReg<NVqshluD, NVqshluQ>( |
| q, size, machInst, vd, vm, lShiftAmt); |
| } |
| } else { |
| return decodeNeonSTwoShiftReg<NVqshlD, NVqshlQ>( |
| q, size, machInst, vd, vm, lShiftAmt); |
| } |
| case 0x8: |
| if (l) { |
| return new Unknown(machInst); |
| } else if (u) { |
| return decodeNeonSTwoShiftSReg<NVqshruns, NVqrshruns>( |
| q, size, machInst, vd, vm, rShiftAmt); |
| } else { |
| return decodeNeonUTwoShiftSReg<NVshrn, NVrshrn>( |
| q, size, machInst, vd, vm, rShiftAmt); |
| } |
| case 0x9: |
| if (l) { |
| return new Unknown(machInst); |
| } else if (u) { |
| return decodeNeonUTwoShiftSReg<NVqshrun, NVqrshrun>( |
| q, size, machInst, vd, vm, rShiftAmt); |
| } else { |
| return decodeNeonSTwoShiftSReg<NVqshrn, NVqrshrn>( |
| q, size, machInst, vd, vm, rShiftAmt); |
| } |
| case 0xa: |
| if (l || q) { |
| return new Unknown(machInst); |
| } else { |
| return decodeNeonUSTwoShiftSReg<NVmovl, NVshll>( |
| lShiftAmt, u, size, machInst, vd, vm, lShiftAmt); |
| } |
| case 0xe: |
| if (l) { |
| return new Unknown(machInst); |
| } else { |
| if (bits(imm6, 5) == 0) |
| return new Unknown(machInst); |
| if (u) { |
| if (q) { |
| return new NVcvtu2fpQ<float>( |
| machInst, vd, vm, 64 - imm6); |
| } else { |
| return new NVcvtu2fpD<float>( |
| machInst, vd, vm, 64 - imm6); |
| } |
| } else { |
| if (q) { |
| return new NVcvts2fpQ<float>( |
| machInst, vd, vm, 64 - imm6); |
| } else { |
| return new NVcvts2fpD<float>( |
| machInst, vd, vm, 64 - imm6); |
| } |
| } |
| } |
| case 0xf: |
| if (l) { |
| return new Unknown(machInst); |
| } else { |
| if (bits(imm6, 5) == 0) |
| return new Unknown(machInst); |
| if (u) { |
| if (q) { |
| return new NVcvt2ufxQ<float>( |
| machInst, vd, vm, 64 - imm6); |
| } else { |
| return new NVcvt2ufxD<float>( |
| machInst, vd, vm, 64 - imm6); |
| } |
| } else { |
| if (q) { |
| return new NVcvt2sfxQ<float>( |
| machInst, vd, vm, 64 - imm6); |
| } else { |
| return new NVcvt2sfxD<float>( |
| machInst, vd, vm, 64 - imm6); |
| } |
| } |
| } |
| } |
| return new Unknown(machInst); |
| } |
| |
| static StaticInstPtr |
| decodeNeonThreeRegDiffLengths(ExtMachInst machInst) |
| { |
| const bool u = THUMB ? bits(machInst, 28) : bits(machInst, 24); |
| const uint32_t opc = bits(machInst, 11, 8); |
| const IntRegIndex vd = |
| (IntRegIndex)(2 * (bits(machInst, 15, 12) | |
| (bits(machInst, 22) << 4))); |
| const IntRegIndex vn = |
| (IntRegIndex)(2 * (bits(machInst, 19, 16) | |
| (bits(machInst, 7) << 4))); |
| const IntRegIndex vm = |
| (IntRegIndex)(2 * (bits(machInst, 3, 0) | |
| (bits(machInst, 5) << 4))); |
| const unsigned size = bits(machInst, 21, 20); |
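| // Long/wide/narrow three-register operations (VADDL/VADDW, |
| // VSUBL/VSUBW, VxADDHN/VxSUBHN, VABAL/VABDL, VMLAL/VMLSL, VMULL and |
| // the saturating doubling variants), selected by opc. |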
| switch (opc) { |
| case 0x0: |
| return decodeNeonUSThreeUSReg<Vaddl>( |
| u, size, machInst, vd, vn, vm); |
| case 0x1: |
| return decodeNeonUSThreeUSReg<Vaddw>( |
| u, size, machInst, vd, vn, vm); |
| case 0x2: |
| return decodeNeonUSThreeUSReg<Vsubl>( |
| u, size, machInst, vd, vn, vm); |
| case 0x3: |
| return decodeNeonUSThreeUSReg<Vsubw>( |
| u, size, machInst, vd, vn, vm); |
| case 0x4: |
| if (u) { |
| return decodeNeonUThreeUSReg<Vraddhn>( |
| size, machInst, vd, vn, vm); |
| } else { |
| return decodeNeonUThreeUSReg<Vaddhn>( |
| size, machInst, vd, vn, vm); |
| } |
| case 0x5: |
| return decodeNeonUSThreeUSReg<Vabal>( |
| u, size, machInst, vd, vn, vm); |
| case 0x6: |
| if (u) { |
| return decodeNeonUThreeUSReg<Vrsubhn>( |
| size, machInst, vd, vn, vm); |
| } else { |
| return decodeNeonUThreeUSReg<Vsubhn>( |
| size, machInst, vd, vn, vm); |
| } |
| case 0x7: |
| if (bits(machInst, 23)) { |
| return decodeNeonUSThreeUSReg<Vabdl>( |
| u, size, machInst, vd, vn, vm); |
| } else { |
| return decodeNeonUSThreeReg<VabdD, VabdQ>( |
| bits(machInst, 6), u, size, machInst, vd, vn, vm); |
| } |
| case 0x8: |
| return decodeNeonUSThreeUSReg<Vmlal>( |
| u, size, machInst, vd, vn, vm); |
| case 0xa: |
| return decodeNeonUSThreeUSReg<Vmlsl>( |
| u, size, machInst, vd, vn, vm); |
| case 0x9: |
| if (u) { |
| return new Unknown(machInst); |
| } else { |
| return decodeNeonSThreeUSReg<Vqdmlal>( |
| size, machInst, vd, vn, vm); |
| } |
| case 0xb: |
| if (u) { |
| return new Unknown(machInst); |
| } else { |
| return decodeNeonSThreeUSReg<Vqdmlsl>( |
| size, machInst, vd, vn, vm); |
| } |
| case 0xc: |
| return decodeNeonUSThreeUSReg<Vmull>( |
| u, size, machInst, vd, vn, vm); |
| case 0xd: |
| if (u) { |
| return new Unknown(machInst); |
| } else { |
| return decodeNeonSThreeUSReg<Vqdmull>( |
| size, machInst, vd, vn, vm); |
| } |
| case 0xe: |
| return decodeNeonUThreeUSReg<Vmullp>( |
| size, machInst, vd, vn, vm); |
| } |
| return new Unknown(machInst); |
| } |
| |
| static StaticInstPtr |
| decodeNeonTwoRegScalar(ExtMachInst machInst) |
| { |
| const bool u = THUMB ? bits(machInst, 28) : bits(machInst, 24); |
| const uint32_t opc = bits(machInst, 11, 8); |
| const unsigned size = bits(machInst, 21, 20); |
| const IntRegIndex vd = |
| (IntRegIndex)(2 * (bits(machInst, 15, 12) | |
| (bits(machInst, 22) << 4))); |
| const IntRegIndex vn = |
| (IntRegIndex)(2 * (bits(machInst, 19, 16) | |
| (bits(machInst, 7) << 4))); |
| const IntRegIndex vm = (size == 2) ? |
| (IntRegIndex)(2 * bits(machInst, 3, 0)) : |
| (IntRegIndex)(2 * bits(machInst, 2, 0)); |
| const unsigned index = (size == 2) ? (unsigned)bits(machInst, 5) : |
| (bits(machInst, 3) | (bits(machInst, 5) << 1)); |
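| // For 32-bit elements the scalar index is bit 5 and vm uses all four |
| // Vm bits; for 16-bit elements vm is limited to three bits and the |
| // index is formed from bits 3 and 5. |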
| switch (opc) { |
| case 0x0: |
| if (u) { |
| switch (size) { |
| case 1: |
| return new VmlasQ<uint16_t>(machInst, vd, vn, vm, index); |
| case 2: |
| return new VmlasQ<uint32_t>(machInst, vd, vn, vm, index); |
| default: |
| return new Unknown(machInst); |
| } |
| } else { |
| switch (size) { |
| case 1: |
| return new VmlasD<uint16_t>(machInst, vd, vn, vm, index); |
| case 2: |
| return new VmlasD<uint32_t>(machInst, vd, vn, vm, index); |
| default: |
| return new Unknown(machInst); |
| } |
| } |
| case 0x1: |
| if (u) |
| return new VmlasQFp<float>(machInst, vd, vn, vm, index); |
| else |
| return new VmlasDFp<float>(machInst, vd, vn, vm, index); |
| case 0x4: |
| if (u) { |
| switch (size) { |
| case 1: |
| return new VmlssQ<uint16_t>(machInst, vd, vn, vm, index); |
| case 2: |
| return new VmlssQ<uint32_t>(machInst, vd, vn, vm, index); |
| default: |
| return new Unknown(machInst); |
| } |
| } else { |
| switch (size) { |
| case 1: |
| return new VmlssD<uint16_t>(machInst, vd, vn, vm, index); |
| case 2: |
| return new VmlssD<uint32_t>(machInst, vd, vn, vm, index); |
| default: |
| return new Unknown(machInst); |
| } |
| } |
| case 0x5: |
| if (u) |
| return new VmlssQFp<float>(machInst, vd, vn, vm, index); |
| else |
| return new VmlssDFp<float>(machInst, vd, vn, vm, index); |
| case 0x2: |
| if (u) { |
| switch (size) { |
| case 1: |
| return new Vmlals<uint16_t>(machInst, vd, vn, vm, index); |
| case 2: |
| return new Vmlals<uint32_t>(machInst, vd, vn, vm, index); |
| default: |
| return new Unknown(machInst); |
| } |
| } else { |
| switch (size) { |
| case 1: |
| return new Vmlals<int16_t>(machInst, vd, vn, vm, index); |
| case 2: |
| return new Vmlals<int32_t>(machInst, vd, vn, vm, index); |
| default: |
| return new Unknown(machInst); |
| } |
| } |
| case 0x6: |
| if (u) { |
| switch (size) { |
| case 1: |
| return new Vmlsls<uint16_t>(machInst, vd, vn, vm, index); |
| case 2: |
| return new Vmlsls<uint32_t>(machInst, vd, vn, vm, index); |
| default: |
| return new Unknown(machInst); |
| } |
| } else { |
| switch (size) { |
| case 1: |
| return new Vmlsls<int16_t>(machInst, vd, vn, vm, index); |
| case 2: |
| return new Vmlsls<int32_t>(machInst, vd, vn, vm, index); |
| default: |
| return new Unknown(machInst); |
| } |
| } |
| case 0x3: |
| if (u) { |
| return new Unknown(machInst); |
| } else { |
| switch (size) { |
| case 1: |
| return new Vqdmlals<int16_t>(machInst, vd, vn, vm, index); |
| case 2: |
| return new Vqdmlals<int32_t>(machInst, vd, vn, vm, index); |
| default: |
| return new Unknown(machInst); |
| } |
| } |
| case 0x7: |
| if (u) { |
| return new Unknown(machInst); |
| } else { |
| switch (size) { |
| case 1: |
| return new Vqdmlsls<int16_t>(machInst, vd, vn, vm, index); |
| case 2: |
| return new Vqdmlsls<int32_t>(machInst, vd, vn, vm, index); |
| default: |
| return new Unknown(machInst); |
| } |
| } |
| case 0x8: |
| if (u) { |
| switch (size) { |
| case 1: |
| return new VmulsQ<uint16_t>(machInst, vd, vn, vm, index); |
| case 2: |
| return new VmulsQ<uint32_t>(machInst, vd, vn, vm, index); |
| default: |
| return new Unknown(machInst); |
| } |
| } else { |
| switch (size) { |
| case 1: |
| return new VmulsD<uint16_t>(machInst, vd, vn, vm, index); |
| case 2: |
| return new VmulsD<uint32_t>(machInst, vd, vn, vm, index); |
| default: |
| return new Unknown(machInst); |
| } |
| } |
| case 0x9: |
| if (u) |
| return new VmulsQFp<float>(machInst, vd, vn, vm, index); |
| else |
| return new VmulsDFp<float>(machInst, vd, vn, vm, index); |
| case 0xa: |
| if (u) { |
| switch (size) { |
| case 1: |
| return new Vmulls<uint16_t>(machInst, vd, vn, vm, index); |
| case 2: |
| return new Vmulls<uint32_t>(machInst, vd, vn, vm, index); |
| default: |
| return new Unknown(machInst); |
| } |
| } else { |
| switch (size) { |
| case 1: |
| return new Vmulls<int16_t>(machInst, vd, vn, vm, index); |
| case 2: |
| return new Vmulls<int32_t>(machInst, vd, vn, vm, index); |
| default: |
| return new Unknown(machInst); |
| } |
| } |
| case 0xb: |
| if (u) { |
| return new Unknown(machInst); |
| } else { |
| // u is known false here, so only the signed VQDMULL |
| // (scalar) forms are reachable. |
| switch (size) { |
| case 1: |
| return new Vqdmulls<int16_t>( |
| machInst, vd, vn, vm, index); |
| case 2: |
| return new Vqdmulls<int32_t>( |
| machInst, vd, vn, vm, index); |
| default: |
| return new Unknown(machInst); |
| } |
| } |
| case 0xc: |
| if (u) { |
| switch (size) { |
| case 1: |
| return new VqdmulhsQ<int16_t>( |
| machInst, vd, vn, vm, index); |
| case 2: |
| return new VqdmulhsQ<int32_t>( |
| machInst, vd, vn, vm, index); |
| default: |
| return new Unknown(machInst); |
| } |
| } else { |
| switch (size) { |
| case 1: |
| return new VqdmulhsD<int16_t>( |
| machInst, vd, vn, vm, index); |
| case 2: |
| return new VqdmulhsD<int32_t>( |
| machInst, vd, vn, vm, index); |
| default: |
| return new Unknown(machInst); |
| } |
| } |
| case 0xd: |
| if (u) { |
| switch (size) { |
| case 1: |
| return new VqrdmulhsQ<int16_t>( |
| machInst, vd, vn, vm, index); |
| case 2: |
| return new VqrdmulhsQ<int32_t>( |
| machInst, vd, vn, vm, index); |
| default: |
| return new Unknown(machInst); |
| } |
| } else { |
| switch (size) { |
| case 1: |
| return new VqrdmulhsD<int16_t>( |
| machInst, vd, vn, vm, index); |
| case 2: |
| return new VqrdmulhsD<int32_t>( |
| machInst, vd, vn, vm, index); |
| default: |
| return new Unknown(machInst); |
| } |
| } |
| case 0xe: |
| if (u) { |
| switch (size) { |
| case 1: |
| return new VqrdmlahsQ<int16_t>( |
| machInst, vd, vn, vm, index); |
| case 2: |
| return new VqrdmlahsQ<int32_t>( |
| machInst, vd, vn, vm, index); |
| default: |
| return new Unknown(machInst); |
| } |
| } else { |
| switch (size) { |
| case 1: |
| return new VqrdmlahsD<int16_t>( |
| machInst, vd, vn, vm, index); |
| case 2: |
| return new VqrdmlahsD<int32_t>( |
| machInst, vd, vn, vm, index); |
| default: |
| return new Unknown(machInst); |
| } |
| } |
| case 0xf: |
| if (u) { |
| switch (size) { |
| case 1: |
| return new VqrdmlshsQ<int16_t>( |
| machInst, vd, vn, vm, index); |
| case 2: |
| return new VqrdmlshsQ<int32_t>( |
| machInst, vd, vn, vm, index); |
| default: |
| return new Unknown(machInst); |
| } |
| } else { |
| switch (size) { |
| case 1: |
| return new VqrdmlshsD<int16_t>( |
| machInst, vd, vn, vm, index); |
| case 2: |
| return new VqrdmlshsD<int32_t>( |
| machInst, vd, vn, vm, index); |
| default: |
| return new Unknown(machInst); |
| } |
| } |
| } |
| return new Unknown(machInst); |
| } |
| |
| static StaticInstPtr |
| decodeNeonTwoRegMisc(ExtMachInst machInst) |
| { |
| const uint32_t opc1 = bits(machInst, 17, 16); |
| const uint32_t b = bits(machInst, 10, 6); |
| const bool q = bits(machInst, 6); |
| const IntRegIndex vd = |
| (IntRegIndex)(2 * (bits(machInst, 15, 12) | |
| (bits(machInst, 22) << 4))); |
| const IntRegIndex vm = |
| (IntRegIndex)(2 * (bits(machInst, 3, 0) | |
| (bits(machInst, 5) << 4))); |
| const unsigned size = bits(machInst, 19, 18); |
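| // opc1 (bits 17:16) selects the group within the two-register |
| // miscellaneous space; b (bits 10:6) then selects the operation. |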
| switch (opc1) { |
| case 0x0: |
| switch (bits(b, 4, 1)) { |
| case 0x0: |
| switch (size) { |
| case 0: |
| if (q) { |
| return new NVrev64Q<uint8_t>(machInst, vd, vm); |
| } else { |
| return new NVrev64D<uint8_t>(machInst, vd, vm); |
| } |
| case 1: |
| if (q) { |
| return new NVrev64Q<uint16_t>(machInst, vd, vm); |
| } else { |
| return new NVrev64D<uint16_t>(machInst, vd, vm); |
| } |
| case 2: |
| if (q) { |
| return new NVrev64Q<uint32_t>(machInst, vd, vm); |
| } else { |
| return new NVrev64D<uint32_t>(machInst, vd, vm); |
| } |
| default: |
| return new Unknown(machInst); |
| } |
| case 0x1: |
| switch (size) { |
| case 0: |
| if (q) { |
| return new NVrev32Q<uint8_t>(machInst, vd, vm); |
| } else { |
| return new NVrev32D<uint8_t>(machInst, vd, vm); |
| } |
| case 1: |
| if (q) { |
| return new NVrev32Q<uint16_t>(machInst, vd, vm); |
| } else { |
| return new NVrev32D<uint16_t>(machInst, vd, vm); |
| } |
| default: |
| return new Unknown(machInst); |
| } |
| case 0x2: |
| if (size != 0) { |
| return new Unknown(machInst); |
| } else if (q) { |
| return new NVrev16Q<uint8_t>(machInst, vd, vm); |
| } else { |
| return new NVrev16D<uint8_t>(machInst, vd, vm); |
| } |
| case 0x4: |
| return decodeNeonSTwoMiscSReg<NVpaddlD, NVpaddlQ>( |
| q, size, machInst, vd, vm); |
| case 0x5: |
| return decodeNeonUTwoMiscSReg<NVpaddlD, NVpaddlQ>( |
| q, size, machInst, vd, vm); |
| case 0x6: |
| if (q == 0) { |
| return new AESE(machInst, vd, vd, vm); |
| } else { |
| return new AESD(machInst, vd, vd, vm); |
| } |
| case 0x7: |
| if (q == 0) { |
| return new AESMC(machInst, vd, vm); |
| } else { |
| return new AESIMC(machInst, vd, vm); |
| } |
| case 0x8: |
| return decodeNeonSTwoMiscReg<NVclsD, NVclsQ>( |
| q, size, machInst, vd, vm); |
| case 0x9: |
| return decodeNeonSTwoMiscReg<NVclzD, NVclzQ>( |
| q, size, machInst, vd, vm); |
| case 0xa: |
| return decodeNeonUTwoMiscReg<NVcntD, NVcntQ>( |
| q, size, machInst, vd, vm); |
| case 0xb: |
| if (q) |
| return new NVmvnQ<uint64_t>(machInst, vd, vm); |
| else |
| return new NVmvnD<uint64_t>(machInst, vd, vm); |
| case 0xc: |
| return decodeNeonSTwoMiscSReg<NVpadalD, NVpadalQ>( |
| q, size, machInst, vd, vm); |
| case 0xd: |
| return decodeNeonUTwoMiscSReg<NVpadalD, NVpadalQ>( |
| q, size, machInst, vd, vm); |
| case 0xe: |
| return decodeNeonSTwoMiscReg<NVqabsD, NVqabsQ>( |
| q, size, machInst, vd, vm); |
| case 0xf: |
| return decodeNeonSTwoMiscReg<NVqnegD, NVqnegQ>( |
| q, size, machInst, vd, vm); |
| default: |
| return new Unknown(machInst); |
| } |
| case 0x1: |
| switch (bits(b, 3, 1)) { |
| case 0x0: |
| if (bits(b, 4)) { |
| if (q) { |
| return new NVcgtQFp<float>(machInst, vd, vm); |
| } else { |
| return new NVcgtDFp<float>(machInst, vd, vm); |
| } |
| } else { |
| return decodeNeonSTwoMiscReg<NVcgtD, NVcgtQ>( |
| q, size, machInst, vd, vm); |
| } |
| case 0x1: |
| if (bits(b, 4)) { |
| if (q) { |
| return new NVcgeQFp<float>(machInst, vd, vm); |
| } else { |
| return new NVcgeDFp<float>(machInst, vd, vm); |
| } |
| } else { |
| return decodeNeonSTwoMiscReg<NVcgeD, NVcgeQ>( |
| q, size, machInst, vd, vm); |
| } |
| case 0x2: |
| if (bits(b, 4)) { |
| if (q) { |
| return new NVceqQFp<float>(machInst, vd, vm); |
| } else { |
| return new NVceqDFp<float>(machInst, vd, vm); |
| } |
| } else { |
| return decodeNeonSTwoMiscReg<NVceqD, NVceqQ>( |
| q, size, machInst, vd, vm); |
| } |
| case 0x3: |
| if (bits(b, 4)) { |
| if (q) { |
| return new NVcleQFp<float>(machInst, vd, vm); |
| } else { |
| return new NVcleDFp<float>(machInst, vd, vm); |
| } |
| } else { |
| return decodeNeonSTwoMiscReg<NVcleD, NVcleQ>( |
| q, size, machInst, vd, vm); |
| } |
| case 0x4: |
| if (bits(b, 4)) { |
| if (q) { |
| return new NVcltQFp<float>(machInst, vd, vm); |
| } else { |
| return new NVcltDFp<float>(machInst, vd, vm); |
| } |
| } else { |
| return decodeNeonSTwoMiscReg<NVcltD, NVcltQ>( |
| q, size, machInst, vd, vm); |
| } |
| case 0x5: |
| if (q) { |
| return new SHA1H(machInst, vd, vm); |
| } else { |
| return new Unknown(machInst); |
| } |
| case 0x6: |
| if (bits(machInst, 10)) { |
| if (q) |
| return new NVabsQFp<float>(machInst, vd, vm); |
| else |
| return new NVabsDFp<float>(machInst, vd, vm); |
| } else { |
| return decodeNeonSTwoMiscReg<NVabsD, NVabsQ>( |
| q, size, machInst, vd, vm); |
| } |
| case 0x7: |
| if (bits(machInst, 10)) { |
| if (q) |
| return new NVnegQFp<float>(machInst, vd, vm); |
| else |
| return new NVnegDFp<float>(machInst, vd, vm); |
| } else { |
| return decodeNeonSTwoMiscReg<NVnegD, NVnegQ>( |
| q, size, machInst, vd, vm); |
| } |
| default: |
| return new Unknown64(machInst); |
| } |
| case 0x2: |
| switch (bits(b, 4, 1)) { |
| case 0x0: |
| if (q) |
| return new NVswpQ<uint64_t>(machInst, vd, vm); |
| else |
| return new NVswpD<uint64_t>(machInst, vd, vm); |
| case 0x1: |
| return decodeNeonUTwoMiscSReg<NVtrnD, NVtrnQ>( |
| q, size, machInst, vd, vm); |
| case 0x2: |
| return decodeNeonUTwoMiscReg<NVuzpD, NVuzpQ>( |
| q, size, machInst, vd, vm); |
| case 0x3: |
| return decodeNeonUTwoMiscReg<NVzipD, NVzipQ>( |
| q, size, machInst, vd, vm); |
| case 0x4: |
| if (b == 0x8) { |
| return decodeNeonUTwoMiscUSReg<NVmovn>( |
| size, machInst, vd, vm); |
| } else { |
| return decodeNeonSTwoMiscUSReg<NVqmovuns>( |
| size, machInst, vd, vm); |
| } |
| case 0x5: |
| if (q) { |
| return decodeNeonUTwoMiscUSReg<NVqmovun>( |
| size, machInst, vd, vm); |
| } else { |
| return decodeNeonSTwoMiscUSReg<NVqmovn>( |
| size, machInst, vd, vm); |
| } |
| case 0x6: |
| if (b == 0xc) { |
| return decodeNeonSTwoShiftUSReg<NVshll>( |
| size, machInst, vd, vm, 8 << size); |
| } else { |
| return new Unknown(machInst); |
| } |
| case 0x7: |
| if (q) { |
| return new SHA256SU0(machInst, vd, vm); |
| } else { |
| return new SHA1SU1(machInst, vd, vm); |
| } |
| case 0xc: |
| case 0xe: |
| if (b == 0x18) { |
| if (size != 1 || (vm % 2)) |
| return new Unknown(machInst); |
| return new NVcvts2h<uint16_t>(machInst, vd, vm); |
| } else if (b == 0x1c) { |
| if (size != 1 || (vd % 2)) |
| return new Unknown(machInst); |
| return new NVcvth2s<uint16_t>(machInst, vd, vm); |
| } else { |
| return new Unknown(machInst); |
| } |
| default: |
| return new Unknown(machInst); |
| } |
| case 0x3: |
| if (bits(b, 4, 3) == 0x3) { |
| if ((q && (vd % 2 || vm % 2)) || size != 2) { |
| return new Unknown(machInst); |
| } else { |
| if (bits(b, 2)) { |
| if (bits(b, 1)) { |
| if (q) { |
| return new NVcvt2ufxQ<float>( |
| machInst, vd, vm, 0); |
| } else { |
| return new NVcvt2ufxD<float>( |
| machInst, vd, vm, 0); |
| } |
| } else { |
| if (q) { |
| return new NVcvt2sfxQ<float>( |
| machInst, vd, vm, 0); |
| } else { |
| return new NVcvt2sfxD<float>( |
| machInst, vd, vm, 0); |
| } |
| } |
| } else { |
| if (bits(b, 1)) { |
| if (q) { |
| return new NVcvtu2fpQ<float>( |
| machInst, vd, vm, 0); |
| } else { |
| return new NVcvtu2fpD<float>( |
| machInst, vd, vm, 0); |
| } |
| } else { |
| if (q) { |
| return new NVcvts2fpQ<float>( |
| machInst, vd, vm, 0); |
| } else { |
| return new NVcvts2fpD<float>( |
| machInst, vd, vm, 0); |
| } |
| } |
| } |
| } |
| } else if ((b & 0x1a) == 0x10) { |
| if (bits(b, 2)) { |
| if (q) { |
| return new NVrecpeQFp<float>(machInst, vd, vm); |
| } else { |
| return new NVrecpeDFp<float>(machInst, vd, vm); |
| } |
| } else { |
| if (q) { |
| return new NVrecpeQ<uint32_t>(machInst, vd, vm); |
| } else { |
| return new NVrecpeD<uint32_t>(machInst, vd, vm); |
| } |
| } |
| } else if ((b & 0x1a) == 0x12) { |
| if (bits(b, 2)) { |
| if (q) { |
| return new NVrsqrteQFp<float>(machInst, vd, vm); |
| } else { |
| return new NVrsqrteDFp<float>(machInst, vd, vm); |
| } |
| } else { |
| if (q) { |
| return new NVrsqrteQ<uint32_t>(machInst, vd, vm); |
| } else { |
| return new NVrsqrteD<uint32_t>(machInst, vd, vm); |
| } |
| } |
| } else { |
| return new Unknown(machInst); |
| } |
| } |
| return new Unknown(machInst); |
| } |
| |
| StaticInstPtr |
| decodeNeonData(ExtMachInst machInst) |
| { |
| const bool u = THUMB ? bits(machInst, 28) : bits(machInst, 24); |
| const uint32_t a = bits(machInst, 23, 19); |
| const uint32_t b = bits(machInst, 11, 8); |
| const uint32_t c = bits(machInst, 7, 4); |
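| // Top-level split of the Advanced SIMD data-processing space, keyed |
| // on the a (bits 23:19), b (bits 11:8) and c (bits 7:4) fields. |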
| if (bits(a, 4) == 0) { |
| return decodeNeonThreeRegistersSameLength(machInst); |
| } else if ((c & 0x9) == 1) { |
| if ((a & 0x7) == 0) { |
| return decodeNeonOneRegModImm(machInst); |
| } else { |
| return decodeNeonTwoRegAndShift(machInst); |
| } |
| } else if ((c & 0x9) == 9) { |
| return decodeNeonTwoRegAndShift(machInst); |
| } else if (bits(a, 2, 1) != 0x3) { |
| if ((c & 0x5) == 0) { |
| return decodeNeonThreeRegDiffLengths(machInst); |
| } else if ((c & 0x5) == 4) { |
| return decodeNeonTwoRegScalar(machInst); |
| } |
| } else if ((a & 0x16) == 0x16) { |
| const IntRegIndex vd = |
| (IntRegIndex)(2 * (bits(machInst, 15, 12) | |
| (bits(machInst, 22) << 4))); |
| const IntRegIndex vn = |
| (IntRegIndex)(2 * (bits(machInst, 19, 16) | |
| (bits(machInst, 7) << 4))); |
| const IntRegIndex vm = |
| (IntRegIndex)(2 * (bits(machInst, 3, 0) | |
| (bits(machInst, 5) << 4))); |
| if (!u) { |
| if (bits(c, 0) == 0) { |
| unsigned imm4 = bits(machInst, 11, 8); |
| bool q = bits(machInst, 6); |
| if (imm4 >= 8 && !q) |
| return new Unknown(machInst); |
| if (q) { |
| return new NVextQ<uint8_t>(machInst, vd, vn, vm, imm4); |
| } else { |
| return new NVextD<uint8_t>(machInst, vd, vn, vm, imm4); |
| } |
| } |
| } else if (bits(b, 3) == 0 && bits(c, 0) == 0) { |
| return decodeNeonTwoRegMisc(machInst); |
| } else if (bits(b, 3, 2) == 0x2 && bits(c, 0) == 0) { |
| unsigned length = bits(machInst, 9, 8) + 1; |
| if ((uint32_t)vn / 2 + length > 32) |
| return new Unknown(machInst); |
| if (bits(machInst, 6) == 0) { |
| switch (length) { |
| case 1: |
| return new NVtbl1(machInst, vd, vn, vm); |
| case 2: |
| return new NVtbl2(machInst, vd, vn, vm); |
| case 3: |
| return new NVtbl3(machInst, vd, vn, vm); |
| case 4: |
| return new NVtbl4(machInst, vd, vn, vm); |
| } |
| } else { |
| switch (length) { |
| case 1: |
| return new NVtbx1(machInst, vd, vn, vm); |
| case 2: |
| return new NVtbx2(machInst, vd, vn, vm); |
| case 3: |
| return new NVtbx3(machInst, vd, vn, vm); |
| case 4: |
| return new NVtbx4(machInst, vd, vn, vm); |
| } |
| } |
| } else if (b == 0xc && (c & 0x9) == 0) { |
| unsigned imm4 = bits(machInst, 19, 16); |
| if (bits(imm4, 2, 0) == 0) |
| return new Unknown(machInst); |
| unsigned size = 0; |
| while ((imm4 & 0x1) == 0) { |
| size++; |
| imm4 >>= 1; |
| } |
| unsigned index = imm4 >> 1; |
| const bool q = bits(machInst, 6); |
| return decodeNeonUTwoShiftSReg<NVdupD, NVdupQ>( |
| q, size, machInst, vd, vm, index); |
| } |
| } |
| return new Unknown(machInst); |
| } |
| ''' |
| }}; |
| |
| def format ThumbNeonMem() {{ |
| decode_block = ''' |
| return decodeNeonMem(machInst); |
| ''' |
| }}; |
| |
| def format ThumbNeonData() {{ |
| decode_block = ''' |
| return decodeNeonData(machInst); |
| ''' |
| }}; |
| |
| def format Thumb32NeonSIMD() {{ |
| decode_block = ''' |
| return decodeAdvancedSIMD(machInst); |
| ''' |
| }}; |
| |
| let {{ |
| header_output = ''' |
| bool |
| wrongVLdmStmRegs(RegIndex start_reg, uint8_t count, bool single); |
| |
| StaticInstPtr |
| decodeExtensionRegLoadStore(ExtMachInst machInst); |
| ''' |
| decoder_output = ''' |
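| // Returns true if a VLDM/VSTM register list is invalid: an empty |
| // list, a list that runs past the last architected FP register, or |
| // (for doubleword lists) more than 16 registers. |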
| bool |
| wrongVLdmStmRegs(RegIndex start_reg, uint8_t count, bool single) |
| { |
| if (single) { |
| const auto regs = count; |
| if (regs == 0 || start_reg + regs > NumFloatV7ArchRegs) |
| return true; |
| } else { |
| const auto regs = count/2; |
| if (regs == 0 || start_reg + regs > NumFloatV7ArchRegs || |
| regs > 16) |
| return true; |
| } |
| return false; |
| } |
| |
| StaticInstPtr |
| decodeExtensionRegLoadStore(ExtMachInst machInst) |
| { |
| const uint32_t opcode = bits(machInst, 24, 20); |
| const uint32_t offset = bits(machInst, 7, 0); |
| const bool single = (bits(machInst, 8) == 0); |
| const IntRegIndex rn = (IntRegIndex)(uint32_t)bits(machInst, 19, 16); |
| RegIndex vd = decodeFpVd(machInst, single ? 0x2 : 0x3, false); |
| |
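| // bits(opcode, 4, 3) splits the space into 64-bit core <-> FP |
| // register transfers, VLDM/VSTM (including VPUSH/VPOP), and |
| // VLDR/VSTR. |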
| switch (bits(opcode, 4, 3)) { |
| case 0x0: |
| if (bits(opcode, 4, 1) == 0x2 && |
| !(machInst.thumb == 1 && bits(machInst, 28) == 1) && |
| !(machInst.thumb == 0 && machInst.condCode == 0xf)) { |
| if ((bits(machInst, 7, 4) & 0xd) != 1) { |
| break; |
| } |
| const IntRegIndex rt = |
| (IntRegIndex)(uint32_t)bits(machInst, 15, 12); |
| const IntRegIndex rt2 = |
| (IntRegIndex)(uint32_t)bits(machInst, 19, 16); |
| const bool op = bits(machInst, 20); |
| uint32_t vm; |
| if (single) { |
| vm = (bits(machInst, 3, 0) << 1) | bits(machInst, 5); |
| } else { |
| vm = (bits(machInst, 3, 0) << 1) | |
| (bits(machInst, 5) << 5); |
| } |
| if (op) { |
| return new Vmov2Core2Reg(machInst, rt, rt2, |
| (IntRegIndex)vm); |
| } else { |
| return new Vmov2Reg2Core(machInst, (IntRegIndex)vm, |
| rt, rt2); |
| } |
| } |
| break; |
| case 0x1: |
| { |
| if (wrongVLdmStmRegs(vd, offset, single)) { |
| break; |
| } |
| switch (bits(opcode, 1, 0)) { |
| case 0x0: |
| return new VLdmStm(machInst, rn, vd, single, |
| true, false, false, offset); |
| case 0x1: |
| return new VLdmStm(machInst, rn, vd, single, |
| true, false, true, offset); |
| case 0x2: |
| return new VLdmStm(machInst, rn, vd, single, |
| true, true, false, offset); |
| case 0x3: |
| // If rn == sp, then this is called vpop. |
| return new VLdmStm(machInst, rn, vd, single, |
| true, true, true, offset); |
| default: |
| GEM5_UNREACHABLE; |
| } |
| } |
| case 0x2: |
| if (bits(opcode, 1, 0) == 0x2) { |
| if (wrongVLdmStmRegs(vd, offset, single)) { |
| break; |
| } else { |
| // If rn == sp, then this is called vpush. |
| return new VLdmStm(machInst, rn, vd, single, |
| false, true, false, offset); |
| } |
| } else if (bits(opcode, 1, 0) == 0x3) { |
| if (wrongVLdmStmRegs(vd, offset, single)) { |
| break; |
| } else { |
| return new VLdmStm(machInst, rn, vd, single, |
| false, true, true, offset); |
| } |
| } |
| [[fallthrough]]; |
| case 0x3: |
| const bool up = (bits(machInst, 23) == 1); |
| const uint32_t imm = bits(machInst, 7, 0) << 2; |
| if (bits(opcode, 1, 0) == 0x0) { |
| if (single) { |
| if (up) { |
| return new %(vstr_us)s(machInst, vd, rn, up, imm); |
| } else { |
| return new %(vstr_s)s(machInst, vd, rn, up, imm); |
| } |
| } else { |
| if (up) { |
| return new %(vstr_ud)s(machInst, vd, vd + 1, |
| rn, up, imm); |
| } else { |
| return new %(vstr_d)s(machInst, vd, vd + 1, |
| rn, up, imm); |
| } |
| } |
| } else if (bits(opcode, 1, 0) == 0x1) { |
| if (single) { |
| if (up) { |
| return new %(vldr_us)s(machInst, vd, rn, up, imm); |
| } else { |
| return new %(vldr_s)s(machInst, vd, rn, up, imm); |
| } |
| } else { |
| if (up) { |
| return new %(vldr_ud)s(machInst, vd, vd + 1, |
| rn, up, imm); |
| } else { |
| return new %(vldr_d)s(machInst, vd, vd + 1, |
| rn, up, imm); |
| } |
| } |
| } |
| } |
| return new Unknown(machInst); |
| } |
| ''' % { |
| "vldr_us" : "VLDR_" + loadImmClassName(False, True, False), |
| "vldr_s" : "VLDR_" + loadImmClassName(False, False, False), |
| "vldr_ud" : "VLDR_" + loadDoubleImmClassName(False, True, False), |
| "vldr_d" : "VLDR_" + loadDoubleImmClassName(False, False, False), |
| "vstr_us" : "VSTR_" + storeImmClassName(False, True, False), |
| "vstr_s" : "VSTR_" + storeImmClassName(False, False, False), |
| "vstr_ud" : "VSTR_" + storeDoubleImmClassName(False, True, False), |
| "vstr_d" : "VSTR_" + storeDoubleImmClassName(False, False, False) |
| } |
| }}; |
| |
| def format ExtensionRegLoadStore() {{ |
| decode_block = ''' |
| return decodeExtensionRegLoadStore(machInst); |
| ''' |
| }}; |
| |
| let {{ |
| header_output = ''' |
| StaticInstPtr |
| decodeShortFpTransfer(ExtMachInst machInst); |
| |
| IntRegIndex decodeFpVd(ExtMachInst machInst, uint32_t size, bool isInt); |
| IntRegIndex decodeFpVm(ExtMachInst machInst, uint32_t size, bool isInt); |
| IntRegIndex decodeFpVn(ExtMachInst machInst, uint32_t size); |
| ''' |
| decoder_output = ''' |
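| // Helpers mapping the split VFP register-number fields onto flat |
| // indices; for double precision (size == 3) the extra bit sits above |
| // the 4-bit field, for single precision below it. |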
| IntRegIndex decodeFpVd(ExtMachInst machInst, uint32_t size, bool isInt) |
| { |
| if (!isInt && size == 3) { |
| return (IntRegIndex)((bits(machInst, 22) << 5) | |
| (bits(machInst, 15, 12) << 1)); |
| } else { |
| return (IntRegIndex)(bits(machInst, 22) | |
| (bits(machInst, 15, 12) << 1)); |
| } |
| } |
| |
| IntRegIndex decodeFpVm(ExtMachInst machInst, uint32_t size, bool isInt) |
| { |
| if (!isInt && size == 3) { |
| return (IntRegIndex)((bits(machInst, 5) << 5) | |
| (bits(machInst, 3, 0) << 1)); |
| } else { |
| return (IntRegIndex)(bits(machInst, 5) | |
| (bits(machInst, 3, 0) << 1)); |
| } |
| } |
| |
| IntRegIndex decodeFpVn(ExtMachInst machInst, uint32_t size) |
| { |
| if (size == 3) { |
| return (IntRegIndex)((bits(machInst, 7) << 5) | |
| (bits(machInst, 19, 16) << 1)); |
| } else { |
| return (IntRegIndex)(bits(machInst, 7) | |
| (bits(machInst, 19, 16) << 1)); |
| } |
| } |
| |
| StaticInstPtr |
    decodeFloatingPointDataProcessing(ExtMachInst machInst)
    {
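        // These encodings are unconditional (A32 cond == 0xF, or T32 with
        // bit 28 set): VSEL, VMAXNM/VMINNM, VRINT{A,N,P,M} and
        // VCVT{A,N,P,M}.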
| const uint32_t op0 = bits(machInst, 23, 20); |
| const uint32_t op1 = bits(machInst, 19, 16); |
| const uint32_t op2 = bits(machInst, 9, 8); |
| const uint32_t op3 = bits(machInst, 6); |
| const uint32_t rm = bits(machInst, 17, 16); |
| const uint32_t size = bits(machInst, 9, 8); |
| IntRegIndex vd = decodeFpVd(machInst, size, false); |
| IntRegIndex vm = decodeFpVm(machInst, size, false); |
| IntRegIndex vdInt = decodeFpVd(machInst, size, true); |
| IntRegIndex vn = decodeFpVn(machInst, size); |
| if (bits(machInst, 31, 24) == 0xFE && !bits(machInst, 4)) { |
            if (bits(op0, 3) == 0 && op2 != 0 && !op3) {
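                // VSEL: conditionally select between vn and vm.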
| ConditionCode cond; |
| switch(bits(machInst, 21, 20)) { |
| case 0x0: cond = COND_EQ; break; |
| case 0x1: cond = COND_VS; break; |
| case 0x2: cond = COND_GE; break; |
| case 0x3: cond = COND_GT; break; |
| default: panic("unreachable"); |
| } |
| if (size == 3) { |
| return new VselD(machInst, vd, vn, vm, cond); |
| } else { |
| return new VselS(machInst, vd, vn, vm, cond); |
| } |
| } else if (bits(op0, 3) == 1 && bits(op0, 1, 0) == 0 && op2 != 0) { |
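                // VMAXNM / VMINNM; bit 6 selects the minimum variant.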
| const bool op = bits(machInst, 6); |
| if (op) { |
| if (size == 1) { |
| return new FailUnimplemented("vminnm.f16", machInst); |
| } |
| return decodeNeonSizeSingleDouble<VminnmS, VminnmD>( |
| size, machInst, vd, vn, vm); |
| } else { |
| if (size == 1) { |
| return new FailUnimplemented("vmaxnm.f16", machInst); |
| } |
| return decodeNeonSizeSingleDouble<VmaxnmS, VmaxnmD>( |
| size, machInst, vd, vn, vm); |
| } |
| } else if (bits(op0, 3) && bits(op0, 1, 0) == 3 && |
| bits(op1, 3) && op2 != 0 && op3) |
| { |
| const uint32_t o1 = bits(machInst, 18); |
| if (o1 == 0) { |
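                    // VRINT{A,N,P,M}: round to integral in FP format, with
                    // the rounding mode selected by rm.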
| if (size == 3) { |
| switch(rm) { |
| case 0x0: |
| return decodeVfpRegRegOp<VRIntAD>(machInst, vd, vm, |
| true); |
| case 0x1: |
| return decodeVfpRegRegOp<VRIntND>(machInst, vd, vm, |
| true); |
| case 0x2: |
| return decodeVfpRegRegOp<VRIntPD>(machInst, vd, vm, |
| true); |
| case 0x3: |
| return decodeVfpRegRegOp<VRIntMD>(machInst, vd, vm, |
| true); |
| default: return new Unknown(machInst); |
| } |
| } else { |
| switch(rm) { |
| case 0x0: |
| return decodeVfpRegRegOp<VRIntAS>(machInst, vd, vm, |
| false); |
| case 0x1: |
| return decodeVfpRegRegOp<VRIntNS>(machInst, vd, vm, |
| false); |
| case 0x2: |
| return decodeVfpRegRegOp<VRIntPS>(machInst, vd, vm, |
| false); |
| case 0x3: |
| return decodeVfpRegRegOp<VRIntMS>(machInst, vd, vm, |
| false); |
| default: return new Unknown(machInst); |
| } |
| } |
| } else { |
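                    // VCVT{A,N,P,M}: FP to integer conversion with a
                    // directed rounding mode (rm); bit 7 selects signed
                    // versus unsigned.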
| const bool op = bits(machInst, 7); |
| switch(rm) { |
| case 0x0: |
| switch(size) { |
| case 0x0: |
| return new Unknown(machInst); |
| case 0x1: |
| return new FailUnimplemented( |
| "vcvta.u32.f16", machInst); |
| case 0x2: |
| if (op) { |
| return new VcvtaFpSIntS(machInst, vdInt, vm); |
| } else { |
| return new VcvtaFpUIntS(machInst, vdInt, vm); |
| } |
| case 0x3: |
| if (op) { |
| return new VcvtaFpSIntD(machInst, vdInt, vm); |
| } else { |
| return new VcvtaFpUIntD(machInst, vdInt, vm); |
| } |
| default: return new Unknown(machInst); |
| } |
| case 0x1: |
| switch(size) { |
| case 0x0: |
| return new Unknown(machInst); |
| case 0x1: |
| return new FailUnimplemented( |
| "vcvtn.u32.f16", machInst); |
| case 0x2: |
| if (op) { |
| return new VcvtnFpSIntS(machInst, vdInt, vm); |
| } else { |
| return new VcvtnFpUIntS(machInst, vdInt, vm); |
| } |
| case 0x3: |
| if (op) { |
| return new VcvtnFpSIntD(machInst, vdInt, vm); |
| } else { |
| return new VcvtnFpUIntD(machInst, vdInt, vm); |
| } |
| default: return new Unknown(machInst); |
| } |
| case 0x2: |
| switch(size) { |
| case 0x0: |
| return new Unknown(machInst); |
| case 0x1: |
| return new FailUnimplemented( |
| "vcvtp.u32.f16", machInst); |
| case 0x2: |
| if (op) { |
| return new VcvtpFpSIntS(machInst, vdInt, vm); |
| } else { |
| return new VcvtpFpUIntS(machInst, vdInt, vm); |
| } |
| case 0x3: |
| if (op) { |
| return new VcvtpFpSIntD(machInst, vdInt, vm); |
| } else { |
| return new VcvtpFpUIntD(machInst, vdInt, vm); |
| } |
| default: return new Unknown(machInst); |
| } |
| case 0x3: |
| switch(size) { |
| case 0x0: |
| return new Unknown(machInst); |
| case 0x1: |
| return new FailUnimplemented( |
| "vcvtm.u32.f16", machInst); |
| case 0x2: |
| if (op) { |
| return new VcvtmFpSIntS(machInst, vdInt, vm); |
| } else { |
| return new VcvtmFpUIntS(machInst, vdInt, vm); |
| } |
| case 0x3: |
| if (op) { |
| return new VcvtmFpSIntD(machInst, vdInt, vm); |
| } else { |
| return new VcvtmFpUIntD(machInst, vdInt, vm); |
| } |
| default: return new Unknown(machInst); |
| } |
| default: return new Unknown(machInst); |
| } |
| } |
| } else { |
| return new Unknown(machInst); |
| } |
| } else { |
| return new Unknown(machInst); |
| } |
| } |
| |
| StaticInstPtr |
| decodeShortFpTransfer(ExtMachInst machInst) |
| { |
| if ((machInst.thumb == 1 && bits(machInst, 28) == 1) || |
| (machInst.thumb == 0 && machInst.condCode == 0xf)) { |
| return decodeFloatingPointDataProcessing(machInst); |
| } |
| const uint32_t l = bits(machInst, 20); |
| const uint32_t c = bits(machInst, 8); |
| const uint32_t a = bits(machInst, 23, 21); |
        const uint32_t b = bits(machInst, 6, 5);
| if (l == 0 && c == 0) { |
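            // a == 0: VMOV core register to Sn; a == 7: VMSR.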
| if (a == 0) { |
| const uint32_t vn = (bits(machInst, 19, 16) << 1) | |
| bits(machInst, 7); |
| const IntRegIndex rt = |
| (IntRegIndex)(uint32_t)bits(machInst, 15, 12); |
| if (bits(machInst, 20) == 1) { |
| return new VmovRegCoreW(machInst, rt, (IntRegIndex)vn); |
| } else { |
| return new VmovCoreRegW(machInst, (IntRegIndex)vn, rt); |
| } |
| } else if (a == 0x7) { |
| const IntRegIndex rt = |
| (IntRegIndex)(uint32_t)bits(machInst, 15, 12); |
| uint32_t reg = bits(machInst, 19, 16); |
| uint32_t specReg; |
| switch (reg) { |
| case 0: |
| specReg = MISCREG_FPSID; |
| break; |
| case 1: |
| specReg = MISCREG_FPSCR; |
| break; |
| case 6: |
| specReg = MISCREG_MVFR1; |
| break; |
| case 7: |
| specReg = MISCREG_MVFR0; |
| break; |
| case 8: |
| specReg = MISCREG_FPEXC; |
| break; |
| default: |
| return new Unknown(machInst); |
| } |
| if (specReg == MISCREG_FPSCR) { |
| return new VmsrFpscr(machInst, (IntRegIndex)specReg, rt); |
| } else { |
| uint32_t iss = mcrMrcIssBuild(0, bits(machInst, 3, 0), rt, |
| reg, a, bits(machInst, 7, 5)); |
| return new Vmsr(machInst, (IntRegIndex)specReg, rt, iss); |
| } |
| } |
| } else if (l == 0 && c == 1) { |
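            // VMOV core register to scalar (8/16/32-bit element), or VDUP.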
| if (bits(a, 2) == 0) { |
| uint32_t vd = (bits(machInst, 7) << 5) | |
| (bits(machInst, 19, 16) << 1); |
| // Handle accessing each single precision half of the vector. |
| vd += bits(machInst, 21); |
| const IntRegIndex rt = |
| (IntRegIndex)(uint32_t)bits(machInst, 15, 12); |
| if (bits(machInst, 22) == 1) { |
| return new VmovCoreRegB(machInst, (IntRegIndex)vd, |
| rt, bits(machInst, 6, 5)); |
| } else if (bits(machInst, 5) == 1) { |
| return new VmovCoreRegH(machInst, (IntRegIndex)vd, |
| rt, bits(machInst, 6)); |
| } else if (bits(machInst, 6) == 0) { |
| return new VmovCoreRegW(machInst, (IntRegIndex)vd, rt); |
| } else { |
| return new Unknown(machInst); |
| } |
            } else if (bits(b, 1) == 0) {
| bool q = bits(machInst, 21); |
| unsigned be = (bits(machInst, 22) << 1) | (bits(machInst, 5)); |
| IntRegIndex vd = (IntRegIndex)(2 * (uint32_t) |
| (bits(machInst, 19, 16) | (bits(machInst, 7) << 4))); |
| IntRegIndex rt = (IntRegIndex)(uint32_t) |
| bits(machInst, 15, 12); |
| if (q) { |
| switch (be) { |
| case 0: |
| return new NVdupQGpr<uint32_t>(machInst, vd, rt); |
| case 1: |
| return new NVdupQGpr<uint16_t>(machInst, vd, rt); |
| case 2: |
| return new NVdupQGpr<uint8_t>(machInst, vd, rt); |
| case 3: |
| return new Unknown(machInst); |
| } |
| } else { |
| switch (be) { |
| case 0: |
| return new NVdupDGpr<uint32_t>(machInst, vd, rt); |
| case 1: |
| return new NVdupDGpr<uint16_t>(machInst, vd, rt); |
| case 2: |
| return new NVdupDGpr<uint8_t>(machInst, vd, rt); |
| case 3: |
| return new Unknown(machInst); |
| } |
| } |
| } |
| } else if (l == 1 && c == 0) { |
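            // a == 0: VMOV Sn to core register; a == 7: VMRS.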
| if (a == 0) { |
| const uint32_t vn = (bits(machInst, 19, 16) << 1) | |
| bits(machInst, 7); |
| const IntRegIndex rt = |
| (IntRegIndex)(uint32_t)bits(machInst, 15, 12); |
| if (bits(machInst, 20) == 1) { |
| return new VmovRegCoreW(machInst, rt, (IntRegIndex)vn); |
| } else { |
| return new VmovCoreRegW(machInst, (IntRegIndex)vn, rt); |
| } |
| } else if (a == 7) { |
| const IntRegIndex rt = |
| (IntRegIndex)(uint32_t)bits(machInst, 15, 12); |
| uint32_t reg = bits(machInst, 19, 16); |
| uint32_t specReg; |
| switch (reg) { |
| case 0: |
| specReg = MISCREG_FPSID; |
| break; |
| case 1: |
| specReg = MISCREG_FPSCR; |
| break; |
| case 6: |
| specReg = MISCREG_MVFR1; |
| break; |
| case 7: |
| specReg = MISCREG_MVFR0; |
| break; |
| case 8: |
| specReg = MISCREG_FPEXC; |
| break; |
| default: |
| return new Unknown(machInst); |
| } |
| if (rt == 0xf) { |
| if (specReg == MISCREG_FPSCR) { |
| return new VmrsApsrFpscr(machInst); |
| } else { |
| return new Unknown(machInst); |
| } |
| } else if (specReg == MISCREG_FPSCR) { |
| return new VmrsFpscr(machInst, rt, (IntRegIndex)specReg); |
| } else { |
| uint32_t iss = mcrMrcIssBuild(l, bits(machInst, 3, 0), rt, |
| reg, a, bits(machInst, 7, 5)); |
| return new Vmrs(machInst, rt, (IntRegIndex)specReg, iss); |
| } |
| } |
| } else { |
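            // l == 1 && c == 1: VMOV scalar to core register, with optional
            // sign or zero extension of 8/16-bit elements.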
| uint32_t vd = (bits(machInst, 7) << 5) | |
| (bits(machInst, 19, 16) << 1); |
| // Handle indexing into each single precision half of the vector. |
| vd += bits(machInst, 21); |
| uint32_t index; |
| const IntRegIndex rt = |
| (IntRegIndex)(uint32_t)bits(machInst, 15, 12); |
| const bool u = (bits(machInst, 23) == 1); |
| if (bits(machInst, 22) == 1) { |
| index = bits(machInst, 6, 5); |
| if (u) { |
| return new VmovRegCoreUB(machInst, rt, |
| (IntRegIndex)vd, index); |
| } else { |
| return new VmovRegCoreSB(machInst, rt, |
| (IntRegIndex)vd, index); |
| } |
| } else if (bits(machInst, 5) == 1) { |
| index = bits(machInst, 6); |
| if (u) { |
| return new VmovRegCoreUH(machInst, rt, |
| (IntRegIndex)vd, index); |
| } else { |
| return new VmovRegCoreSH(machInst, rt, |
| (IntRegIndex)vd, index); |
| } |
| } else if (bits(machInst, 6) == 0 && !u) { |
| return new VmovRegCoreW(machInst, rt, (IntRegIndex)vd); |
| } else { |
| return new Unknown(machInst); |
| } |
| } |
| return new Unknown(machInst); |
| } |
| ''' |
| }}; |
| |
| def format ShortFpTransfer() {{ |
| decode_block = ''' |
| return decodeShortFpTransfer(machInst); |
| ''' |
| }}; |
| |
| let {{ |
| header_output = ''' |
| StaticInstPtr |
| decodeVfpData(ExtMachInst machInst); |
| ''' |
| decoder_output = ''' |
| StaticInstPtr |
| decodeVfpData(ExtMachInst machInst) |
| { |
| const uint32_t opc1 = bits(machInst, 23, 20); |
| const uint32_t opc2 = bits(machInst, 19, 16); |
| const uint32_t opc3 = bits(machInst, 7, 6); |
| //const uint32_t opc4 = bits(machInst, 3, 0); |
| const bool single = (bits(machInst, 8) == 0); |
| // Used to select between vcmp and vcmpe. |
| const bool e = (bits(machInst, 7) == 1); |
| IntRegIndex vd; |
| IntRegIndex vm; |
| IntRegIndex vn; |
| if (single) { |
| vd = (IntRegIndex)(bits(machInst, 22) | |
| (bits(machInst, 15, 12) << 1)); |
| vm = (IntRegIndex)(bits(machInst, 5) | |
| (bits(machInst, 3, 0) << 1)); |
| vn = (IntRegIndex)(bits(machInst, 7) | |
| (bits(machInst, 19, 16) << 1)); |
| } else { |
| vd = (IntRegIndex)((bits(machInst, 22) << 5) | |
| (bits(machInst, 15, 12) << 1)); |
| vm = (IntRegIndex)((bits(machInst, 5) << 5) | |
| (bits(machInst, 3, 0) << 1)); |
| vn = (IntRegIndex)((bits(machInst, 7) << 5) | |
| (bits(machInst, 19, 16) << 1)); |
| } |
| switch (opc1 & 0xb /* 1011 */) { |
| case 0x0: |
| if (bits(machInst, 6) == 0) { |
| if (single) { |
| return decodeVfpRegRegRegOp<VmlaS>( |
| machInst, vd, vn, vm, false); |
| } else { |
| return decodeVfpRegRegRegOp<VmlaD>( |
| machInst, vd, vn, vm, true); |
| } |
| } else { |
| if (single) { |
| return decodeVfpRegRegRegOp<VmlsS>( |
| machInst, vd, vn, vm, false); |
| } else { |
| return decodeVfpRegRegRegOp<VmlsD>( |
| machInst, vd, vn, vm, true); |
| } |
| } |
| case 0x1: |
| if (bits(machInst, 6) == 1) { |
| if (single) { |
| return decodeVfpRegRegRegOp<VnmlaS>( |
| machInst, vd, vn, vm, false); |
| } else { |
| return decodeVfpRegRegRegOp<VnmlaD>( |
| machInst, vd, vn, vm, true); |
| } |
| } else { |
| if (single) { |
| return decodeVfpRegRegRegOp<VnmlsS>( |
| machInst, vd, vn, vm, false); |
| } else { |
| return decodeVfpRegRegRegOp<VnmlsD>( |
| machInst, vd, vn, vm, true); |
| } |
| } |
| case 0x2: |
| if ((opc3 & 0x1) == 0) { |
| if (single) { |
| return decodeVfpRegRegRegOp<VmulS>( |
| machInst, vd, vn, vm, false); |
| } else { |
| return decodeVfpRegRegRegOp<VmulD>( |
| machInst, vd, vn, vm, true); |
| } |
| } else { |
| if (single) { |
| return decodeVfpRegRegRegOp<VnmulS>( |
| machInst, vd, vn, vm, false); |
| } else { |
| return decodeVfpRegRegRegOp<VnmulD>( |
| machInst, vd, vn, vm, true); |
| } |
| } |
| case 0x3: |
| if ((opc3 & 0x1) == 0) { |
| if (single) { |
| return decodeVfpRegRegRegOp<VaddS>( |
| machInst, vd, vn, vm, false); |
| } else { |
| return decodeVfpRegRegRegOp<VaddD>( |
| machInst, vd, vn, vm, true); |
| } |
| } else { |
| if (single) { |
| return decodeVfpRegRegRegOp<VsubS>( |
| machInst, vd, vn, vm, false); |
| } else { |
| return decodeVfpRegRegRegOp<VsubD>( |
| machInst, vd, vn, vm, true); |
| } |
| } |
| case 0x8: |
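            // VDIV; for the A32 unconditional encoding this is
            // VMAXNM/VMINNM instead.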
| if (machInst.condCode == 0xF) { |
| const bool op = bits(machInst, 6); |
| const uint32_t size = bits(machInst, 9, 8); |
| if (op) { |
| if (size == 1) { |
| return new FailUnimplemented("vminnm.f16", machInst); |
| } |
| return decodeNeonSizeSingleDouble<VminnmS, VminnmD>( |
| size, machInst, vd, vn, vm); |
| } else { |
| if (size == 1) { |
| return new FailUnimplemented("vmaxnm.f16", machInst); |
| } |
| return decodeNeonSizeSingleDouble<VmaxnmS, VmaxnmD>( |
| size, machInst, vd, vn, vm); |
| } |
| } |
| if ((opc3 & 0x1) == 0) { |
| if (single) { |
| return decodeVfpRegRegRegOp<VdivS>( |
| machInst, vd, vn, vm, false); |
| } else { |
| return decodeVfpRegRegRegOp<VdivD>( |
| machInst, vd, vn, vm, true); |
| } |
| } |
| break; |
| case 0x9: |
| if ((opc3 & 0x1) == 0) { |
| if (single) { |
| return decodeVfpRegRegRegOp<VfnmaS>( |
| machInst, vd, vn, vm, false); |
| } else { |
| return decodeVfpRegRegRegOp<VfnmaD>( |
| machInst, vd, vn, vm, true); |
| } |
| } else { |
| if (single) { |
| return decodeVfpRegRegRegOp<VfnmsS>( |
| machInst, vd, vn, vm, false); |
| } else { |
| return decodeVfpRegRegRegOp<VfnmsD>( |
| machInst, vd, vn, vm, true); |
| } |
| } |
| break; |
| case 0xa: |
| if ((opc3 & 0x1) == 0) { |
| if (single) { |
| return decodeVfpRegRegRegOp<VfmaS>( |
| machInst, vd, vn, vm, false); |
| } else { |
| return decodeVfpRegRegRegOp<VfmaD>( |
| machInst, vd, vn, vm, true); |
| } |
| } else { |
| if (single) { |
| return decodeVfpRegRegRegOp<VfmsS>( |
| machInst, vd, vn, vm, false); |
| } else { |
| return decodeVfpRegRegRegOp<VfmsD>( |
| machInst, vd, vn, vm, true); |
| } |
| } |
| break; |
| case 0xb: |
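            // VMOV (immediate) when opc3<0> == 0; otherwise two-register
            // forms selected by opc2 (VMOV/VABS, VNEG/VSQRT, VCVTB/VCVTT,
            // VCMP/VCMPE, and the VCVT conversion group).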
| if ((opc3 & 0x1) == 0) { |
| const uint32_t baseImm = |
| bits(machInst, 3, 0) | (bits(machInst, 19, 16) << 4); |
| if (single) { |
| uint32_t imm = vfp_modified_imm(baseImm, FpDataType::Fp32); |
| return decodeVfpRegImmOp<VmovImmS>( |
| machInst, vd, imm, false); |
| } else { |
| uint64_t imm = vfp_modified_imm(baseImm, FpDataType::Fp64); |
| return decodeVfpRegImmOp<VmovImmD>( |
| machInst, vd, imm, true); |
| } |
| } |
| switch (opc2) { |
| case 0x0: |
| if (opc3 == 1) { |
| if (single) { |
| return decodeVfpRegRegOp<VmovRegS>( |
| machInst, vd, vm, false); |
| } else { |
| return decodeVfpRegRegOp<VmovRegD>( |
| machInst, vd, vm, true); |
| } |
| } else { |
| if (single) { |
| return decodeVfpRegRegOp<VabsS>( |
| machInst, vd, vm, false); |
| } else { |
| return decodeVfpRegRegOp<VabsD>( |
| machInst, vd, vm, true); |
| } |
| } |
| case 0x1: |
| if (opc3 == 1) { |
| if (single) { |
| return decodeVfpRegRegOp<VnegS>( |
| machInst, vd, vm, false); |
| } else { |
| return decodeVfpRegRegOp<VnegD>( |
| machInst, vd, vm, true); |
| } |
| } else { |
| if (single) { |
| return decodeVfpRegRegOp<VsqrtS>( |
| machInst, vd, vm, false); |
| } else { |
| return decodeVfpRegRegOp<VsqrtD>( |
| machInst, vd, vm, true); |
| } |
| } |
| case 0x2: |
| case 0x3: |
| { |
| const bool toHalf = bits(machInst, 16); |
| const bool top = bits(machInst, 7); |
| if (top) { |
| if (toHalf) { |
| return new VcvtFpSFpHT(machInst, vd, vm); |
| } else { |
| return new VcvtFpHTFpS(machInst, vd, vm); |
| } |
| } else { |
| if (toHalf) { |
| return new VcvtFpSFpHB(machInst, vd, vm); |
| } else { |
| return new VcvtFpHBFpS(machInst, vd, vm); |
| } |
| } |
| } |
| case 0x4: |
| if (single) { |
| if (e) { |
| return new VcmpeS(machInst, vd, vm); |
| } else { |
| return new VcmpS(machInst, vd, vm); |
| } |
| } else { |
| if (e) { |
| return new VcmpeD(machInst, vd, vm); |
| } else { |
| return new VcmpD(machInst, vd, vm); |
| } |
| } |
| case 0x5: |
| if (single) { |
| if (e) { |
| return new VcmpeZeroS(machInst, vd, 0); |
| } else { |
| return new VcmpZeroS(machInst, vd, 0); |
| } |
| } else { |
| if (e) { |
| return new VcmpeZeroD(machInst, vd, 0); |
| } else { |
| return new VcmpZeroD(machInst, vd, 0); |
| } |
| } |
| case 0x7: |
| if (opc3 == 0x3) { |
| if (single) { |
| vd = (IntRegIndex)((bits(machInst, 22) << 5) | |
| (bits(machInst, 15, 12) << 1)); |
| return new VcvtFpSFpD(machInst, vd, vm); |
| } else { |
| vd = (IntRegIndex)(bits(machInst, 22) | |
| (bits(machInst, 15, 12) << 1)); |
| return new VcvtFpDFpS(machInst, vd, vm); |
| } |
| } |
| break; |
| case 0x8: |
| if (bits(machInst, 7) == 0) { |
| if (single) { |
| return new VcvtUIntFpS(machInst, vd, vm); |
| } else { |
| vm = (IntRegIndex)(bits(machInst, 5) | |
| (bits(machInst, 3, 0) << 1)); |
| return new VcvtUIntFpD(machInst, vd, vm); |
| } |
| } else { |
| if (single) { |
| return new VcvtSIntFpS(machInst, vd, vm); |
| } else { |
| vm = (IntRegIndex)(bits(machInst, 5) | |
| (bits(machInst, 3, 0) << 1)); |
| return new VcvtSIntFpD(machInst, vd, vm); |
| } |
| } |
| case 0x9: |
| if (bits(machInst, 31, 28) != 0xF |
| && bits(machInst, 27, 23) == 0x1D) { |
| vd = (IntRegIndex)(bits(machInst, 22) | |
| (bits(machInst, 15, 12) << 1)); |
| return new VjcvtSFixedFpD(machInst, vd, vm); |
| } |
| break; |
| case 0xa: |
| { |
| const bool half = (bits(machInst, 7) == 0); |
| const uint32_t imm = bits(machInst, 5) | |
| (bits(machInst, 3, 0) << 1); |
| const uint32_t size = |
| (bits(machInst, 7) == 0 ? 16 : 32) - imm; |
| if (single) { |
| if (half) { |
| return new VcvtSHFixedFpS(machInst, vd, vd, size); |
| } else { |
| return new VcvtSFixedFpS(machInst, vd, vd, size); |
| } |
| } else { |
| if (half) { |
| return new VcvtSHFixedFpD(machInst, vd, vd, size); |
| } else { |
| return new VcvtSFixedFpD(machInst, vd, vd, size); |
| } |
| } |
| } |
| case 0xb: |
| { |
| const bool half = (bits(machInst, 7) == 0); |
| const uint32_t imm = bits(machInst, 5) | |
| (bits(machInst, 3, 0) << 1); |
| const uint32_t size = |
| (bits(machInst, 7) == 0 ? 16 : 32) - imm; |
| if (single) { |
| if (half) { |
| return new VcvtUHFixedFpS(machInst, vd, vd, size); |
| } else { |
| return new VcvtUFixedFpS(machInst, vd, vd, size); |
| } |
| } else { |
| if (half) { |
| return new VcvtUHFixedFpD(machInst, vd, vd, size); |
| } else { |
| return new VcvtUFixedFpD(machInst, vd, vd, size); |
| } |
| } |
| } |
| case 0xc: |
| if (bits(machInst, 7) == 0) { |
| if (single) { |
| return new VcvtFpUIntSR(machInst, vd, vm); |
| } else { |
| vd = (IntRegIndex)(bits(machInst, 22) | |
| (bits(machInst, 15, 12) << 1)); |
| return new VcvtFpUIntDR(machInst, vd, vm); |
| } |
| } else { |
| if (single) { |
| return new VcvtFpUIntS(machInst, vd, vm); |
| } else { |
| vd = (IntRegIndex)(bits(machInst, 22) | |
| (bits(machInst, 15, 12) << 1)); |
| return new VcvtFpUIntD(machInst, vd, vm); |
| } |
| } |
| case 0xd: |
| if (bits(machInst, 7) == 0) { |
| if (single) { |
| return new VcvtFpSIntSR(machInst, vd, vm); |
| } else { |
| vd = (IntRegIndex)(bits(machInst, 22) | |
| (bits(machInst, 15, 12) << 1)); |
| return new VcvtFpSIntDR(machInst, vd, vm); |
| } |
| } else { |
| if (single) { |
| return new VcvtFpSIntS(machInst, vd, vm); |
| } else { |
| vd = (IntRegIndex)(bits(machInst, 22) | |
| (bits(machInst, 15, 12) << 1)); |
| return new VcvtFpSIntD(machInst, vd, vm); |
| } |
| } |
| case 0xe: |
| { |
| const bool half = (bits(machInst, 7) == 0); |
| const uint32_t imm = bits(machInst, 5) | |
| (bits(machInst, 3, 0) << 1); |
| const uint32_t size = |
| (bits(machInst, 7) == 0 ? 16 : 32) - imm; |
| if (single) { |
| if (half) { |
| return new VcvtFpSHFixedS(machInst, vd, vd, size); |
| } else { |
| return new VcvtFpSFixedS(machInst, vd, vd, size); |
| } |
| } else { |
| if (half) { |
| return new VcvtFpSHFixedD(machInst, vd, vd, size); |
| } else { |
| return new VcvtFpSFixedD(machInst, vd, vd, size); |
| } |
| } |
| } |
| case 0xf: |
| { |
| const bool half = (bits(machInst, 7) == 0); |
| const uint32_t imm = bits(machInst, 5) | |
| (bits(machInst, 3, 0) << 1); |
| const uint32_t size = |
| (bits(machInst, 7) == 0 ? 16 : 32) - imm; |
| if (single) { |
| if (half) { |
| return new VcvtFpUHFixedS(machInst, vd, vd, size); |
| } else { |
| return new VcvtFpUFixedS(machInst, vd, vd, size); |
| } |
| } else { |
| if (half) { |
| return new VcvtFpUHFixedD(machInst, vd, vd, size); |
| } else { |
| return new VcvtFpUFixedD(machInst, vd, vd, size); |
| } |
| } |
| } |
| } |
| break; |
| } |
| return new Unknown(machInst); |
| } |
| ''' |
| }}; |
| |
| def format VfpData() {{ |
| decode_block = ''' |
| return decodeVfpData(machInst); |
| ''' |
| }}; |