// Copyright (c) 2007-2008 The Hewlett-Packard Development Company
// Copyright (c) 2015 Advanced Micro Devices, Inc.
// All rights reserved.
//
// The license below extends only to copyright in the software and shall
// not be construed as granting a license to any other intellectual
// property including but not limited to intellectual property relating
// to a hardware implementation of the functionality of the software
// licensed hereunder. You may use the software subject to the license
// terms below provided that you ensure that this notice is replicated
// unmodified and in its entirety in all distributions of the software,
// modified or unmodified, in source code or in binary form.
//
// Copyright (c) 2008 The Regents of The University of Michigan
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met: redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer;
// redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution;
// neither the name of the copyright holders nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//////////////////////////////////////////////////////////////////////////
//
// LdStOp Microop templates
//
//////////////////////////////////////////////////////////////////////////
// LEA template
def template MicroLeaExecute {{
Fault %(class_name)s::execute(ExecContext *xc,
Trace::InstRecord *traceData) const
{
Fault fault = NoFault;
Addr EA;
%(op_decl)s;
%(op_rd)s;
%(ea_code)s;
DPRINTF(X86, "%s : %s: The address is %#x\n", instMnem, mnemonic, EA);
%(code)s;
if (fault == NoFault) {
%(op_wb)s;
}
return fault;
}
}};
def template MicroLeaDeclare {{
class %(class_name)s : public %(base_class)s
{
private:
%(reg_idx_arr_decl)s;
public:
template <typename ...Args>
%(class_name)s(ExtMachInst mach_inst, const char *inst_mnem,
uint64_t set_flags, uint8_t data_size, uint8_t address_size,
Request::FlagsType mem_flags, Args ...args) :
%(base_class)s(mach_inst, "%(mnemonic)s", inst_mnem, set_flags,
args..., data_size, address_size, mem_flags, %(op_class)s)
{
%(set_reg_idx_arr)s;
%(constructor)s;
}
Fault execute(ExecContext *, Trace::InstRecord *) const override;
};
}};
// Load templates
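// Each load microop is generated with three entry points: execute() performs
// the whole access at once for atomic-mode CPUs, while initiateAcc() and
// completeAcc() implement the same access as a split transaction for
// timing-mode CPUs, with completeAcc() consuming the returned packet.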
def template MicroLoadExecute {{
Fault
%(class_name)s::execute(ExecContext *xc,
Trace::InstRecord *traceData) const
{
Fault fault = NoFault;
Addr EA;
%(op_decl)s;
%(op_rd)s;
%(ea_code)s;
DPRINTF(X86, "%s : %s: The address is %#x\n", instMnem, mnemonic, EA);
fault = readMemAtomic(xc, traceData, EA, Mem, dataSize, memFlags);
if (fault == NoFault) {
%(code)s;
} else if (memFlags & Request::PREFETCH) {
// For prefetches, ignore any faults/exceptions.
return NoFault;
}
if (fault == NoFault) {
%(op_wb)s;
}
return fault;
}
}};
def template MicroLoadInitiateAcc {{
Fault
%(class_name)s::initiateAcc(ExecContext *xc,
Trace::InstRecord *traceData) const
{
Fault fault = NoFault;
Addr EA;
%(op_decl)s;
%(op_rd)s;
%(ea_code)s;
DPRINTF(X86, "%s : %s: The address is %#x\n", instMnem, mnemonic, EA);
fault = initiateMemRead(xc, traceData, EA, %(memDataSize)s, memFlags);
return fault;
}
}};
def template MicroLoadCompleteAcc {{
Fault
%(class_name)s::completeAcc(PacketPtr pkt, ExecContext *xc,
Trace::InstRecord *traceData) const
{
Fault fault = NoFault;
%(op_decl)s;
%(op_rd)s;
getMem(pkt, Mem, dataSize, traceData);
%(code)s;
if (fault == NoFault) {
%(op_wb)s;
}
return fault;
}
}};
// Store templates
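// Stores follow the same pattern: initiateAcc() computes the data and issues
// the write, and completeAcc() only runs any completion code and the final
// register writeback.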
def template MicroStoreExecute {{
Fault
%(class_name)s::execute(ExecContext *xc,
Trace::InstRecord *traceData) const
{
Fault fault = NoFault;
Addr EA;
%(op_decl)s;
%(op_rd)s;
%(ea_code)s;
DPRINTF(X86, "%s : %s: The address is %#x\n", instMnem, mnemonic, EA);
%(code)s;
if (fault == NoFault) {
fault = writeMemAtomic(xc, traceData, Mem, dataSize, EA,
memFlags, NULL);
if (fault == NoFault) {
%(op_wb)s;
}
}
return fault;
}
}};
def template MicroStoreInitiateAcc {{
Fault
%(class_name)s::initiateAcc(ExecContext *xc,
Trace::InstRecord *traceData) const
{
Fault fault = NoFault;
Addr EA;
%(op_decl)s;
%(op_rd)s;
%(ea_code)s;
DPRINTF(X86, "%s : %s: The address is %#x\n", instMnem, mnemonic, EA);
%(code)s;
if (fault == NoFault) {
fault = writeMemTiming(xc, traceData, Mem, dataSize, EA,
memFlags, NULL);
}
return fault;
}
}};
def template MicroStoreCompleteAcc {{
Fault
%(class_name)s::completeAcc(PacketPtr pkt, ExecContext *xc,
Trace::InstRecord *traceData) const
{
%(op_decl)s;
%(op_rd)s;
%(complete_code)s;
%(op_wb)s;
return NoFault;
}
}};
def template MicroLdStOpDeclare {{
class %(class_name)s : public %(base_class)s
{
private:
%(reg_idx_arr_decl)s;
public:
template <typename ...Args>
%(class_name)s(ExtMachInst mach_inst, const char *inst_mnem,
uint64_t set_flags, uint8_t data_size, uint8_t address_size,
Request::FlagsType mem_flags, Args ...args) :
%(base_class)s(mach_inst, "%(mnemonic)s", inst_mnem, set_flags,
args..., data_size, address_size, mem_flags, %(op_class)s)
{
%(set_reg_idx_arr)s;
%(constructor)s;
}
Fault execute(ExecContext *, Trace::InstRecord *) const override;
Fault initiateAcc(ExecContext *, Trace::InstRecord *) const override;
Fault completeAcc(PacketPtr, ExecContext *,
Trace::InstRecord *) const override;
};
}};
// LdStSplitOp is a load or store that uses a pair of regs as the
// source or destination. Used for cmpxchg{8,16}b.
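// The low and high registers carry the two halves of the value (e.g. EDX:EAX
// for cmpxchg8b), so the memory access covers 2 * dataSize bytes.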
def template MicroLdStSplitOpDeclare {{
class %(class_name)s : public %(base_class)s
{
private:
%(reg_idx_arr_decl)s;
public:
%(class_name)s(ExtMachInst mach_inst, const char *inst_mnem,
uint64_t set_flags,
GpRegIndex data_low, GpRegIndex data_hi,
uint8_t _scale, GpRegIndex _index, GpRegIndex _base,
uint64_t _disp, SegRegIndex _segment,
uint8_t data_size, uint8_t address_size,
Request::FlagsType mem_flags);
Fault execute(ExecContext *, Trace::InstRecord *) const override;
Fault initiateAcc(ExecContext *, Trace::InstRecord *) const override;
Fault completeAcc(PacketPtr, ExecContext *,
Trace::InstRecord *) const override;
};
}};
def template MicroLdStSplitOpConstructor {{
%(class_name)s::%(class_name)s(ExtMachInst mach_inst,
const char *inst_mnem, uint64_t set_flags,
GpRegIndex data_low, GpRegIndex data_hi,
uint8_t _scale, GpRegIndex _index, GpRegIndex _base,
uint64_t _disp, SegRegIndex _segment,
uint8_t data_size, uint8_t address_size,
Request::FlagsType mem_flags) :
%(base_class)s(mach_inst, "%(mnemonic)s", inst_mnem, set_flags,
data_low, data_hi, _scale, _index, _base, _disp, _segment,
data_size, address_size, mem_flags, %(op_class)s)
{
%(set_reg_idx_arr)s;
%(constructor)s;
}
}};
let {{
class LdStOp(X86Microop):
def __init__(self, data, segment, addr, disp,
dataSize, addressSize, baseFlags, atCPL0, prefetch, nonSpec,
implicitStack, uncacheable):
self.data = data
[self.scale, self.index, self.base] = addr
self.disp = disp
self.segment = segment
self.dataSize = dataSize
self.addressSize = addressSize
self.memFlags = baseFlags
if atCPL0:
self.memFlags += " | (CPL0FlagBit << FlagShift)"
self.instFlags = ""
if prefetch:
self.memFlags += " | Request::PREFETCH"
self.instFlags += " | (1ULL << StaticInst::IsDataPrefetch)"
if nonSpec:
self.instFlags += " | (1ULL << StaticInst::IsNonSpeculative)"
if uncacheable:
                self.memFlags += " | Request::UNCACHEABLE"
            # For implicit stack operations, we should *not* use the
            # alternative addressing mode for loads/stores, even if the
            # address size prefix is set.
if not implicitStack:
self.memFlags += " | (machInst.legacy.addr ? " + \
"(AddrSizeFlagBit << FlagShift) : 0)"
def getAllocator(self, microFlags):
allocator = '''new %(class_name)s(machInst, macrocodeBlock,
%(flags)s, %(dataSize)s, %(addressSize)s, %(memFlags)s,
%(data)s, %(scale)s, %(index)s, %(base)s,
%(disp)s, %(segment)s)''' % {
"class_name" : self.className,
"flags" : self.microFlagsText(microFlags) + self.instFlags,
"scale" : self.scale, "index" : self.index,
"base" : self.base,
"disp" : self.disp,
"segment" : self.segment, "data" : self.data,
"dataSize" : self.dataSize, "addressSize" : self.addressSize,
"memFlags" : self.memFlags}
return allocator
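    # BigLdStOp generates an allocator that picks between two classes at
    # decode time: accesses of 4 or more bytes use the "Big" variant, which
    # overwrites the destination instead of merging into it (32-bit results
    # zero-extend on x86-64, so no partial-register merge is needed).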
class BigLdStOp(X86Microop):
def __init__(self, data, segment, addr, disp,
dataSize, addressSize, baseFlags, atCPL0, prefetch, nonSpec,
implicitStack, uncacheable):
self.data = data
[self.scale, self.index, self.base] = addr
self.disp = disp
self.segment = segment
self.dataSize = dataSize
self.addressSize = addressSize
self.memFlags = baseFlags
if atCPL0:
self.memFlags += " | (CPL0FlagBit << FlagShift)"
self.instFlags = ""
if prefetch:
self.memFlags += " | Request::PREFETCH"
self.instFlags += " | (1ULL << StaticInst::IsDataPrefetch)"
if nonSpec:
self.instFlags += " | (1ULL << StaticInst::IsNonSpeculative)"
if uncacheable:
                self.memFlags += " | Request::UNCACHEABLE"
            # For implicit stack operations, we should *not* use the
            # alternative addressing mode for loads/stores, even if the
            # address size prefix is set.
if not implicitStack:
self.memFlags += " | (machInst.legacy.addr ? " + \
"(AddrSizeFlagBit << FlagShift) : 0)"
def getAllocator(self, microFlags):
allocString = '''
(%(dataSize)s >= 4) ?
(StaticInstPtr)(new %(class_name)sBig(machInst,
macrocodeBlock, %(flags)s, %(dataSize)s,
%(addressSize)s, %(memFlags)s, %(data)s, %(scale)s,
%(index)s, %(base)s, %(disp)s, %(segment)s)) :
(StaticInstPtr)(new %(class_name)s(machInst,
macrocodeBlock, %(flags)s, %(dataSize)s,
%(addressSize)s, %(memFlags)s, %(data)s, %(scale)s,
%(index)s, %(base)s, %(disp)s, %(segment)s))
'''
allocator = allocString % {
"class_name" : self.className,
"flags" : self.microFlagsText(microFlags) + self.instFlags,
"scale" : self.scale, "index" : self.index,
"base" : self.base,
"disp" : self.disp,
"segment" : self.segment, "data" : self.data,
"dataSize" : self.dataSize, "addressSize" : self.addressSize,
"memFlags" : self.memFlags}
return allocator
class LdStSplitOp(LdStOp):
def __init__(self, data, segment, addr, disp,
dataSize, addressSize, baseFlags, atCPL0, prefetch, nonSpec,
implicitStack, uncacheable):
super().__init__(0, segment, addr, disp,
dataSize, addressSize, baseFlags, atCPL0, prefetch, nonSpec,
implicitStack, uncacheable)
(self.dataLow, self.dataHi) = data
def getAllocator(self, microFlags):
allocString = '''(StaticInstPtr)(new %(class_name)s(machInst,
macrocodeBlock, %(flags)s, %(dataLow)s, %(dataHi)s,
%(scale)s, %(index)s, %(base)s, %(disp)s, %(segment)s,
%(dataSize)s, %(addressSize)s, %(memFlags)s))
'''
allocator = allocString % {
"class_name" : self.className,
"flags" : self.microFlagsText(microFlags) + self.instFlags,
"scale" : self.scale, "index" : self.index,
"base" : self.base,
"disp" : self.disp,
"segment" : self.segment,
"dataLow" : self.dataLow, "dataHi" : self.dataHi,
"dataSize" : self.dataSize, "addressSize" : self.addressSize,
"memFlags" : self.memFlags}
return allocator
class MemNoDataOp(X86Microop):
def __init__(self, segment, addr, disp=0,
dataSize="env.dataSize", addressSize="env.addressSize",
baseFlags="0"):
[self.scale, self.index, self.base] = addr
self.disp = disp
self.segment = segment
self.dataSize = dataSize
self.addressSize = addressSize
self.instFlags = ""
            self.memFlags = baseFlags + " | " \
                "(machInst.legacy.addr ? (AddrSizeFlagBit << FlagShift) : 0)"
def getAllocator(self, microFlags):
allocator = '''new %(class_name)s(machInst, macrocodeBlock,
%(flags)s, %(dataSize)s, %(addressSize)s, %(memFlags)s,
%(scale)s, %(index)s, %(base)s, %(disp)s,
%(segment)s)''' % {
"class_name" : self.className,
"flags" : self.microFlagsText(microFlags) + self.instFlags,
"scale" : self.scale, "index" : self.index,
"base" : self.base,
"disp" : self.disp,
"segment" : self.segment,
"dataSize" : self.dataSize, "addressSize" : self.addressSize,
"memFlags" : self.memFlags}
return allocator
}};
let {{
# Make these empty strings so that concatenating onto
# them will always work.
header_output = ""
decoder_output = ""
exec_output = ""
segmentEAExpr = \
'bits(scale * Index + Base + disp, addressSize * 8 - 1, 0);'
calculateEA = 'EA = SegBase + ' + segmentEAExpr
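    # After substitution, the generated address computation is effectively:
    #   EA = SegBase + bits(scale * Index + Base + disp, addressSize * 8 - 1, 0);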
def defineMicroLoadOp(mnemonic, code, bigCode='',
mem_flags="0", big=True, nonSpec=False,
implicitStack=False, is_float=False):
global header_output
global decoder_output
global exec_output
global microopClasses
Name = mnemonic
name = mnemonic.lower()
if is_float:
base = 'X86ISA::LdStFpOp'
else:
base = 'X86ISA::LdStOp'
# Build up the all register version of this micro op
iops = [InstObjParams(name, Name, base,
{ "code": code,
"ea_code": calculateEA,
"memDataSize": "dataSize" })]
if big:
iops += [InstObjParams(name, Name + "Big", base,
{ "code": bigCode,
"ea_code": calculateEA,
"memDataSize": "dataSize" })]
for iop in iops:
header_output += MicroLdStOpDeclare.subst(iop)
exec_output += MicroLoadExecute.subst(iop)
exec_output += MicroLoadInitiateAcc.subst(iop)
exec_output += MicroLoadCompleteAcc.subst(iop)
if implicitStack:
            # For instructions that implicitly access the stack, the address
            # size is the same as the stack segment pointer size, not the
            # address size specified by the instruction prefix.
addressSize = "env.stackSize"
else:
addressSize = "env.addressSize"
base = LdStOp
if big:
base = BigLdStOp
class LoadOp(base):
def __init__(self, data, segment, addr, disp = 0,
dataSize="env.dataSize",
addressSize=addressSize,
atCPL0=False, prefetch=False, nonSpec=nonSpec,
implicitStack=implicitStack, uncacheable=False):
super().__init__(data, segment, addr,
disp, dataSize, addressSize, mem_flags,
atCPL0, prefetch, nonSpec, implicitStack, uncacheable)
self.className = Name
self.mnemonic = name
microopClasses[name] = LoadOp
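    # The load microops defined below are referenced by name from the
    # instruction microcode; an illustrative (hypothetical) macroop using one
    # would look roughly like:
    #
    #   def macroop MOV_R_M {
    #       ld reg, seg, sib, disp
    #   };
    #
    # which loads dataSize bytes from seg:[scale * index + base + disp] into
    # the destination register.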
defineMicroLoadOp('Ld', 'Data = merge(Data, data, Mem, dataSize);',
'Data = Mem & mask(dataSize * 8);')
defineMicroLoadOp('Ldis', 'Data = merge(Data, data, Mem, dataSize);',
'Data = Mem & mask(dataSize * 8);',
implicitStack=True)
defineMicroLoadOp('Ldst', 'Data = merge(Data, data, Mem, dataSize);',
'Data = Mem & mask(dataSize * 8);',
'Request::READ_MODIFY_WRITE')
defineMicroLoadOp('Ldstl', 'Data = merge(Data, data, Mem, dataSize);',
'Data = Mem & mask(dataSize * 8);',
'Request::READ_MODIFY_WRITE | Request::LOCKED_RMW',
nonSpec=True)
defineMicroLoadOp('Ldfp', code='FpData_uqw = Mem', big=False,
is_float=True)
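    # The x87 load variants below target the FP stack registers (modeled as
    # doubles): Ldfp87 reinterprets the raw memory bits as a float or double
    # (fld), while Ldifp87 sign-extends an integer operand and converts it
    # (fild).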
defineMicroLoadOp('Ldfp87', code='''
switch (dataSize)
{
case 4:
FpData_df = *(float *)&Mem;
break;
case 8:
FpData_df = *(double *)&Mem;
break;
default:
panic("Unhandled data size in LdFp87.\\n");
}
''', big=False, is_float=True)
# Load integer from memory into x87 top-of-stack register.
# Used to implement fild instruction.
defineMicroLoadOp('Ldifp87', code='''
switch (dataSize)
{
case 2:
FpData_df = (int64_t)sext<16>(Mem);
break;
case 4:
FpData_df = (int64_t)sext<32>(Mem);
break;
case 8:
FpData_df = (int64_t)Mem;
break;
default:
panic("Unhandled data size in LdIFp87.\\n");
}
''', big=False, is_float=True)
def defineMicroLoadSplitOp(mnemonic, code, mem_flags="0", nonSpec=False):
global header_output
global decoder_output
global exec_output
global microopClasses
Name = mnemonic
name = mnemonic.lower()
iop = InstObjParams(name, Name, 'X86ISA::LdStSplitOp',
{ "code": code,
"ea_code": calculateEA,
"memDataSize": "2 * dataSize" })
header_output += MicroLdStSplitOpDeclare.subst(iop)
decoder_output += MicroLdStSplitOpConstructor.subst(iop)
exec_output += MicroLoadExecute.subst(iop)
exec_output += MicroLoadInitiateAcc.subst(iop)
exec_output += MicroLoadCompleteAcc.subst(iop)
class LoadOp(LdStSplitOp):
def __init__(self, data, segment, addr, disp = 0,
dataSize="env.dataSize",
addressSize="env.addressSize",
atCPL0=False, prefetch=False, nonSpec=nonSpec,
implicitStack=False, uncacheable=False):
super().__init__(data, segment, addr,
disp, dataSize, addressSize, mem_flags,
atCPL0, prefetch, nonSpec, implicitStack, uncacheable)
self.className = Name
self.mnemonic = name
microopClasses[name] = LoadOp
code = '''
DataLow = Mem_u2qw[0];
DataHi = Mem_u2qw[1];
'''
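    # LdSplit reads 2 * dataSize bytes and deposits the two halves in the
    # DataLow/DataHi register pair; LdSplitl additionally sets LOCKED_RMW for
    # the locked cmpxchg{8,16}b forms.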
defineMicroLoadSplitOp('LdSplit', code,
'Request::READ_MODIFY_WRITE')
defineMicroLoadSplitOp('LdSplitl', code,
'Request::READ_MODIFY_WRITE | Request::LOCKED_RMW',
nonSpec=True)
def defineMicroStoreOp(mnemonic, code, completeCode="", mem_flags="0",
implicitStack=False, is_float=False, has_data=True):
global header_output
global decoder_output
global exec_output
global microopClasses
Name = mnemonic
name = mnemonic.lower()
if not has_data:
base = 'X86ISA::MemNoDataOp'
elif is_float:
base = 'X86ISA::LdStFpOp'
else:
base = 'X86ISA::LdStOp'
# Build up the all register version of this micro op
iop = InstObjParams(name, Name, base,
{ "code": code,
"complete_code": completeCode,
"ea_code": calculateEA,
"memDataSize": "dataSize" })
header_output += MicroLdStOpDeclare.subst(iop)
exec_output += MicroStoreExecute.subst(iop)
exec_output += MicroStoreInitiateAcc.subst(iop)
exec_output += MicroStoreCompleteAcc.subst(iop)
if implicitStack:
            # For instructions that implicitly access the stack, the address
            # size is the same as the stack segment pointer size, not the
            # address size specified by the instruction prefix.
addressSize = "env.stackSize"
else:
addressSize = "env.addressSize"
if has_data:
class StoreOp(LdStOp):
def __init__(self, data, segment, addr, disp=0,
dataSize="env.dataSize", addressSize=addressSize,
atCPL0=False, nonSpec=False,
implicitStack=implicitStack, uncacheable=False):
super().__init__(data, segment, addr, disp,
dataSize, addressSize, mem_flags, atCPL0, False,
nonSpec, implicitStack, uncacheable)
self.className = Name
self.mnemonic = name
else:
class StoreOp(MemNoDataOp):
def __init__(self, segment, addr, disp=0,
dataSize="env.dataSize", addressSize=addressSize):
super().__init__(segment, addr, disp,
dataSize, addressSize, mem_flags)
self.className = Name
self.mnemonic = name
microopClasses[name] = StoreOp
defineMicroStoreOp('St', 'Mem = PData;')
defineMicroStoreOp('Stis', 'Mem = PData;',
implicitStack=True)
defineMicroStoreOp('Stul', 'Mem = PData;',
mem_flags="Request::LOCKED_RMW")
defineMicroStoreOp('Stfp', code='Mem = FpData_uqw;', is_float=True)
defineMicroStoreOp('Stfp87', code='''
switch (dataSize)
{
case 4: {
float single(FpData_df);
Mem = *(uint32_t *)&single;
} break;
case 8:
Mem = *(uint64_t *)&FpData_df;
break;
default:
panic("Unhandled data size in StFp87.\\n");
}
''', is_float=True)
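    # Cda, Clflushopt and Clwb below are data-less "stores": they use
    # MemNoDataOp (has_data=False), the dummy 'Mem = 0' only provides a
    # payload, and the actual behavior comes from the request flags.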
defineMicroStoreOp('Cda', 'Mem = 0;', mem_flags="Request::NO_ACCESS",
has_data=False)
defineMicroStoreOp('Clflushopt', 'Mem = 0;',
mem_flags="Request::CLEAN | Request::INVALIDATE" +
" | Request::DST_POC", has_data=False)
defineMicroStoreOp('Clwb', 'Mem = 0;', has_data=False,
mem_flags="Request::CLEAN | Request::DST_POC")
def defineMicroStoreSplitOp(mnemonic, code,
completeCode="", mem_flags="0"):
global header_output
global decoder_output
global exec_output
global microopClasses
Name = mnemonic
name = mnemonic.lower()
iop = InstObjParams(name, Name, 'X86ISA::LdStSplitOp',
{ "code": code,
"complete_code": completeCode,
"ea_code": calculateEA,
"memDataSize": "2 * dataSize" })
header_output += MicroLdStSplitOpDeclare.subst(iop)
decoder_output += MicroLdStSplitOpConstructor.subst(iop)
exec_output += MicroStoreExecute.subst(iop)
exec_output += MicroStoreInitiateAcc.subst(iop)
exec_output += MicroStoreCompleteAcc.subst(iop)
class StoreOp(LdStSplitOp):
def __init__(self, data, segment, addr, disp = 0,
dataSize="env.dataSize",
addressSize="env.addressSize",
atCPL0=False, nonSpec=False, implicitStack=False,
uncacheable=False):
super().__init__(data, segment, addr, disp,
dataSize, addressSize, mem_flags, atCPL0, False,
nonSpec, implicitStack, uncacheable)
self.className = Name
self.mnemonic = name
microopClasses[name] = StoreOp
code = '''
Mem_u2qw[0] = DataLow;
Mem_u2qw[1] = DataHi;
'''
    defineMicroStoreSplitOp('StSplit', code)
defineMicroStoreSplitOp('StSplitul', code,
mem_flags='Request::LOCKED_RMW')
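    # lea reuses the LdStOp machinery but never accesses memory: its ea_code
    # omits the segment base, and the computed address is written directly to
    # the destination register.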
iop = InstObjParams("lea", "Lea", 'X86ISA::LdStOp',
{ "code": "Data = merge(Data, data, EA, dataSize);",
"ea_code": "EA = " + segmentEAExpr,
"memDataSize": "dataSize" })
header_output += MicroLeaDeclare.subst(iop)
exec_output += MicroLeaExecute.subst(iop)
class LeaOp(LdStOp):
def __init__(self, data, segment, addr, disp = 0,
dataSize="env.dataSize", addressSize="env.addressSize"):
super().__init__(data, segment, addr, disp,
dataSize, addressSize, "0",
False, False, False, False, False)
self.className = "Lea"
self.mnemonic = "lea"
microopClasses["lea"] = LeaOp
iop = InstObjParams("tia", "Tia", 'X86ISA::MemNoDataOp',
{ "code": "xc->demapPage(EA, 0);",
"ea_code": calculateEA,
"memDataSize": "dataSize" })
header_output += MicroLeaDeclare.subst(iop)
exec_output += MicroLeaExecute.subst(iop)
class TiaOp(MemNoDataOp):
className = "Tia"
mnemonic = "tia"
microopClasses["tia"] = TiaOp
}};