diff --git a/docs/guide.rst b/docs/guide.rst index f85c49fb..746d7885 100644 --- a/docs/guide.rst +++ b/docs/guide.rst @@ -84,5 +84,5 @@ SLEIGH & P-Code References -------------------------- Extensive documentation covering SLEIGH and P-Code is available online: -* `SLEIGH, P-Code Introduction `_ -* `P-Code Reference Manual `_ \ No newline at end of file +* `SLEIGH, P-Code Introduction `_ +* `P-Code Reference Manual `_ diff --git a/docs/index.rst b/docs/index.rst index d1b06630..e16623f1 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -1,7 +1,7 @@ pypcode documentation ===================== pypcode is a machine code disassembly and IR translation library for Python using the -excellent `SLEIGH `__ library from the `Ghidra `__ framework (version 11.1.2). +excellent `SLEIGH `__ library from the `Ghidra `__ framework (version 11.2.1). This library was created primarily for use with `angr `__, which provides analyses and symbolic execution of p-code. diff --git a/pypcode/processors/68000/data/languages/68000.sinc b/pypcode/processors/68000/data/languages/68000.sinc index 9d53e822..9ad6a661 100644 --- a/pypcode/processors/68000/data/languages/68000.sinc +++ b/pypcode/processors/68000/data/languages/68000.sinc @@ -1529,9 +1529,9 @@ subdiv: regdr:regdq is regdq & regdr & divsz=1 & divsgn=1 { :exg reg9an,regan is op=12 & reg9an & op8=1 & op37=9 & regan { local tmp = reg9an; reg9an=regan; regan=tmp; } :exg reg9dn,regan is op=12 & reg9dn & op8=1 & op37=17 & regan { local tmp = reg9dn; reg9dn=regan; regan=tmp; } -:ext.w regdnw is op=4 & reg9dn=4 & op68=2 & op35=0 & regdnw { local tmp = regdnw:1; regdnw = sext(tmp); } -:ext.l regdn is op=4 & reg9dn=4 & op68=3 & op35=0 & regdn { local tmp = regdn:2; regdn = sext(tmp); } -:extb.l regdn is op=4 & reg9dn=4 & op68=7 & op35=0 & regdn { local tmp = regdn:1; regdn = sext(tmp); } +:ext.w regdnw is op=4 & reg9dn=4 & op68=2 & op35=0 & regdnw { local tmp = regdnw:1; regdnw = sext(tmp); resflags(regdnw); logflags(); } +:ext.l regdn is 
op=4 & reg9dn=4 & op68=3 & op35=0 & regdn { local tmp = regdn:2; regdn = sext(tmp); resflags(regdn); logflags(); } +:extb.l regdn is op=4 & reg9dn=4 & op68=7 & op35=0 & regdn { local tmp = regdn:1; regdn = sext(tmp); resflags(regdn); logflags(); } @ifdef COLDFIRE :halt is d16=0x4ac8 unimpl @@ -2687,7 +2687,7 @@ m2fpC2: FPCR is FPCR & f12=1 { FPCR = *movemptr; movemptr = movemptr + 1 m2fpC2: is f12=0 { } m2fpC1: m2fpC2" "FPSR is FPSR & f11=1 & m2fpC2 { FPSR = *movemptr; movemptr = movemptr + 12; } m2fpC1: m2fpC2 is f11=0 & m2fpC2 { } -m2fpC0: { m2fpC1" "FPCR } is FPCR & f10=1 & m2fpC1 { FPCR = *movemptr; movemptr = movemptr + 12; } +m2fpC0: { m2fpC1" "FPIAR } is FPIAR & f10=1 & m2fpC1 { FPIAR = *movemptr; movemptr = movemptr + 12; } m2fpC0: { m2fpC1 } is f10=0 & m2fpC1 { } # Floating point control register to Memory diff --git a/pypcode/processors/8051/data/languages/8051.opinion b/pypcode/processors/8051/data/languages/8051.opinion new file mode 100644 index 00000000..07da4fce --- /dev/null +++ b/pypcode/processors/8051/data/languages/8051.opinion @@ -0,0 +1,7 @@ + + + + + + + diff --git a/pypcode/processors/AARCH64/data/languages/AARCH64.ldefs b/pypcode/processors/AARCH64/data/languages/AARCH64.ldefs index 9d83a37e..dad5496b 100644 --- a/pypcode/processors/AARCH64/data/languages/AARCH64.ldefs +++ b/pypcode/processors/AARCH64/data/languages/AARCH64.ldefs @@ -15,6 +15,7 @@ + + + + + + + + + + + + + + + + + + + diff --git a/pypcode/processors/AARCH64/data/languages/AARCH64_golang.register.info b/pypcode/processors/AARCH64/data/languages/AARCH64_golang.register.info index ff42eb3c..683cba89 100644 --- a/pypcode/processors/AARCH64/data/languages/AARCH64_golang.register.info +++ b/pypcode/processors/AARCH64/data/languages/AARCH64_golang.register.info @@ -6,5 +6,14 @@ + + + + + + + + + \ No newline at end of file diff --git a/pypcode/processors/ARM/data/languages/ARM.dwarf b/pypcode/processors/ARM/data/languages/ARM.dwarf index 0444463e..4d4a1822 100644 --- 
a/pypcode/processors/ARM/data/languages/ARM.dwarf +++ b/pypcode/processors/ARM/data/languages/ARM.dwarf @@ -8,5 +8,11 @@ - + diff --git a/pypcode/processors/ARM/data/languages/ARMneon.dwarf b/pypcode/processors/ARM/data/languages/ARMneon.dwarf index fa2ff762..a6cd403b 100644 --- a/pypcode/processors/ARM/data/languages/ARMneon.dwarf +++ b/pypcode/processors/ARM/data/languages/ARMneon.dwarf @@ -10,5 +10,11 @@ - + diff --git a/pypcode/processors/ARM/data/languages/ARMneon.sinc b/pypcode/processors/ARM/data/languages/ARMneon.sinc index eb0da701..d047b3a9 100644 --- a/pypcode/processors/ARM/data/languages/ARMneon.sinc +++ b/pypcode/processors/ARM/data/languages/ARMneon.sinc @@ -619,9 +619,9 @@ define pcodeop SHA1HashUpdateParity; local op1 = Qd; local op2 = Qn; local op3 = Qm; - local op2lo:8 = op2(0); - local op1hi:8 = op1(8); - op2 = zext(op2lo << 64) | zext(op1hi); + local op2LowerHalf = zext(op2[0,64]) << 64; + local op1UpperHalf = zext(op1[64,64]); + op2 = op2LowerHalf | op1UpperHalf; Qd = op1 ^ op2 ^ op3; } @@ -637,15 +637,15 @@ define pcodeop SHA1HashUpdateParity; local X = Qd; local Y = Qm; local Tm = X ^ (Y >> 32); - local t0:4 = Tm(0); - local t1:4 = Tm(4); - local t2:4 = Tm(8); - local t3:4 = Tm(12); + local t0:4 = Tm[0, 32]; + local t1:4 = Tm[32, 32]; + local t2:4 = Tm[64, 32]; + local t3:4 = Tm[96, 32]; local W0:4 = (t0 << 1 | t0 >> 31); local W1:4 = (t1 << 1 | t1 >> 31); local W2:4 = (t2 << 1 | t2 >> 31); local W3:4 = (t3 << 1 | t3 >> 31) ^ (t0 << 2 | t0 >> 30); - Qd = zext(W3 << 96) | zext(W2 << 64) | zext(W1 << 32) | zext(W0); + Qd = (zext(W3) << 96) | (zext(W2) << 64) | (zext(W1) << 32) | zext(W0); } ####### diff --git a/pypcode/processors/ARM/data/languages/ARMv8.sinc b/pypcode/processors/ARM/data/languages/ARMv8.sinc index 96ebdae4..79ff865b 100644 --- a/pypcode/processors/ARM/data/languages/ARMv8.sinc +++ b/pypcode/processors/ARM/data/languages/ARMv8.sinc @@ -119,13 +119,14 @@ dcps_lev:3 is TMode=1 & thv_c0001=0b11 { export 3:1; } :ldaexd^COND 
Rd,Rd2,[Rn] is TMode=0 & ARMcond=1 & COND & c2027=0x1b & Rn & Rd & Rd2 & c0011=0xe9f { + local addr:4 = Rn; build COND; @if ENDIAN == "big" - Rd = *(Rn + 4); - Rd2 = *(Rn); + Rd = *(addr + 4); + Rd2 = *(addr); @else # ENDIAN == "little" - Rd = *(Rn); - Rd2 = *(Rn + 4); + Rd = *(addr); + Rd2 = *(addr + 4); @endif # ENDIAN == "little" } @@ -134,13 +135,14 @@ dcps_lev:3 is TMode=1 & thv_c0001=0b11 { export 3:1; } is TMode=1 & thv_c2031=0b111010001101 & thv_c0407=0b1111 & ItCond & thv_Rt & thv_Rt2 & thv_Rn { + local addr:4 = thv_Rn; build ItCond; @if ENDIAN == "big" - thv_Rt = *(thv_Rn + 4); - thv_Rt2 = *(thv_Rn); + thv_Rt = *(addr + 4); + thv_Rt2 = *(addr); @else # ENDIAN == "little" - thv_Rt = *(thv_Rn); - thv_Rt2 = *(thv_Rn + 4); + thv_Rt = *(addr); + thv_Rt2 = *(addr + 4); @endif # ENDIAN == "little" } diff --git a/pypcode/processors/M16C/data/languages/M16C_60.cspec b/pypcode/processors/M16C/data/languages/M16C_60.cspec new file mode 100644 index 00000000..3949c2a7 --- /dev/null +++ b/pypcode/processors/M16C/data/languages/M16C_60.cspec @@ -0,0 +1,40 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/pypcode/processors/M16C/data/languages/M16C_60.ldefs b/pypcode/processors/M16C/data/languages/M16C_60.ldefs new file mode 100644 index 00000000..18014c05 --- /dev/null +++ b/pypcode/processors/M16C/data/languages/M16C_60.ldefs @@ -0,0 +1,20 @@ + + + + + + Renesas M16C/60 16-Bit MicroComputer + + + + diff --git a/pypcode/processors/M16C/data/languages/M16C_60.pspec b/pypcode/processors/M16C/data/languages/M16C_60.pspec new file mode 100644 index 00000000..a1e280dd --- /dev/null +++ b/pypcode/processors/M16C/data/languages/M16C_60.pspec @@ -0,0 +1,169 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/pypcode/processors/M16C/data/languages/M16C_60.slaspec b/pypcode/processors/M16C/data/languages/M16C_60.slaspec new file mode 100644 index 00000000..e6046afa --- /dev/null +++ b/pypcode/processors/M16C/data/languages/M16C_60.slaspec @@ -0,0 +1,3627 @@ +# Renesas M16C/60 16-Bit MicroComputer + +# +# Memory Architecture +# +define endian=little; + +define alignment=1; + +define space RAM type=ram_space size=3 default; +define space register type=register_space size=2; + +# +# General Registers +# +define register offset=0x0000 size=2 [ + R1 R3 R0 R2 A0 A1 +]; + +define register offset=0x0000 size=1 [ + R1L R1H _ _ R0L R0H +]; + +define register offset=0x0000 size=4 [ + R3R1 R2R0 A1A0 +]; + +define register offset=0x1000 size=3 [ + PC # Program Counter +]; + +define register offset=0x2000 size=3 [ + INTB # Interrupt Table Register +]; + +define register offset=0x2000 size=2 [ + INTBL INTBH +]; + +define register offset=0x3000 size=2 [ + SP # Current Stack Pointer (Represents active stack pointer: ISP or USP) + FB # Frame Base Register + SB # Static Base Register + FLG # Flag Register + ISP # Interrupt Stack Pointer +]; + +# Flag Register Contents (FLG) +# +# b15 - Reserved area +# b14:b12 - Processor interrupt priority level +# b11:b8 - Reserved area +# b7 - (U) Stack pointer select flag +# b6 - (I) Interrupt enable flag +# b5 - (O) Overflow flag +# b4 - (B) Register bank select flag +# b3 - (S) Sign flag +# b2 - (Z) Zero flag +# b1 - (D) Debug flag +# b0 - (C) Carry flag +@define CARRY "FLG[0,1]" +@define DEBUG "FLG[1,1]" +@define ZERO "FLG[2,1]" +@define SIGN "FLG[3,1]" +@define REG_BANK "FLG[4,1]" +@define OVERFLOW "FLG[5,1]" +@define INTERRUPT "FLG[6,1]" +@define STACK_SEL "FLG[7,1]" +@define IPL "FLG[12,3]" + +# Define context bits +define register offset=0xA000 size=4 contextreg; + +define context contextreg + dstFollowsSrc = (0,1) # =1 destination add-on data 
follows 4-bit encoded source add-on data + # =2 destination add-on data follows 8-bit data +; + +define token b0(8) + b0_0007 = (0,7) +; + +define token b1(8) + b1_d2 = (0,1) + b1_d3 = (0,2) + b1_d3_2 = (2,2) + b1_2_reg8 = (2,2) + b1_2_regAx = (2,2) + b1_3_regAx = (3,3) + b1_3_reg8 = (3,3) + b1_size_0 = (0,0) + b1_0407 = (4,7) + b1_0307 = (3,7) + b1_0107 = (1,7) + b1_0007 = (0,7) + b1_0002 = (0,2) + b1_bit = (0,2) +; + +define token b2(8) + b2_d4_reg8 = (0,1) + b2_s4_reg8 = (4,5) + b2_d4_reg16 = (0,1) + b2_s4_reg16 = (4,5) + b2_d4_reg32 = (0,0) + b2_s4_reg32 = (4,4) + b2_reg32 = (4,4) + b2_d4_regAxSF = (0,1) # selects A0, A1, SB or FB + b2_s4_regAxSF = (4,5) # selects A0, A1, SB or FB + b2_d4_regAx = (0,0) + b2_s4_regAx = (4,4) + b2_reg16 = (4,6) + b2_creg16 = (4,6) + b2_d4 = (0,3) + b2_d4_3 = (3,3) + b2_d4_23 = (2,3) + b2_d4_13 = (1,3) + b2_s4 = (4,7) + b2_s4_23 = (6,7) + b2_s4_13 = (5,7) + b2_shiftSign_7 = (7,7) + b2_shiftSign_3 = (3,3) + b2_0707 = (7,7) + b2_0607 = (6,7) + b2_0507 = (5,7) + b2_0407 = (4,7) + b2_0406 = (4,6) + b2_0307 = (3,7) + b2_0303 = (3,3) + b2_0007 = (0,7) + b2_0003 = (0,3) + b2_0002 = (0,2) + b2_simm4_0407 = (4,7) signed + b2_simm4_0003 = (0,3) signed +; + +define token b3(8) + b3_0407 = (4,7) + b3_0007 = (0,7) + b3_0003 = (0,3) +; + +define token b4(8) + b4_0007 = (0,7) +; + +define token b5(8) + b5_0007 = (0,7) +; + +define token b6(8) + b6_0007 = (0,7) +; + +define token imm8(8) + simm8_dat = (0,7) signed + imm8_dat = (0,7) + imm8_base = (3,7) # bit,base byte displacement + imm8_bit = (0,2) # bit,base bit number + simm8_base = (3,7) signed # bit,base signed byte displacement + simm8_bit = (0,2) # bit,base signed bit number + imm6_dat = (0,5) # int number + cnd8_dat = (0,7) + imm8_0607 = (6,7) + imm8_0407 = (4,7) + imm8_0003 = (0,3) + regBit7 = (7,7) + regBit6 = (6,6) + regBit5 = (5,5) + regBit4 = (4,4) + regBit3 = (3,3) + regBit2 = (2,2) + regBit1 = (1,1) + regBit0 = (0,0) +; + +define token imm16(16) + simm16_dat = (0,15) signed + 
imm16_dat = (0,15) + imm16_base = (3,15) # bit,base byte displacement + imm16_bit = (0, 2) # bit,base bit number +; + +define token imm24(24) + simm24_dat = (0,23) signed + imm24_dat = (0,23) + simm20_dat = (0,19) + imm20_dat = (0,19) +; + +define token imm32(32) + simm32_dat = (0,31) signed + imm32_dat = (0,31) +; + +attach variables [ b2_s4_reg16 b2_d4_reg16 ] [ R0 R1 R2 R3 ]; +attach variables [ b2_s4_reg8 b2_d4_reg8 ] [ R0L R0H R1L R1H ]; +attach variables [ b1_2_reg8 b1_3_reg8 ] [ R0L R0H ]; +attach variables [ b2_s4_regAx b2_d4_regAx b1_3_regAx b1_2_regAx ] [ A0 A1 ]; +attach variables [ b2_s4_regAxSF b2_d4_regAxSF ] [ A0 A1 SB FB ]; +attach variables [ b2_reg16 ] [ R0 R1 R2 R3 A0 A1 _ _ ]; +attach variables [ b2_creg16 ] [ _ INTBL INTBH FLG ISP SP SB FB ]; +attach variables [ b2_reg32 b2_d4_reg32 ] [ R2R0 R3R1 ]; + +# +# PCode Op +# +define pcodeop Break; # BRK +define pcodeop DecimalAdd; # DADD +define pcodeop DecimalAddWithCarry; # DADC +define pcodeop DecimalSubtractWithBorrow; # DSBB +define pcodeop DecimalSubtract; # DSUB +define pcodeop Wait; # WAIT + +# +# FLAG MACROS... 
+# +# Set zero and sign flags from result +macro setResultFlags(result) { + $(SIGN) = (result s< 0x0); + $(ZERO) = (result == 0x0); +} + +# Set carry and overflow flags for addition +macro setAdd3Flags(v1, v2, v3) { + local add13 = v1 + v3; + $(CARRY) = carry(v1,v3) || carry(v2,add13); + $(OVERFLOW) = scarry(v1,v3) || scarry(v2,add13); +} + +# Set carry and overflow flags for addition +macro setAddFlags(v1, v2) { + $(CARRY) = carry(v1, v2); + $(OVERFLOW) = scarry(v1, v2); +} + +# Set overflow flags for subtraction of op3,op2 from op1 (op1-op2-op3) +macro setSubtract3Flags(v1, v2, v3) { + local add12 = v1 - v2; + $(CARRY) = (v1 >= v2) || (add12 >= v3); + $(OVERFLOW) = sborrow(v1, v2) || sborrow(add12, v3); +} + +# Set overflow flags for subtraction of op2 from op1 (op1-op2) +macro setSubtractFlags(v1, v2) { + $(CARRY) = (v1 s>= v2); + $(OVERFLOW) = sborrow(v1, v2); +} + +macro push1(val) { + SP = SP - 1; + ptr:3 = zext(SP); + *:1 ptr = val; +} + +macro push2(val) { + SP = SP - 2; + ptr:3 = zext(SP); + *:2 ptr = val; +} + +macro push3(val) { + SP = SP - 3; + ptr:3 = zext(SP); + *:3 ptr = val; +} + +macro push4(val) { + SP = SP - 4; + ptr:3 = zext(SP); + *:4 ptr = val; +} + +macro pop1(val) { + ptr:3 = zext(SP); + val = *:1 ptr; + SP = SP + 1; +} + +macro pop2(val) { + ptr:3 = zext(SP); + val = *:2 ptr; + SP = SP + 2; +} + +macro pop3(val) { + ptr:3 = zext(SP); + val = *:3 ptr; + SP = SP + 3; +} + +macro pop4(val) { + ptr:3 = zext(SP); + val = *:4 ptr; + SP = SP + 4; +} + +# +# Source operand location data +# +# Obtain base offset displacement for [AX | SB | FB] - AX and SB uses unsigned displacements, FB uses signed displacement +src4dsp8: imm8_dat^":8" is b1_0007; b2_s4; imm8_dat { export *[const]:2 imm8_dat; } +src4dsp8: simm8_dat^":8" is b1_0007; b2_s4=0xb; simm8_dat { export *[const]:2 simm8_dat; } +src4dsp16: imm16_dat^":16" is b1_0007; b2_s4; imm16_dat { export *[const]:2 imm16_dat; } + +# src4... 
Handle 4-bit encoded Source specified by b2_s4(4-bits) +# Variable length pattern starting at instruction byte b1 +# associated src4 add-on data immediately follows instruction byte b2 +# abs16 cases are broken out differently to facilitate export of constant addresses in certain cases +# 1-Byte source value/location specified by 4-bit encoding (b2_d4) +src4B: b2_s4_reg8 is b1_0007; b2_s4_23=0x0 & b2_s4_reg8 { export b2_s4_reg8; } # Rx +src4B: b2_s4_regAx is b1_0007; b2_s4_13=0x2 & b2_s4_regAx { tmp:1 = b2_s4_regAx:1; export tmp; } # Ax +src4B: [b2_s4_regAx] is b1_0007; b2_s4_13=0x3 & b2_s4_regAx { ptr:3 = zext(b2_s4_regAx); export *:1 ptr; } # [Ax] +src4B: src4dsp8^[b2_s4_regAxSF] is (b1_0007; b2_s4_23=0x2 & b2_s4_regAxSF) ... & src4dsp8 { ptr:3 = zext(b2_s4_regAxSF + src4dsp8); export *:1 ptr; } # dsp:8[Ax|SB|FB] +src4B: src4dsp16^[b2_s4_regAxSF] is (b1_0007; b2_s4_23=0x3 & b2_s4_regAxSF) ... & src4dsp16 { ptr:3 = zext(b2_s4_regAxSF + src4dsp16); export *:1 ptr; } # dsp:16[Ax|SB|FB] +src4B: imm16_dat is b1_0007; b2_s4=0xf; imm16_dat { export *:1 imm16_dat; } # abs16 (special constant address case) + +# 2-Byte source value/location specified by 2-bit encoding (b2_d4) +src4W: b2_s4_reg16 is b1_0007; b2_s4_23=0x0 & b2_s4_reg16 { export b2_s4_reg16; } # Rx +src4W: b2_s4_regAx is b1_0007; b2_s4_13=0x2 & b2_s4_regAx { export b2_s4_regAx; } # Ax +src4W: [b2_s4_regAx] is b1_0007; b2_s4_13=0x3 & b2_s4_regAx { ptr:3 = zext(b2_s4_regAx); export *:2 ptr; } # [Ax] +src4W: src4dsp8^[b2_s4_regAxSF] is (b1_0007; b2_s4_23=0x2 & b2_s4_regAxSF) ... & src4dsp8 { ptr:3 = zext(b2_s4_regAxSF + src4dsp8); export *:2 ptr; } # dsp:8[Ax|SB|FB] +src4W: src4dsp16^[b2_s4_regAxSF] is (b1_0007; b2_s4_23=0x3 & b2_s4_regAxSF) ... 
& src4dsp16 { ptr:3 = zext(b2_s4_regAxSF + src4dsp16); export *:2 ptr; } # dsp:16[Ax|SB|FB] +src4W: imm16_dat is b1_0007; b2_s4=0xf; imm16_dat { export *:2 imm16_dat; } # abs16 (special constant address case) + +# +# Destination operand location data (may also be used as a source in certain cases) +# +# Skip instruction and source add-on bytes which occur before destination add-on bytes +# Starting position is at b1 +skipBytesBeforeDst4: is b1_0007; b2_s4 { } +skipBytesBeforeDst4: is dstFollowsSrc=1 & b1_0007; b2_s4_23=0x2; imm8_dat { } # src4: dsp8 +skipBytesBeforeDst4: is dstFollowsSrc=1 & b1_0007; b2_s4_23=0x3; imm16_dat { } # src4: dsp16/abs16 +skipBytesBeforeDst4: is dstFollowsSrc=2 & b1_0007; b2_d4; imm8_dat { } # dsp8 + +# Obtain base offset displacement for [AX | SB | FB] - AX and SB uses unsigned displacements, FB uses signed displacement +dst4dsp8: imm8_dat^":8" is (skipBytesBeforeDst4; imm8_dat) { export *[const]:2 imm8_dat; } +dst4dsp8: simm8_dat^":8" is (b1_0007; b2_d4=0xb) ... & (skipBytesBeforeDst4; simm8_dat) { export *[const]:2 simm8_dat; } +dst4dsp16: imm16_dat^":16" is (skipBytesBeforeDst4; imm16_dat) { export *[const]:2 imm16_dat; } + +# dst4... Handle 4-bit encoded Destination specified by b2_d4(4-bits) +# Ax direct case is read-only! Instruction must use dst4Ax for write/update case +# Variable length pattern starting at instruction byte b1 +# abs16 cases are broken out differently to facilitate export of constant addresses in certain cases +# 1-Byte destination value/location specified by 4-bit encoding (b2_d4) +dst4B: b2_d4_reg8 is b1_0007; b2_d4_23=0x0 & b2_d4_reg8 { export b2_d4_reg8; } # Rx +dst4B: b2_d4_regAx is b1_0007; b2_d4_13=0x2 & b2_d4_regAx { tmp:1 = b2_d4_regAx:1; export tmp; } # Ax - read-only use ! +dst4B: [b2_d4_regAx] is b1_0007; b2_d4_13=0x3 & b2_d4_regAx { ptr:3 = zext(b2_d4_regAx); export *:1 ptr; } # [Ax] +dst4B: dst4dsp8^[b2_d4_regAxSF] is (b1_0007; b2_d4_23=0x2 & b2_d4_regAxSF) ... 
& dst4dsp8 { ptr:3 = zext(b2_d4_regAxSF + dst4dsp8); export *:1 ptr; } # dsp:8[Ax|SB|FB] +dst4B: dst4dsp16^[b2_d4_regAxSF] is (b1_0007; b2_d4_23=0x3 & b2_d4_regAxSF) ... & dst4dsp16 { ptr:3 = zext(b2_d4_regAxSF + dst4dsp16); export *:1 ptr; } # dsp:16[Ax|SB] +dst4B: imm16_dat is (b1_0007; b2_d4=0xf) ... & (skipBytesBeforeDst4; imm16_dat) { export *:1 imm16_dat; } # abs16 (special constant address case) + +# 2-Byte destination value/location specified by 4-bit encoding (b2_d4) +dst4W: b2_d4_reg16 is b1_0007; b2_d4_23=0x0 & b2_d4_reg16 { export b2_d4_reg16; } # Rx +dst4W: b2_d4_regAx is b1_0007; b2_d4_13=0x2 & b2_d4_regAx { export b2_d4_regAx; } # Ax +dst4W: [b2_d4_regAx] is b1_0007; b2_d4_13=0x3 & b2_d4_regAx { ptr:3 = zext(b2_d4_regAx); export *:2 ptr; } # [Ax] +dst4W: dst4dsp8^[b2_d4_regAxSF] is (b1_0007; b2_d4_23=0x2 & b2_d4_regAxSF) ... & dst4dsp8 { ptr:3 = zext(b2_d4_regAxSF + dst4dsp8); export *:2 ptr; } # dsp:8[Ax|SB|FB] +dst4W: dst4dsp16^[b2_d4_regAxSF] is (b1_0007; b2_d4_23=0x3 & b2_d4_regAxSF) ... & dst4dsp16 { ptr:3 = zext(b2_d4_regAxSF + dst4dsp16); export *:2 ptr; } # dsp:16[Ax|SB] +dst4W: imm16_dat is (b1_0007; b2_d4=0xf) ... & (skipBytesBeforeDst4; imm16_dat) { export *:2 imm16_dat; } # abs16 (special constant address case) + +# 4-Byte destination value/location specified by 4-bit encoding (b2_d4) +dst4L: b2_d4_reg32 is b1_0007; b2_d4_13=0x0 & b2_d4_reg32 { export b2_d4_reg32; } # Rx +dst4L: A1A0 is A1A0 & b1_0007; b2_d4=0x4 { export A1A0; } # A1A0 +dst4L: [b2_d4_regAx] is b1_0007; b2_d4_13=0x3 & b2_d4_regAx { ptr:3 = zext(b2_d4_regAx); export *:4 ptr; } # [Ax] +dst4L: dst4dsp8^[b2_d4_regAxSF] is (b1_0007; b2_d4_23=0x2 & b2_d4_regAxSF) ... & dst4dsp8 { ptr:3 = zext(b2_d4_regAxSF + dst4dsp8); export *:4 ptr; } # dsp:8[Ax|SB|FB] +dst4L: dst4dsp16^[b2_d4_regAxSF] is (b1_0007; b2_d4_23=0x3 & b2_d4_regAxSF) ... & dst4dsp16 { ptr:3 = zext(b2_d4_regAxSF + dst4dsp16); export *:4 ptr; } # dsp:16[Ax|SB] +dst4L: imm16_dat is (b1_0007; b2_d4=0xf) ... 
& (skipBytesBeforeDst4; imm16_dat) { export *:4 imm16_dat; } # abs16 (special constant address case) + +# 3-Byte destination value/location specified by 4-bit encoding (b2_d4) - use DST4L to constrain, and dst4L for register Ax/Rx non-memory cases +dst4T: [b2_d4_regAx] is b1_0007; b2_d4_13=0x3 & b2_d4_regAx { ptr:3 = zext(b2_d4_regAx); export *:3 ptr; } # [Ax] +dst4T: dst4dsp8^[b2_d4_regAxSF] is (b1_0007; b2_d4_23=0x2 & b2_d4_regAxSF) ... & dst4dsp8 { ptr:3 = zext(b2_d4_regAxSF + dst4dsp8); export *:3 ptr; } # dsp:8[Ax|SB|FB] +dst4T: dst4dsp16^[b2_d4_regAxSF] is (b1_0007; b2_d4_23=0x3 & b2_d4_regAxSF) ... & dst4dsp16 { ptr:3 = zext(b2_d4_regAxSF + dst4dsp16); export *:3 ptr; } # dsp:16[Ax|SB] +dst4T: imm16_dat is (b1_0007; b2_d4=0xf) ... & (skipBytesBeforeDst4; imm16_dat) { export *:3 imm16_dat; } # abs16 (special constant address case) + +# 3-Byte effective address specified by 4-bit encoding (b2_d4) +dst4A: dst4dsp8^[b2_d4_regAxSF] is (b1_0007; b2_d4_23=0x2 & b2_d4_regAxSF) ... & dst4dsp8 { ptr:3 = zext(b2_d4_regAxSF + dst4dsp8); export ptr; } # dsp:8[Ax|SB|FB] +dst4A: dst4dsp16^[b2_d4_regAxSF] is (b1_0007; b2_d4_23=0x3 & b2_d4_regAxSF) ... & dst4dsp16 { ptr:3 = zext(b2_d4_regAxSF + dst4dsp16); export ptr; } # dsp:16[Ax|SB] +dst4A: imm16_dat is (b1_0007; b2_d4=0xf) ... & (skipBytesBeforeDst4; imm16_dat) { export *[const]:3 imm16_dat; } # abs16 (special constant address case) + +# Ax destination specified by 4-bit encoding (b2_d4) +# NOTE! 
Ax destination is special case and must be handled separately by each instruction +# Starting position is at instruction b1 +dst4Ax: b2_d4_regAx is b1_0007; b2_d4_regAx { export b2_d4_regAx; } + +# 1/2-Byte destination value/location specified by 4-bit encoding (b2_d4) +# This handles the case for dst4B, dst4W and dst4L where 5-bit encoded Source (src4) add-on bytes may exist before Destination add-on bytes +# Variable length pattern starting at instruction byte b1 +dst4B_afterSrc4: dst4B is dst4B [ dstFollowsSrc=1; ] { export dst4B; } + +dst4W_afterSrc4: dst4W is dst4W [ dstFollowsSrc=1; ] { export dst4W; } + +dst4L_afterSrc4: dst4L is dst4L [ dstFollowsSrc=1; ] { export dst4L; } + +# +# The following macros are used to constrain bit patterns when using dst4 +# These should be used by constructor pattern matching instead of the corresponding dst4 sub-constructor +# +@define DST4AX "((b1_0007; b2_d4_13=0x2) & dst4Ax)" +@define DST4A "((b1_0007; b2_d4_3=1) ... & dst4A)" +@define DST4T "((b1_0007; (b2_d4_3=1 | b2_d4_13=3)) ... & dst4T)" + +# Skip instruction and source add-on bytes which occur before destination add-on bytes +# Starting position is at b1 +skipBytesBeforeDst2: is b1_d2 { } +skipBytesBeforeDst2: is dstFollowsSrc=2 & b1_d2; imm8_dat { } # dsp8 + +# +# destination value/location specified by 2/3-bit encoding, R0H/R0L choice controlled by destination-bit (b1_0002) +# +dst2B: R0L is (R0L & b1_d3=0x4) { export R0L; } +dst2B: R0H is (R0H & b1_d3=0x0) { export R0H; } +dst2B: imm8_dat^[SB] is (SB & b1_d2=0x1) ... & (skipBytesBeforeDst2; imm8_dat) { ptr:3 = zext(SB + imm8_dat); export *:1 ptr; } +dst2B: simm8_dat^[FB] is (FB & b1_d2=0x2) ... & (skipBytesBeforeDst2; simm8_dat) { ptr:3 = zext(FB + simm8_dat); export *:1 ptr; } +dst2B: imm16_dat is (b1_d2=0x3) ... 
& (skipBytesBeforeDst2; imm16_dat) { export *:1 imm16_dat; } + +# +# destination value/location specified by 3-bit encoding (must be constrained by DST3B or DST3B_AFTER_DSP8) +# +dst3B: R0L is (R0L & b1_d3=0x4) { export R0L; } +dst3B: R0H is (R0H & b1_d3=0x3) { export R0H; } +dst3B: imm8_dat^[SB] is (SB & b1_d3=0x5) ... & (skipBytesBeforeDst2; imm8_dat) { ptr:3 = zext(SB + imm8_dat); export *:1 ptr; } +dst3B: simm8_dat^[FB] is (FB & b1_d3=0x6) ... & (skipBytesBeforeDst2; simm8_dat) { ptr:3 = zext(FB + simm8_dat); export *:1 ptr; } +dst3B: imm16_dat is (b1_d3=0x7) ... & (skipBytesBeforeDst2; imm16_dat) { export *:1 imm16_dat; } + +# 1-Byte destination value/location specified by 3-bit encoding (b2_d3) +# This handles the case for dst3B where Dsp8 add-on bytes always exist before Destination add-on bytes +# Variable length pattern starting at instruction byte b1 +dst3B_afterDsp8: dst3B is dst3B [ dstFollowsSrc=2; ] { export dst3B; } + +# +# The following macros are used to constrain bit patterns when using dst2 for a 3-bit src/dest +# These should be used by constructor pattern matching instead of the corresponding dst4 sub-constructor +# +@define DST3B "((b1_d3=3 | b1_d3_2=1) ... & dst3B)" +@define DST3B_AFTER_DSP8 "((b1_d3=3 | b1_d3_2=1) ... 
& dst3B_afterDsp8)" + +# Special dsp8[SP] source/destination - starting point is on dsp8 data +dsp8spB: simm8_dat^":8"^[SP] is simm8_dat & SP { ptr:3 = zext(SP + simm8_dat); export *:1 ptr; } + +dsp8spW: simm8_dat^":8"^[SP] is simm8_dat & SP { ptr:3 = zext(SP + simm8_dat); export *:2 ptr; } + +# Special dsp20[A0] source/destination - starting point is on dsp20 data +dsp20A0B: simm20_dat^":20["^A0^"]" is A0 & simm20_dat { ptr:3 = zext(A0 + simm20_dat); export *:1 ptr; } + +dsp20A0W: simm20_dat^":20["^A0^"]" is A0 & simm20_dat { ptr:3 = zext(A0 + simm20_dat); export *:2 ptr; } + +# +# Bit base - associated add-on data immediately follows instruction byte b2 +# There are three cases which must be broken-out by instruction (regBase, memBaseAx, memBase) +# +# bit-base is bit,byte specified by [Ax] (constrain instruction pattern using b2_d4_13=0x3) - contexts of Ax are exported +memBaseAx: [b2_d4_regAx] is b1_0007; b2_d4_13=0x3 & b2_d4_regAx { export b2_d4_regAx; } # [Ax] (special case! bit operand does not appear) + +# bit-base is 16-bit register: Rx or Ax (constrain instruction pattern using b2_d4_3=0) +regBase: b2_d4_reg16 is b1_0007; b2_d4_23=0x0 & b2_d4_reg16 { export b2_d4_reg16; } # Rx +regBase: b2_d4_regAx is b1_0007; b2_d4_13=0x2 & b2_d4_regAx { export b2_d4_regAx; } # Ax + +# bit-base is byte location within memory +memBase: imm8_base^":8"^[b2_d4_regAxSF] is b1_0007; b2_d4_23=0x2 & b2_d4_regAxSF; imm8_base { ptr:3 = zext(b2_d4_regAxSF + imm8_base); export *:1 ptr; } # dsp:8[Ax|SB] +memBase: simm8_base^":8"^[FB] is b1_0007; b2_d4_23=0x2 & b2_d4=0xb & FB; simm8_base { ptr:3 = zext(FB + simm8_base); export *:1 ptr; } # dsp:8[FB] +memBase: imm16_base^":16"^[b2_d4_regAxSF] is b1_0007; b2_d4_23=0x3 & b2_d4_regAxSF; imm16_base { ptr:3 = zext(b2_d4_regAxSF + imm16_base); export *:1 ptr; } # dsp:16[Ax|SB] +memBase: imm16_base^":16" is b1_0007; b2_d4=0xf; imm16_base { export *:1 imm16_base; } # abs16 (special constant address case) + +memBase11: imm8_dat^":11"^[SB] is SB 
& b1_0007; imm8_dat { ptr:3 = zext(SB + imm8_dat); export *:1 ptr; } # dsp:11[SB] + +# Bit operand associated with regBase operand +# TODO: imm8_0407=0 constraint removed due to sleigh compiler issue +regBit: imm8_0003 is b1_0007; b2_d4; imm8_0003 { export *[const]:1 imm8_0003; } # Rx, Ax + +# Bit operand associated with memBase operand +memBit: imm8_bit is b1_0007; b2_d4; imm8_bit { export *[const]:1 imm8_bit; } # dsp:8[Ax|SB|FB] +memBit: imm16_bit is b1_0007; b2_d4_23=3; imm16_bit { export *[const]:1 imm16_bit; } # dsp:16[Ax|SB], base:16 + +# +# Immediate data operand +# Fixed length - current position is at start of immediate data +# +srcImm3: "#"^b2_0002 is b2_0002 { export *[const]:1 b2_0002; } +srcImm8: "#"^imm8_dat is imm8_dat { export *[const]:1 imm8_dat; } +srcImm16: "#"^imm16_dat is imm16_dat { export *[const]:2 imm16_dat; } + +srcSimm8: "#"^simm8_dat is simm8_dat { export *[const]:1 simm8_dat; } +srcSimm16: "#"^simm16_dat is simm16_dat { export *[const]:2 simm16_dat; } + +# Signed immediate data from 4-bit value: -8 <= value <= 7 +# NOTE! There are two different cases based upon the bits used from b2 +srcSimm4_0407: "#"^b2_simm4_0407 is b2_simm4_0407 { export *[const]:1 b2_simm4_0407; } +srcSimm4_0003: "#"^b2_simm4_0003 is b2_simm4_0003 { export *[const]:1 b2_simm4_0003; } + +# Signed immediate shift amount from 4-bit value: -8 <= value <= -1 || 1 <= value <= 8 +# NOTE! 
There are two different cases based upon the bits used from b2 +srcSimm4Shift_0407: "#"^val is b2_shiftSign_7=0 & b2_0406 [ val = b2_0406 + 1; ] { export *[const]:1 val; } +srcSimm4Shift_0407: "#"^val is b2_shiftSign_7=1 & b2_0406 [ val = -(b2_0406 + 1); ] { export *[const]:1 val; } +srcSimm4Shift_0003: "#"^val is b2_shiftSign_3=0 & b2_0002 [ val = b2_0002 + 1; ] { export *[const]:1 val; } +srcSimm4Shift_0003: "#"^val is b2_shiftSign_3=1 & b2_0002 [ val = -(b2_0002 + 1); ] { export *[const]:1 val; } + +srcZero8: "#0" is b1_0007 { export 0:1; } + +# special 6-bit immediate for INT number +srcIntNum: "#"^imm6_dat is imm6_dat { export *[const]:1 imm6_dat; } + +# +# Offset label operand +# +abs20offset: imm20_dat is imm20_dat { export *:1 imm20_dat; } + +abs20offsetW: imm20_dat is imm20_dat { export *:2 imm20_dat; } + +abs16offset: imm16_dat is imm16_dat { export *:1 imm16_dat; } + +# Relative address offsets +rel16offset1: offs is simm16_dat [ offs = inst_start + 1 + simm16_dat; ] { export *:1 offs; } + +rel8offset1: offs is simm8_dat [ offs = inst_start + 1 + simm8_dat; ] { export *:1 offs; } +rel8offset2: offs is simm8_dat [ offs = inst_start + 2 + simm8_dat; ] { export *:1 offs; } + +rel3offset2: offs is b1_0002 [ offs = inst_start + 2 + b1_0002; ] { export *:1 offs; } + +reloffset_dst4W: dst4W is dst4W { local reladdr = inst_start + dst4W; export *:3 reladdr; } + +reloffset_dst4L: dst4L is dst4L { local reladdr = inst_start + dst4L; export *:3 reladdr; } + +reloffset_dst4T: dst4T is $(DST4T) { local reladdr = inst_start + dst4T; export *:3 reladdr; } + +# +# Conditionals +# +cnd8: "GEU" is cnd8_dat=0x00 { tstCnd:1 = ($(CARRY) == 1); export tstCnd; } # Equal to or greater than (<=), C flag is 1 +cnd8: "GTU" is cnd8_dat=0x01 { tstCnd:1 = (($(CARRY) & (!$(ZERO))) == 1); export tstCnd; } # Greater than (<) +cnd8: "EQ" is cnd8_dat=0x02 { tstCnd:1 = ($(ZERO) == 1); export tstCnd; } # Equal to (=), Z flag is 1 +cnd8: "N" is cnd8_dat=0x03 { tstCnd:1 = ($(SIGN) == 1); 
export tstCnd; } # Negative (0>) +cnd8: "LE" is cnd8_dat=0x04 { tstCnd:1 = ((($(SIGN) ^ $(OVERFLOW)) | $(ZERO)) == 1); export tstCnd; } # Equal to or less than (signed value) (>=) +cnd8: "O" is cnd8_dat=0x05 { tstCnd:1 = ($(OVERFLOW) == 1); export tstCnd; } # O flag is 1 +cnd8: "GE" is cnd8_dat=0x06 { tstCnd:1 = (($(SIGN) ^ $(OVERFLOW)) == 0); export tstCnd; } # Equal to or greater than (signed value) (<=) +cnd8: "LTU" is cnd8_dat=0xf8 { tstCnd:1 = ($(CARRY) == 0); export tstCnd; } # less than (>), C flag is 0 +cnd8: "LEU" is cnd8_dat=0xf9 { tstCnd:1 = (($(CARRY) & (!$(ZERO))) == 0); export tstCnd; } # Equal to or less than (>=) +cnd8: "NE" is cnd8_dat=0xfa { tstCnd:1 = ($(ZERO) == 0); export tstCnd; } # Not Equal to (=), Z flag is 0 +cnd8: "PZ" is cnd8_dat=0xfb { tstCnd:1 = ($(SIGN) == 0); export tstCnd; } # Positive or zero (0<=) +cnd8: "GT" is cnd8_dat=0xfc { tstCnd:1 = ((($(SIGN) ^ $(OVERFLOW)) | $(ZERO)) == 0); export tstCnd; } # Greater than (signed value) (<) +cnd8: "NO" is cnd8_dat=0xfd { tstCnd:1 = ($(OVERFLOW) == 0); export tstCnd; } # O flag is 0 +cnd8: "LT" is cnd8_dat=0xfe { tstCnd:1 = (($(SIGN) ^ $(OVERFLOW)) == 1); export tstCnd; } # less than (signed value) (<=) + +b2cnd4: "GEU" is b2_0003=0x0 { tstCnd:1 = ($(CARRY) == 1); export tstCnd; } # Equal to or greater than (<=), C flag is 1 +b2cnd4: "GTU" is b2_0003=0x1 { tstCnd:1 = (($(CARRY) & (!$(ZERO))) == 1); export tstCnd; } # Greater than (<) +b2cnd4: "EQ" is b2_0003=0x2 { tstCnd:1 = ($(ZERO) == 1); export tstCnd; } # Equal to (=), Z flag is 1 +b2cnd4: "N" is b2_0003=0x3 { tstCnd:1 = ($(SIGN) == 1); export tstCnd; } # Negative (0>) +b2cnd4: "LTU" is b2_0003=0x4 { tstCnd:1 = ($(CARRY) == 0); export tstCnd; } # less than (>), C flag is 0 +b2cnd4: "LEU" is b2_0003=0x5 { tstCnd:1 = (($(CARRY) & (!$(ZERO))) == 0); export tstCnd; } # Equal to or less than (>=) +b2cnd4: "NE" is b2_0003=0x6 { tstCnd:1 = ($(ZERO) == 0); export tstCnd; } # Not Equal to (=), Z flag is 0 +b2cnd4: "PZ" is b2_0003=0x7 { tstCnd:1 
= ($(SIGN) == 0); export tstCnd; } # Positive or zero (0<=) +b2cnd4: "LE" is b2_0003=0x8 { tstCnd:1 = ((($(SIGN) ^ $(OVERFLOW)) | $(ZERO)) == 1); export tstCnd; } # Equal to or less than (signed value) (>=) +b2cnd4: "O" is b2_0003=0x9 { tstCnd:1 = ($(OVERFLOW) == 1); export tstCnd; } # O flag is 1 +b2cnd4: "GE" is b2_0003=0xa { tstCnd:1 = (($(SIGN) ^ $(OVERFLOW)) == 0); export tstCnd; } # Equal to or greater than (signed value) (<=) +b2cnd4: "GT" is b2_0003=0xc { tstCnd:1 = ((($(SIGN) ^ $(OVERFLOW)) | $(ZERO)) == 0); export tstCnd; } # Greater than (signed value) (<) +b2cnd4: "NO" is b2_0003=0xd { tstCnd:1 = ($(OVERFLOW) == 0); export tstCnd; } # O flag is 0 +b2cnd4: "LT" is b2_0003=0xe { tstCnd:1 = (($(SIGN) ^ $(OVERFLOW)) == 1); export tstCnd; } # less than (signed value) (<=) + +# Special case of b2cnd4 where b2_0003=1 (see JCnd) +b2cnd3: "LE" is b2_0002=0x0 { tstCnd:1 = ((($(SIGN) ^ $(OVERFLOW)) | $(ZERO)) == 1); export tstCnd; } # Equal to or less than (signed value) (>=) +b2cnd3: "O" is b2_0002=0x1 { tstCnd:1 = ($(OVERFLOW) == 1); export tstCnd; } # O flag is 1 +b2cnd3: "GE" is b2_0002=0x2 { tstCnd:1 = (($(SIGN) ^ $(OVERFLOW)) == 0); export tstCnd; } # Equal to or greater than (signed value) (<=) +b2cnd3: "GT" is b2_0002=0x4 { tstCnd:1 = ((($(SIGN) ^ $(OVERFLOW)) | $(ZERO)) == 0); export tstCnd; } # Greater than (signed value) (<) +b2cnd3: "NO" is b2_0002=0x5 { tstCnd:1 = ($(OVERFLOW) == 0); export tstCnd; } # O flag is 0 +b2cnd3: "LT" is b2_0002=0x6 { tstCnd:1 = (($(SIGN) ^ $(OVERFLOW)) == 1); export tstCnd; } # less than (signed value) (<=) + +b1cnd3: "LTU" is b1_0002=4 { tstCnd:1 = ($(CARRY) == 0); export tstCnd; } # less than (>), C flag is 0 +b1cnd3: "LEU" is b1_0002=5 { tstCnd:1 = (($(CARRY) & (!$(ZERO))) == 0); export tstCnd; } # Equal to or less than (>=) +b1cnd3: "NE" is b1_0002=6 { tstCnd:1 = ($(ZERO) == 0); export tstCnd; } # Not Equal to (=), Z flag is 0 +b1cnd3: "PZ" is b1_0002=7 { tstCnd:1 = ($(SIGN) == 0); export tstCnd; } # Positive or zero 
(0<=) +b1cnd3: "GEU" is b1_0002=0 { tstCnd:1 = ($(CARRY) == 1); export tstCnd; } # Equal to or greater than (<=), C flag is 1 +b1cnd3: "GTU" is b1_0002=1 { tstCnd:1 = (($(CARRY) & (!$(ZERO))) == 1); export tstCnd; } # Greater than (<) +b1cnd3: "EQ" is b1_0002=2 { tstCnd:1 = ($(ZERO) == 1); export tstCnd; } # Equal to (=), Z flag is 1 +b1cnd3: "N" is b1_0002=3 { tstCnd:1 = ($(SIGN) == 1); export tstCnd; } # Negative (0>) + +# +# Flag bit operand +# +flagBit: "C" is b2_0406=0 { export 0:2; } +flagBit: "D" is b2_0406=1 { export 1:2; } +flagBit: "Z" is b2_0406=2 { export 2:2; } +flagBit: "S" is b2_0406=3 { export 3:2; } +flagBit: "B" is b2_0406=4 { export 4:2; } +flagBit: "O" is b2_0406=5 { export 5:2; } +flagBit: "I" is b2_0406=6 { export 6:2; } +flagBit: "U" is b2_0406=7 { export 7:2; } + +# +# Instruction Constructors +# +### ABS ### +:ABS.B dst4B is (b1_0107=0x3b & b1_size_0=0; b2_0407=0xf) ... & dst4B { + local tmp = dst4B; + $(OVERFLOW) = (tmp == 0x80); + local ztst = (tmp s< 0); + tmp = (zext(ztst) * -tmp) + (zext(!ztst) * tmp); + dst4B = tmp; + setResultFlags(tmp); +} + +# 0111 0110 1111 0100 ABS.B A0 +# 0111 0110 1111 0001 ABS.B R0H +:ABS.B dst4Ax is (b1_0107=0x3b & b1_size_0=0; b2_0407=0xf) & $(DST4AX) { + local tmp = dst4Ax:1; + $(OVERFLOW) = (tmp == 0x80); + local ztst = (tmp s< 0); + tmp = (zext(ztst) * -tmp) + (zext(!ztst) * tmp); + dst4Ax = zext(tmp); + setResultFlags(tmp); +} + +:ABS.W dst4W is (b1_0107=0x3b & b1_size_0=1; b2_0407=0xf) ... & dst4W { + local tmp = dst4W; + $(OVERFLOW) = (tmp == 0x8000); + local ztst = (tmp s< 0); + tmp = (zext(ztst) * -tmp) + (zext(!ztst) * tmp); + dst4W = tmp; + setResultFlags(tmp); +} + +### ADC ### + +# (1) ADC.B #simm, dst +:ADC.B srcSimm8, dst4B is ((b1_0107=0x3b & b1_size_0=0; b2_0407=0x6) ... 
& dst4B); srcSimm8 { + tmp:1 = dst4B; + c:1 = $(CARRY); + setAdd3Flags(tmp, srcSimm8, c); + tmp = tmp + srcSimm8 + c; + dst4B = tmp; + setResultFlags(tmp); +} + +# (1) ADC.B #simm, Ax +:ADC.B srcSimm8, dst4Ax is ((b1_0107=0x3b & b1_size_0=0; b2_0407=0x6) & $(DST4AX)); srcSimm8 { + tmp:1 = dst4Ax:1; + c:1 = $(CARRY); + setAdd3Flags(tmp, srcSimm8, c); + tmp = tmp + srcSimm8 + c; + dst4Ax = zext(tmp); + setResultFlags(tmp); +} + +# (1) ADC.W #simm, dst +:ADC.W srcSimm16, dst4W is ((b1_0107=0x3b & b1_size_0=1; b2_0407=0x6) ... & dst4W); srcSimm16 { + tmp:2 = dst4W; + c:2 = zext($(CARRY)); + setAdd3Flags(tmp, srcSimm16, c); + tmp = tmp + srcSimm16 + c; + dst4W = tmp; + setResultFlags(tmp); +} + +# (2) ADC.B src, dst +:ADC.B src4B, dst4B_afterSrc4 is (b1_0107=0x58 & b1_size_0=0) ... & src4B ... & dst4B_afterSrc4 ... { + tmp:1 = dst4B_afterSrc4; + src:1 = src4B; + c:1 = $(CARRY); + setAdd3Flags(tmp, src, c); + tmp = tmp + src + c; + dst4B_afterSrc4 = tmp; + setResultFlags(tmp); +} + +# (2) ADC.B src, Ax +:ADC.B src4B, dst4Ax is (b1_0107=0x58 & b1_size_0=0) ... & src4B & $(DST4AX) ... { + tmp:1 = dst4Ax:1; + src:1 = src4B; + c:1 = $(CARRY); + setAdd3Flags(tmp, src, c); + tmp = tmp + src + c; + dst4Ax = zext(tmp); + setResultFlags(tmp); +} + +# (2) ADC.W src, dst +:ADC.W src4W, dst4W_afterSrc4 is (b1_0107=0x58 & b1_size_0=1) ... & src4W ... & dst4W_afterSrc4 ... { + tmp:2 = dst4W_afterSrc4; + src:2 = src4W; + c:2 = zext($(CARRY)); + setAdd3Flags(tmp, src, c); + tmp = tmp + src + c; + dst4W_afterSrc4 = tmp; + setResultFlags(tmp); +} + + +### ADCF ### + +:ADCF.B dst4B is (b1_0107=0x3b & b1_size_0=0; b2_0407=0xe) ... 
& dst4B { + tmp:1 = dst4B; + c:1 = $(CARRY); + setAddFlags(tmp, c); + tmp = tmp + c; + dst4B = tmp; + setResultFlags(tmp); +} + +:ADCF.B dst4Ax is (b1_0107=0x3b & b1_size_0=0; b2_0407=0xe) & $(DST4AX) { + tmp:1 = dst4Ax:1; + c:1 = $(CARRY); + setAddFlags(tmp, c); + tmp = tmp + c; + dst4Ax = zext(tmp); + setResultFlags(tmp); +} + +:ADCF.W dst4W is (b1_0107=0x3b & b1_size_0=1; b2_0407=0xe) ... & dst4W { + tmp:2 = dst4W; + c:2 = zext($(CARRY)); + setAddFlags(tmp, c); + tmp = tmp + c; + dst4W = tmp; + setResultFlags(tmp); +} + +### ADD ### + +# (1) ADD.B:G #simm, dst +:ADD^".B:G" srcSimm8, dst4B is ((b1_0107=0x3b & b1_size_0=0; b2_0407=0x4) ... & dst4B); srcSimm8 { + tmp:1 = dst4B; + setAddFlags(tmp, srcSimm8); + tmp = tmp + srcSimm8; + dst4B = tmp; + setResultFlags(tmp); +} + +# (1) ADD.B:G #simm, Ax +:ADD^".B:G" srcSimm8, dst4Ax is ((b1_0107=0x3b & b1_size_0=0; b2_0407=0x4) & $(DST4AX)); srcSimm8 { + tmp:1 = dst4Ax:1; + setAddFlags(tmp, srcSimm8); + tmp = tmp + srcSimm8; + dst4Ax = zext(tmp); + setResultFlags(tmp); +} + +# (1) ADD.W:G #simm, dst +:ADD^".W:G" srcSimm16, dst4W is ((b1_0107=0x3b & b1_size_0=1; b2_0407=0x4) ... & dst4W); srcSimm16 { + tmp:2 = dst4W; + setAddFlags(tmp, srcSimm16); + tmp = tmp + srcSimm16; + dst4W = tmp; + setResultFlags(tmp); +} + +# (2) ADD.B:Q #simm4, dst +:ADD^".B:Q" srcSimm4_0407, dst4B is (b1_0107=0x64 & b1_size_0=0; srcSimm4_0407) ... & dst4B { + tmp:1 = dst4B; + setAddFlags(tmp, srcSimm4_0407); + tmp = tmp + srcSimm4_0407; + dst4B = tmp; + setResultFlags(tmp); +} + +# (2) ADD.B:Q #simm4, Ax +:ADD^".B:Q" srcSimm4_0407, dst4Ax is (b1_0107=0x64 & b1_size_0=0; srcSimm4_0407) & $(DST4AX) { + tmp:1 = dst4Ax:1; + setAddFlags(tmp, srcSimm4_0407); + tmp = tmp + srcSimm4_0407; + dst4Ax = zext(tmp); + setResultFlags(tmp); +} + +# (2) ADD.W:Q #simm4, dst +:ADD^".W:Q" srcSimm4_0407, dst4W is (b1_0107=0x64 & b1_size_0=1; srcSimm4_0407) ... 
& dst4W { + tmp:2 = dst4W; + imm:2 = sext(srcSimm4_0407); + setAddFlags(tmp, imm); + tmp = tmp + imm; + dst4W = tmp; + setResultFlags(tmp); +} + +# (3) ADD.B:S #imm, dst +:ADD^".B:S" srcSimm8, dst3B_afterDsp8 is (b1_0307=0x10; srcSimm8) ... & $(DST3B_AFTER_DSP8) { + tmp:1 = dst3B_afterDsp8; + setAddFlags(tmp, srcSimm8); + tmp = tmp + srcSimm8; + dst3B_afterDsp8 = tmp; + setResultFlags(tmp); +} + +# (4) ADD.B:G src, dst +:ADD^".B:G" src4B, dst4B_afterSrc4 is (b1_0107=0x50 & b1_size_0=0) ... & src4B ... & dst4B_afterSrc4 ... { + tmp:1 = dst4B_afterSrc4; + src:1 = src4B; + setAddFlags(tmp, src); + tmp = tmp + src; + dst4B_afterSrc4 = tmp; + setResultFlags(tmp); +} + +# (4) ADD.B:G src, Ax +:ADD^".B:G" src4B, dst4Ax is (b1_0107=0x50 & b1_size_0=0) ... & src4B & $(DST4AX) ... { + tmp:1 = dst4Ax:1; + src:1 = src4B; + setAddFlags(tmp, src); + tmp = tmp + src; + dst4Ax = zext(tmp); + setResultFlags(tmp); +} + +# (4) ADD.W:G src, dst +:ADD^".W:G" src4W, dst4W_afterSrc4 is (b1_0107=0x50 & b1_size_0=1) ... & src4W ... & dst4W_afterSrc4 ... { + tmp:2 = dst4W_afterSrc4; + src:2 = src4W; + setAddFlags(tmp, src); + tmp = tmp + src; + dst4W_afterSrc4 = tmp; + setResultFlags(tmp); +} + +# (5) ADD.B:S src, R0H/R0L +:ADD^".B:S" dst2B, b1_2_reg8 is (b1_0307=0x4 & b1_2_reg8) ... 
& dst2B { + src:1 = dst2B; + setAddFlags(b1_2_reg8, src); + b1_2_reg8 = b1_2_reg8 + src; + setResultFlags(b1_2_reg8); +} + +# (6) ADD.B:G #simm, SP +:ADD^".B:G" srcSimm8, SP is SP & b1_0107=0x3e & b1_size_0=0; b2_0007=0xeb; srcSimm8 { + imm:2 = sext(srcSimm8); + setAddFlags(SP, imm); + SP = SP + imm; + setResultFlags(SP); +} + +# (6) ADD.W:G #simm, SP +:ADD^".W:G" srcSimm16, SP is SP & b1_0107=0x3e & b1_size_0=1; b2_0007=0xeb; srcSimm16 { + setAddFlags(SP, srcSimm16); + SP = SP + srcSimm16; + setResultFlags(SP); +} + +# (7) ADD.W:Q #simm, SP +:ADD^".B:Q" srcSimm4_0003, SP is SP & b1_0007=0x7d; b2_0407=0xb & srcSimm4_0003 { + imm:2 = sext(srcSimm4_0003); + setAddFlags(SP, imm); + SP = SP + imm; + setResultFlags(SP); +} + +### ADJNZ ### + +:ADJNZ.B srcSimm4_0407, dst4B is ((b1_0107=0x7c & b1_size_0=0; srcSimm4_0407) ... & dst4B); rel8offset2 { + tmp:1 = dst4B + srcSimm4_0407; + dst4B = tmp; + if (tmp != 0) goto rel8offset2; +} + +:ADJNZ.B srcSimm4_0407, dst4Ax is ((b1_0107=0x7c & b1_size_0=0; srcSimm4_0407) & $(DST4AX)); rel8offset2 { + tmp:1 = dst4Ax:1 + srcSimm4_0407; + dst4Ax = zext(tmp); + if (tmp != 0) goto rel8offset2; +} + +:ADJNZ.W srcSimm4_0407, dst4W is ((b1_0107=0x7c & b1_size_0=1; srcSimm4_0407) ... & dst4W); rel8offset2 { + tmp:2 = dst4W + sext(srcSimm4_0407); + dst4W = tmp; + if (tmp != 0) goto rel8offset2; +} + +### AND ### + +# (1) AND.B:G #imm, dst +:AND^".B:G" srcImm8, dst4B is ((b1_0107=0x3b & b1_size_0=0; b2_0407=0x2) ... & dst4B); srcImm8 { + tmp:1 = dst4B & srcImm8; + dst4B = tmp; + setResultFlags(tmp); +} + +# (1) AND.B:G #imm, Ax +:AND^".B:G" srcImm8, dst4Ax is ((b1_0107=0x3b & b1_size_0=0; b2_0407=0x2) & $(DST4AX)); srcImm8 { + tmp:1 = dst4Ax:1 & srcImm8; + dst4Ax = zext(tmp); + setResultFlags(tmp); +} + +# (1) AND.W:G #imm, dst +:AND^".W:G" srcImm16, dst4W is ((b1_0107=0x3b & b1_size_0=1; b2_0407=0x2) ... 
& dst4W); srcImm16 { + tmp:2 = dst4W & srcImm16; + dst4W = tmp; + setResultFlags(tmp); +} + +# (2) AND.B:S #imm, dst +:AND^".B:S" srcImm8, dst3B_afterDsp8 is (b1_0307=0x12; srcImm8) ... & $(DST3B_AFTER_DSP8) { + tmp:1 = dst3B_afterDsp8 & srcImm8; + dst3B_afterDsp8 = tmp; + setResultFlags(tmp); +} + +# (3) AND.B:G src, dst +:AND^".B:G" src4B, dst4B_afterSrc4 is (b1_0107=0x48 & b1_size_0=0) ... & src4B ... & dst4B_afterSrc4 ... { + tmp:1 = dst4B_afterSrc4 & src4B; + dst4B_afterSrc4 = tmp; + setResultFlags(tmp); +} + +# (3) AND.B:G src, Ax +:AND^".B:G" src4B, dst4Ax is (b1_0107=0x48 & b1_size_0=0) ... & src4B & $(DST4AX) ... { + tmp:1 = dst4Ax:1 & src4B; + dst4Ax = zext(tmp); + setResultFlags(tmp); +} + +# (3) AND.W:G src, dst +:AND^".W:G" src4W, dst4W_afterSrc4 is (b1_0107=0x48 & b1_size_0=1) ... & src4W ... & dst4W_afterSrc4 ... { + tmp:2 = dst4W_afterSrc4 & src4W; + dst4W_afterSrc4 = tmp; + setResultFlags(tmp); +} + +# (4) AND.B:S src, R0L/R0H +:AND^".B:S" dst2B, b1_2_reg8 is (b1_0307=0x2 & b1_2_reg8) ... & dst2B { + tmp:1 = dst2B & b1_2_reg8; + b1_2_reg8 = tmp; + setResultFlags(tmp); +} + +### BAND ### + +# BAND bit,Rx/Ax +:BAND regBit, regBase is (b1_0007=0x7e; b2_0407=0x4 & b2_d4_3=0) ... & regBase ... & regBit { + bitValue:2 = (regBase >> regBit) & 1; + $(CARRY) = $(CARRY) & bitValue:1; +} + +# BAND [Ax] +:BAND memBaseAx is (b1_0007=0x7e; b2_0407=0x4 & b2_d4_13=0x3) & memBaseAx { + ptr:3 = zext(memBaseAx >> 3); + bit:1 = memBaseAx:1 & 0x7; + val:1 = *:1 ptr; + bitValue:1 = (val >> bit) & 1; + $(CARRY) = $(CARRY) & bitValue; +} + +# BAND bit,base +:BAND memBit, memBase is (b1_0007=0x7e; b2_0407=0x4) ... & memBase & memBit { + bitValue:1 = (memBase >> memBit) & 1; + $(CARRY) = $(CARRY) & bitValue; +} + + +### BCLR ### + +# (1) BCLR:G bit,Rx/Ax +:BCLR^":G" regBit, regBase is (b1_0007=0x7e; b2_0407=0x8 & b2_d4_3=0) ... & regBase ... 
& regBit { + mask:2 = ~(1 << regBit); + regBase = regBase & mask; +} + +# (1) BCLR:G [Ax] +:BCLR^":G" memBaseAx is (b1_0007=0x7e; b2_0407=0x8 & b2_d4_13=0x3) & memBaseAx { + ptr:3 = zext(memBaseAx >> 3); + bit:1 = memBaseAx:1 & 0x7; + val:1 = *:1 ptr; + mask:1 = ~(1 << bit); + *:1 ptr = val & mask; +} + +# (1) BCLR:G bit,base +:BCLR^":G" memBit, memBase is (b1_0007=0x7e; b2_0407=0x8) ... & memBase & memBit { + mask:1 = ~(1 << memBit); + memBase = memBase & mask; +} + +# (2) BCLR:S bit,base:11[SB] +:BCLR^":S" b1_bit, memBase11 is (b1_0307=0x08 & b1_bit) ... & memBase11 { + mask:1 = ~(1 << b1_bit); + memBase11 = memBase11 & mask; +} + +### BMcnd ### + +# (1) BMcnd bit,Rx/Ax +:BM^cnd8 regBit, regBase is ((b1_0007=0x7e; b2_0407=0x2 & b2_d4_3=0) ... & regBase ... & regBit); cnd8 { + mask:2 = ~(1 << regBit); + regBase = ((zext(cnd8) << regBit) | (regBase & mask)); +} + +# (1) BMcnd [Ax] +:BM^cnd8 memBaseAx is ((b1_0007=0x7e; b2_0407=0x2 & b2_d4_13=0x3) & memBaseAx); cnd8 { + ptr:3 = zext(memBaseAx >> 3); + bit:1 = memBaseAx:1 & 0x7; + val:1 = *:1 ptr; + mask:1 = ~(1 << bit); + *:1 ptr = ((cnd8 << bit) | (val & mask)); +} + +# (1) BMcnd bit,base +:BM^cnd8 memBit, memBase is ((b1_0007=0x7e; b2_0407=0x2) ... & memBase & memBit); cnd8 { + mask:1 = ~(1 << memBit); + memBase = ((cnd8 << memBit) | (memBase & mask)); +} + +# (2) BMcnd C +:BM^b2cnd4 "C" is b1_0007=0x7d; b2_0407=0xd & b2cnd4 { + $(CARRY) = b2cnd4; +} + +### BNAND ### + +# BNAND bit,Rx/Ax +:BNAND regBit, regBase is (b1_0007=0x7e; b2_0407=0x5 & b2_d4_3=0) ... & regBase ... 
& regBit { + mask:2 = (1 << regBit); + bitValue:2 = (regBase & mask); + $(CARRY) = $(CARRY) && (bitValue == 0); +} + +# BNAND [Ax] +:BNAND memBaseAx is (b1_0007=0x7e; b2_0407=0x5 & b2_d4_13=0x3) & memBaseAx { + ptr:3 = zext(memBaseAx >> 3); + bit:1 = memBaseAx:1 & 0x7; + val:1 = *:1 ptr; + mask:1 = (1 << bit); + bitValue:1 = (val & mask); + $(CARRY) = $(CARRY) && (bitValue == 0); +} + +# BNAND bit,base +:BNAND memBit, memBase is (b1_0007=0x7e; b2_0407=0x5) ... & memBase & memBit { + mask:1 = (1 << memBit); + bitValue:1 = (memBase & mask); + $(CARRY) = $(CARRY) && (bitValue == 0); +} + +### BNOR ### + +# BNOR bit,Rx/Ax +:BNOR regBit, regBase is (b1_0007=0x7e; b2_0407=0x7 & b2_d4_3=0) ... & regBase ... & regBit { + mask:2 = (1 << regBit); + bitValue:2 = (regBase & mask); + $(CARRY) = $(CARRY) || (bitValue == 0); +} + +# BNOR [Ax] +:BNOR memBaseAx is (b1_0007=0x7e; b2_0407=0x7 & b2_d4_13=0x3) & memBaseAx { + ptr:3 = zext(memBaseAx >> 3); + bit:1 = memBaseAx:1 & 0x7; + val:1 = *:1 ptr; + mask:1 = (1 << bit); + bitValue:1 = (val & mask); + $(CARRY) = $(CARRY) || (bitValue == 0); +} + +# BNOR bit,base +:BNOR memBit, memBase is (b1_0007=0x7e; b2_0407=0x7) ... & memBase & memBit { + mask:1 = (1 << memBit); + bitValue:1 = (memBase & mask); + $(CARRY) = $(CARRY) || (bitValue == 0); +} + +### BNOT ### + +# (1) BNOT:G bit,Rx/Ax +:BNOT^":G" regBit, regBase is (b1_0007=0x7e; b2_0407=0xa & b2_d4_3=0) ... & regBase ... & regBit { + mask:2 = (1 << regBit); + bitValue:2 = (~regBase & mask); + regBase = (regBase & ~mask) | bitValue; +} + +# (1) BNOT:G [Ax] +:BNOT^":G" memBaseAx is (b1_0007=0x7e; b2_0407=0xa & b2_d4_13=0x3) & memBaseAx { + ptr:3 = zext(memBaseAx >> 3); + bit:1 = memBaseAx:1 & 0x7; + val:1 = *:1 ptr; + mask:1 = (1 << bit); + bitValue:1 = (~val & mask); + *:1 ptr = (val & ~mask) | bitValue; +} + +# (1) BNOT:G bit,base +:BNOT^":G" memBit, memBase is (b1_0007=0x7e; b2_0407=0xa) ... 
& memBase & memBit { + mask:1 = (1 << memBit); + val:1 = memBase; + bitValue:1 = (~val & mask); + memBase = (val & ~mask) | bitValue; +} + +# (2) BNOT:S bit,base:11[SB] +:BNOT^":S" b1_bit, memBase11 is (b1_0307=0x0a & b1_bit) ... & memBase11 { + mask:1 = (1 << b1_bit); + val:1 = memBase11; + bitValue:1 = (~val & mask); + memBase11 = (val & ~mask) | bitValue; +} + +### BNTST ### + +# BNTST bit,Rx/Ax +:BNTST regBit, regBase is (b1_0007=0x7e; b2_0407=0x3 & b2_d4_3=0) ... & regBase ... & regBit { + mask:2 = (1 << regBit); + bitValue:2 = (regBase & mask); + z:1 = (bitValue == 0); + $(CARRY) = z; + $(ZERO) = z; +} + +# BNTST [Ax] +:BNTST memBaseAx is (b1_0007=0x7e; b2_0407=0x3 & b2_d4_13=0x3) & memBaseAx { + ptr:3 = zext(memBaseAx >> 3); + bit:1 = memBaseAx:1 & 0x7; + val:1 = *:1 ptr; + mask:1 = (1 << bit); + bitValue:1 = (val & mask); + z:1 = (bitValue == 0); + $(CARRY) = z; + $(ZERO) = z; +} + +# BNTST bit,base +:BNTST memBit, memBase is (b1_0007=0x7e; b2_0407=0x3) ... & memBase & memBit { + mask:1 = (1 << memBit); + bitValue:1 = (memBase & mask); + z:1 = (bitValue == 0); + $(CARRY) = z; + $(ZERO) = z; +} + +### BNXOR ### + +# BNXOR bit,Rx/Ax +:BNXOR regBit, regBase is (b1_0007=0x7e; b2_0407=0xd & b2_d4_3=0) ... & regBase ... & regBit { + mask:2 = (1 << regBit); + bitValue:2 = (regBase & mask); + $(CARRY) = $(CARRY) ^ (bitValue == 0); +} + +# BNXOR [Ax] +:BNXOR memBaseAx is (b1_0007=0x7e; b2_0407=0xd & b2_d4_13=0x3) & memBaseAx { + ptr:3 = zext(memBaseAx >> 3); + bit:1 = memBaseAx:1 & 0x7; + val:1 = *:1 ptr; + mask:1 = (1 << bit); + bitValue:1 = (val & mask); + $(CARRY) = $(CARRY) ^ (bitValue == 0); +} + +# BNXOR bit,base +:BNXOR memBit, memBase is (b1_0007=0x7e; b2_0407=0xd) ... & memBase & memBit { + mask:1 = (1 << memBit); + bitValue:1 = (memBase & mask); + $(CARRY) = $(CARRY) ^ (bitValue == 0); +} + +### BOR ### + +# BOR bit,Rx/Ax +:BOR regBit, regBase is (b1_0007=0x7e; b2_0407=0x6 & b2_d4_3=0) ... & regBase ... 
& regBit { + mask:2 = (1 << regBit); + bitValue:2 = (regBase & mask); + $(CARRY) = $(CARRY) || (bitValue != 0); +} + +# BOR [Ax] +:BOR memBaseAx is (b1_0007=0x7e; b2_0407=0x6 & b2_d4_13=0x3) & memBaseAx { + ptr:3 = zext(memBaseAx >> 3); + bit:1 = memBaseAx:1 & 0x7; + val:1 = *:1 ptr; + mask:1 = (1 << bit); + bitValue:1 = (val & mask); + $(CARRY) = $(CARRY) || (bitValue != 0); +} + +# BOR bit,base +:BOR memBit, memBase is (b1_0007=0x7e; b2_0407=0x6) ... & memBase & memBit { + mask:1 = (1 << memBit); + bitValue:1 = (memBase & mask); + $(CARRY) = $(CARRY) || (bitValue != 0); +} + +### BRK ### + +:BRK is b1_0007=0x0 { + # most likely not necessary to model break behavior + Break(); +} + +### BSET ### + +# (1) BSET:G bit,Rx/Ax +:BSET^":G" regBit, regBase is (b1_0007=0x7e; b2_0407=0x9 & b2_d4_3=0) ... & regBase ... & regBit { + mask:2 = (1 << regBit); + regBase = regBase | mask; +} + +# (1) BSET:G [Ax] +:BSET^":G" memBaseAx is (b1_0007=0x7e; b2_0407=0x9 & b2_d4_13=0x3) & memBaseAx { + ptr:3 = zext(memBaseAx >> 3); + bit:1 = memBaseAx:1 & 0x7; + val:1 = *:1 ptr; + mask:1 = (1 << bit); + *:1 ptr = val | mask; +} + +# (1) BSET:G bit,base +:BSET^":G" memBit, memBase is (b1_0007=0x7e; b2_0407=0x9) ... & memBase & memBit { + mask:1 = (1 << memBit); + memBase = memBase | mask; +} + +# (2) BSET:S bit,base:11[SB] +:BSET^":S" b1_bit, memBase11 is (b1_0307=0x09 & b1_bit) ... & memBase11 { + mask:1 = (1 << b1_bit); + memBase11 = memBase11 | mask; +} + +### BTST ### + +# (1) BTST:G bit,Rx/Ax +:BTST^":G" regBit, regBase is (b1_0007=0x7e; b2_0407=0xb & b2_d4_3=0) ... & regBase ... 
& regBit { + mask:2 = (1 << regBit); + bitValue:2 = (regBase & mask); + z:1 = (bitValue == 0); + $(CARRY) = !z; + $(ZERO) = z; +} + +# (1) BTST:G [Ax] +:BTST^":G" memBaseAx is (b1_0007=0x7e; b2_0407=0xb & b2_d4_13=0x3) & memBaseAx { + ptr:3 = zext(memBaseAx >> 3); + bit:1 = memBaseAx:1 & 0x7; + val:1 = *:1 ptr; + mask:1 = (1 << bit); + bitValue:1 = (val & mask); + z:1 = (bitValue == 0); + $(CARRY) = !z; + $(ZERO) = z; +} + +# (1) BTST:G bit,base +:BTST^":G" memBit, memBase is (b1_0007=0x7e; b2_0407=0xb) ... & memBase & memBit { + mask:1 = (1 << memBit); + bitValue:1 = (memBase & mask); + z:1 = (bitValue == 0); + $(CARRY) = !z; + $(ZERO) = z; +} + +# (2) BTST:S bit,base:11[SB] +:BTST^":S" b1_bit, memBase11 is (b1_0307=0x0b & b1_bit) ... & memBase11 { + mask:1 = (1 << b1_bit); + bitValue:1 = (memBase11 & mask); + z:1 = (bitValue == 0); + $(CARRY) = !z; + $(ZERO) = z; +} + +### BTSTC ### + +# BTSTC bit,Rx/Ax +:BTSTC regBit, regBase is (b1_0007=0x7e; b2_0407=0x0 & b2_d4_3=0) ... & regBase ... & regBit { + mask:2 = (1 << regBit); + bitValue:2 = (regBase & mask); + z:1 = (bitValue == 0); + $(CARRY) = !z; + $(ZERO) = z; + regBase = regBase & ~mask; +} + +# BTSTC [Ax] +:BTSTC memBaseAx is (b1_0007=0x7e; b2_0407=0x0 & b2_d4_13=0x3) & memBaseAx { + ptr:3 = zext(memBaseAx >> 3); + bit:1 = memBaseAx:1 & 0x7; + val:1 = *:1 ptr; + mask:1 = (1 << bit); + bitValue:1 = (val & mask); + z:1 = (bitValue == 0); + $(CARRY) = !z; + $(ZERO) = z; + *:1 ptr = val & ~mask; +} + +# BTSTC bit,base +:BTSTC memBit, memBase is (b1_0007=0x7e; b2_0407=0x0) ... & memBase & memBit { + mask:1 = (1 << memBit); + val:1 = memBase; + bitValue:1 = (val & mask); + z:1 = (bitValue == 0); + $(CARRY) = !z; + $(ZERO) = z; + memBase = val & ~mask; +} + +### BTSTS ### + +# BTSTS bit,Rx/Ax +:BTSTS regBit, regBase is (b1_0007=0x7e; b2_0407=0x1 & b2_d4_3=0) ... & regBase ... 
& regBit { + mask:2 = (1 << regBit); + bitValue:2 = (regBase & mask); + z:1 = (bitValue == 0); + $(CARRY) = !z; + $(ZERO) = z; + regBase = regBase | mask; +} + +# BTSTS [Ax] +:BTSTS memBaseAx is (b1_0007=0x7e; b2_0407=0x1 & b2_d4_13=0x3) & memBaseAx { + ptr:3 = zext(memBaseAx >> 3); + bit:1 = memBaseAx:1 & 0x7; + val:1 = *:1 ptr; + mask:1 = (1 << bit); + bitValue:1 = (val & mask); + z:1 = (bitValue == 0); + $(CARRY) = !z; + $(ZERO) = z; + *:1 ptr = val | mask; +} + +# BTSTS bit,base +:BTSTS memBit, memBase is (b1_0007=0x7e; b2_0407=0x1) ... & memBase & memBit { + mask:1 = (1 << memBit); + val:1 = memBase; + bitValue:1 = (val & mask); + z:1 = (bitValue == 0); + $(CARRY) = !z; + $(ZERO) = z; + memBase = val | mask; +} + +### BXOR ### + +# BXOR bit,Rx/Ax +:BXOR regBit, regBase is (b1_0007=0x7e; b2_0407=0xc & b2_d4_3=0) ... & regBase ... & regBit { + mask:2 = (1 << regBit); + bitValue:2 = (regBase & mask); + $(CARRY) = $(CARRY) ^ (bitValue != 0); +} + +# BXOR [Ax] +:BXOR memBaseAx is (b1_0007=0x7e; b2_0407=0xc & b2_d4_13=0x3) & memBaseAx { + ptr:3 = zext(memBaseAx >> 3); + bit:1 = memBaseAx:1 & 0x7; + val:1 = *:1 ptr; + mask:1 = (1 << bit); + bitValue:1 = (val & mask); + $(CARRY) = $(CARRY) ^ (bitValue != 0); +} + +# BXOR bit,base +:BXOR memBit, memBase is (b1_0007=0x7e; b2_0407=0xc) ... & memBase & memBit { + mask:1 = (1 << memBit); + bitValue:1 = (memBase & mask); + $(CARRY) = $(CARRY) ^ (bitValue != 0); +} + +### CMP ### + +# (1) CMP.B:G #simm, dst +:CMP^".B:G" srcSimm8, dst4B is ((b1_0107=0x3b & b1_size_0=0; b2_0407=0x8) ... 
& dst4B); srcSimm8 { + tmp:1 = dst4B; + setSubtractFlags(tmp, srcSimm8); + tmp = tmp - srcSimm8; + setResultFlags(tmp); +} + +# (1) CMP.B:G #simm, Ax +:CMP^".B:G" srcSimm8, dst4Ax is ((b1_0107=0x3b & b1_size_0=0; b2_0407=0x8) & $(DST4AX)); srcSimm8 { + tmp:1 = dst4Ax:1; + setSubtractFlags(tmp, srcSimm8); + tmp = tmp - srcSimm8; + setResultFlags(tmp); +} + +# (1) CMP.W:G #simm, dst +:CMP^".W:G" srcSimm16, dst4W is ((b1_0107=0x3b & b1_size_0=1; b2_0407=0x8) ... & dst4W); srcSimm16 { + tmp:2 = dst4W; + setSubtractFlags(tmp, srcSimm16); + tmp = tmp - srcSimm16; + setResultFlags(tmp); +} + +# (2) CMP.B:Q #simm4, dst +:CMP^".B:Q" srcSimm4_0407, dst4B is (b1_0107=0x68 & b1_size_0=0; srcSimm4_0407) ... & dst4B { + tmp:1 = dst4B; + setSubtractFlags(tmp, srcSimm4_0407); + tmp = tmp - srcSimm4_0407; + setResultFlags(tmp); +} + +# (2) CMP.B:Q #simm4, Ax +:CMP^".B:Q" srcSimm4_0407, dst4Ax is (b1_0107=0x68 & b1_size_0=0; srcSimm4_0407) & $(DST4AX) { + tmp:1 = dst4Ax:1; + setSubtractFlags(tmp, srcSimm4_0407); + tmp = tmp - srcSimm4_0407; + setResultFlags(tmp); +} + +# (2) CMP.W:Q #simm4, dst +:CMP^".W:Q" srcSimm4_0407, dst4W is (b1_0107=0x68 & b1_size_0=1; srcSimm4_0407) ... & dst4W { + tmp:2 = dst4W; + imm:2 = sext(srcSimm4_0407); + setSubtractFlags(tmp, imm); + tmp = tmp - imm; + setResultFlags(tmp); +} + +# (3) CMP.B:S #imm, dst +:CMP^".B:S" srcSimm8, dst3B_afterDsp8 is (b1_0307=0x1c; srcSimm8) ... & $(DST3B_AFTER_DSP8) { + tmp:1 = dst3B_afterDsp8; + setSubtractFlags(tmp, srcSimm8); + tmp = tmp - srcSimm8; + setResultFlags(tmp); +} + +# (4) CMP.B:G src, dst +:CMP^".B:G" src4B, dst4B_afterSrc4 is (b1_0107=0x60 & b1_size_0=0) ... & src4B ... & dst4B_afterSrc4 ... { + tmp:1 = dst4B_afterSrc4; + src:1 = src4B; + setSubtractFlags(tmp, src); + tmp = tmp - src; + setResultFlags(tmp); +} + +# (4) CMP.B:G src, Ax +:CMP^".B:G" src4B, dst4Ax is (b1_0107=0x60 & b1_size_0=0) ... & src4B & $(DST4AX) ... 
{ + tmp:1 = dst4Ax:1; + src:1 = src4B; + setSubtractFlags(tmp, src); + tmp = tmp - src; + setResultFlags(tmp); +} + +# (4) CMP.W:G src, dst +:CMP^".W:G" src4W, dst4W_afterSrc4 is (b1_0107=0x60 & b1_size_0=1) ... & src4W ... & dst4W_afterSrc4 ... { + tmp:2 = dst4W_afterSrc4; + src:2 = src4W; + setSubtractFlags(tmp, src); + tmp = tmp - src; + setResultFlags(tmp); +} + +# (5) CMP.B:S src, R0H/R0L +:CMP^".B:S" dst2B, b1_2_reg8 is (b1_0307=0x7 & b1_2_reg8) ... & dst2B { + src:1 = dst2B; + setSubtractFlags(b1_2_reg8, src); + b1_2_reg8 = b1_2_reg8 - src; + setResultFlags(b1_2_reg8); +} + +### DADC ### + +# (1) DADC.B #imm, R0L +:DADC.B srcImm8, R0L is R0L & b1_0007=0x7c; b2_0007=0xee; srcImm8 { + src:2 = zext(srcImm8); + dst:2 = zext(R0L); + tmp:2 = DecimalAddWithCarry(src, dst); + R0L = tmp:1; + $(CARRY) = (tmp > 0x99); + setResultFlags(tmp:1); +} + +# (2) DADC.W #imm, R0 +:DADC.W srcImm16, R0 is R0 & b1_0007=0x7d; b2_0007=0xee; srcImm16 { + src:4 = zext(srcImm16); + dst:4 = zext(R0); + tmp:4 = DecimalAddWithCarry(src, dst); + R0 = tmp:2; + $(CARRY) = (tmp > 0x9999); + setResultFlags(tmp:2); +} + +# (3) DADC.B R0H, R0L +:DADC.B R0H, R0L is R0H & R0L & b1_0007=0x7c; b2_0007=0xe6 { + src:2 = zext(R0H); + dst:2 = zext(R0L); + tmp:2 = DecimalAddWithCarry(src, dst); + R0L = tmp:1; + $(CARRY) = (tmp > 0x99); + setResultFlags(tmp:1); +} + +# (4) DADC.W R1, R0 +:DADC.W R1, R0 is R1 & R0 & b1_0007=0x7d; b2_0007=0xe6 { + src:4 = zext(R1); + dst:4 = zext(R0); + tmp:4 = DecimalAddWithCarry(src, dst); + R0 = tmp:2; + $(CARRY) = (tmp > 0x9999); + setResultFlags(tmp:2); +} + +### DADD ### + +# (1) DADD.B #imm, R0L +:DADD.B srcImm8, R0L is R0L & b1_0007=0x7c; b2_0007=0xec; srcImm8 { + src:2 = zext(srcImm8); + dst:2 = zext(R0L); + tmp:2 = DecimalAdd(src, dst); + R0L = tmp:1; + $(CARRY) = (tmp > 0x99); + setResultFlags(tmp:1); +} + +# (2) DADD.W #imm, R0 +:DADD.W srcImm16, R0 is R0 & b1_0007=0x7d; b2_0007=0xec; srcImm16 { + src:4 = zext(srcImm16); + dst:4 = zext(R0); + tmp:4 = 
DecimalAdd(src, dst); + R0 = tmp:2; + $(CARRY) = (tmp > 0x9999); + setResultFlags(tmp:2); +} + +# (3) DADD.B R0H, R0L +:DADD.B R0H, R0L is R0H & R0L & b1_0007=0x7c; b2_0007=0xe4 { + src:2 = zext(R0H); + dst:2 = zext(R0L); + tmp:2 = DecimalAdd(src, dst); + R0L = tmp:1; + $(CARRY) = (tmp > 0x99); + setResultFlags(tmp:1); +} + +# (4) DADD.W R1, R0 +:DADD.W R1, R0 is R1 & R0 & b1_0007=0x7d; b2_0007=0xe4 { + src:4 = zext(R1); + dst:4 = zext(R0); + tmp:4 = DecimalAdd(src, dst); + R0 = tmp:2; + $(CARRY) = (tmp > 0x9999); + setResultFlags(tmp:2); +} + +### DEC ### + +# (1) DEC.B dst +:DEC.B dst3B is b1_0307=0x15 ... & $(DST3B) { + dst:1 = dst3B; + setSubtractFlags(dst, 1); + dst = dst - 1; + dst3B = dst; + setResultFlags(dst); +} + +# (2) DEC.W dst +:DEC.W b1_3_regAx is b1_0407=0xf & b1_0002=0x2 & b1_3_regAx { + dst:2 = b1_3_regAx; + setSubtractFlags(dst, 1); + dst = dst - 1; + b1_3_regAx = dst; + setResultFlags(dst); +} + +### DIV ### + +# (1) DIV.B #imm +:DIV.B srcSimm8 is b1_0107=0x3e & b1_size_0=0; b2_0007=0xe1; srcSimm8 { + d:2 = sext(srcSimm8); + q:2 = R0 s/ d; + r:2 = R0 s% d; # remainder has same sign as R0 (dividend) + R0L = q:1; + R0H = r:1; + q = q s>> 8; + $(OVERFLOW) = (d == 0) || ((q != 0) && (q != -1)); +} + +# (1) DIV.W #imm +:DIV.W srcSimm16 is b1_0107=0x3e & b1_size_0=1; b2_0007=0xe1; srcSimm16 { + d:4 = sext(srcSimm16); + q:4 = R2R0 s/ d; + r:4 = R2R0 s% d; # remainder has same sign as R0 (dividend) + R0 = q:2; + R2 = r:2; + q = q s>> 16; + $(OVERFLOW) = (d == 0) || ((q != 0) && (q != -1)); +} + +# (2) DIV.B src +:DIV.B dst4B is (b1_0107=0x3b & b1_size_0=0; b2_0407=0xd) ... & dst4B { + d:2 = sext(dst4B); + q:2 = R0 s/ d; + r:2 = R0 s% d; # remainder has same sign as R0 (dividend) + R0L = q:1; + R0H = r:1; + q = q s>> 8; + $(OVERFLOW) = (d == 0) || ((q != 0) && (q != -1)); +} + +# (2) DIV.W src +:DIV.W dst4W is (b1_0107=0x3b & b1_size_0=1; b2_0407=0xd) ... 
& dst4W { + d:4 = sext(dst4W); + q:4 = R2R0 s/ d; + r:4 = R2R0 s% d; # remainder has same sign as R0 (dividend) + R0 = q:2; + R2 = r:2; + q = q s>> 16; + $(OVERFLOW) = (d == 0) || ((q != 0) && (q != -1)); +} + +### DIVU ### + +# (1) DIVU.B #imm +:DIVU.B srcImm8 is b1_0107=0x3e & b1_size_0=0; b2_0007=0xe0; srcImm8 { + d:2 = zext(srcImm8); + q:2 = R0 / d; + r:2 = R0 % d; + R0L = q:1; + R0H = r:1; + q = q s>> 8; + $(OVERFLOW) = (d == 0) || ((q != 0) && (q != -1)); +} + +# (1) DIVU.W #imm +:DIVU.W srcImm16 is b1_0107=0x3e & b1_size_0=1; b2_0007=0xe0; srcImm16 { + d:4 = zext(srcImm16); + q:4 = R2R0 / d; + r:4 = R2R0 % d; + R0 = q:2; + R2 = r:2; + q = q s>> 16; + $(OVERFLOW) = (d == 0) || ((q != 0) && (q != -1)); +} + +# (2) DIVU.B src +:DIVU.B dst4B is (b1_0107=0x3b & b1_size_0=0; b2_0407=0xc) ... & dst4B { + d:2 = zext(dst4B); + q:2 = R0 / d; + r:2 = R0 % d; + R0L = q:1; + R0H = r:1; + q = q s>> 8; + $(OVERFLOW) = (d == 0) || ((q != 0) && (q != -1)); +} + +# (2) DIVU.W src +:DIVU.W dst4W is (b1_0107=0x3b & b1_size_0=1; b2_0407=0xc) ... 
& dst4W { + d:4 = zext(dst4W); + q:4 = R2R0 / d; + r:4 = R2R0 % d; + R0 = q:2; + R2 = r:2; + q = q s>> 16; + $(OVERFLOW) = (d == 0) || ((q != 0) && (q != -1)); +} + +### DIVX ### + +# (1) DIVX.B #imm +:DIVX.B srcSimm8 is b1_0107=0x3e & b1_size_0=0; b2_0007=0xe3; srcSimm8 { + d:2 = sext(srcSimm8); + q:2 = R0 s/ d; + r:2 = R0 s% d; + + #according to the manual the remainder has the same sign as the quotient + differ:1 = (r s< 0) != (d s< 0); + r = (zext(differ) * (-r)) + (zext(!differ) * r); + R0L = q:1; + R0H = r:1; + q = q s>> 8; + $(OVERFLOW) = (d == 0) || ((q != 0) && (q != -1)); +} + +# (1) DIVX.W #imm +:DIVX.W srcSimm16 is b1_0107=0x3e & b1_size_0=1; b2_0007=0xe3; srcSimm16 { + d:4 = sext(srcSimm16); + q:4 = R2R0 s/ d; + r:4 = R2R0 s% d; + + #according to the manual the remainder has the same sign as the quotient + differ:1 = (r s< 0) != (d s< 0); + r = (zext(differ) * (-r)) + (zext(!differ) * r); + R0 = q:2; + R2 = r:2; + q = q s>> 16; + $(OVERFLOW) = (d == 0) || ((q != 0) && (q != -1)); +} + +# (2) DIVX.B src +:DIVX.B dst4B is (b1_0107=0x3b & b1_size_0=0; b2_0407=0x9) ... & dst4B { + d:2 = sext(dst4B); + q:2 = R0 s/ d; + r:2 = R0 s% d; + + #according to the manual the remainder has the same sign as the quotient + differ:1 = (r s< 0) != (d s< 0); + r = (zext(differ) * (-r)) + (zext(!differ) * r); + R0L = q:1; + R0H = r:1; + q = q s>> 8; + $(OVERFLOW) = (d == 0) || ((q != 0) && (q != -1)); +} + +# (2) DIVX.W src +:DIVX.W dst4W is (b1_0107=0x3b & b1_size_0=1; b2_0407=0x9) ... 
& dst4W { + d:4 = sext(dst4W); + q:4 = R2R0 s/ d; + r:4 = R2R0 s% d; + + #according to the manual the remainder has the same sign as the quotient + differ:1 = (r s< 0) != (d s< 0); + r = (zext(differ) * (-r)) + (zext(!differ) * r); + R0 = q:2; + R2 = r:2; + q = q s>> 16; + $(OVERFLOW) = (d == 0) || ((q != 0) && (q != -1)); +} + +### DSBB ### + +# (1) DSBB.B #imm8, R0L +:DSBB.B srcImm8, R0L is R0L & b1_0007=0x7c; b2_0007=0xef; srcImm8 { + src:2 = zext(srcImm8); + dst:2 = zext(R0L); + c:1 = $(CARRY); + $(CARRY) = (c && (dst > src)) || (!c && (dst >= src)); + tmp:2 = DecimalSubtractWithBorrow(dst, src); + R0L = tmp:1; + setResultFlags(tmp:1); +} + +# (2) DSBB.W #imm16, R0 +:DSBB.W srcImm16, R0 is R0 & b1_0007=0x7d; b2_0007=0xef; srcImm16 { + src:4 = zext(srcImm16); + dst:4 = zext(R0); + c:1 = $(CARRY); + $(CARRY) = (c && (dst > src)) || (!c && (dst >= src)); + tmp:4 = DecimalSubtractWithBorrow(dst, src); + R0 = tmp:2; + setResultFlags(tmp:2); +} + +# (3) DSBB.B R0H, R0L +:DSBB.B R0H, R0L is R0H & R0L & b1_0007=0x7c; b2_0007=0xe7 { + src:2 = zext(R0H); + dst:2 = zext(R0L); + c:1 = $(CARRY); + $(CARRY) = (c && (dst > src)) || (!c && (dst >= src)); + tmp:2 = DecimalSubtractWithBorrow(dst, src); + R0L = tmp:1; + setResultFlags(tmp:1); +} + +# (4) DSBB.W R1, R0 +:DSBB.W R1, R0 is R0 & R1 & b1_0007=0x7d; b2_0007=0xe7 { + src:4 = zext(R1); + dst:4 = zext(R0); + c:1 = $(CARRY); + $(CARRY) = (c && (dst > src)) || (!c && (dst >= src)); + tmp:4 = DecimalSubtractWithBorrow(dst, src); + R0 = tmp:2; + setResultFlags(tmp:2); +} + +### DSUB ### + +# (1) DSUB.B #imm8, R0L +:DSUB.B srcImm8, R0L is R0L & b1_0007=0x7c; b2_0007=0xed; srcImm8 { + src:2 = zext(srcImm8); + dst:2 = zext(R0L); + $(CARRY) = (dst >= src); + tmp:2 = DecimalSubtract(dst, src); + R0L = tmp:1; + setResultFlags(tmp:1); +} + +# (2) DSUB.W #imm16, R0 +:DSUB.W srcImm16, R0 is R0 & b1_0007=0x7d; b2_0007=0xed; srcImm16 { + src:4 = zext(srcImm16); + dst:4 = zext(R0); + $(CARRY) = (dst >= src); + tmp:4 = 
DecimalSubtract(dst, src); + R0 = tmp:2; + setResultFlags(tmp:2); +} + +# (3) DSUB.B R0H, R0L +:DSUB.B R0H, R0L is R0H & R0L & b1_0007=0x7c; b2_0007=0xe5 { + src:2 = zext(R0H); + dst:2 = zext(R0L); + $(CARRY) = (dst >= src); + tmp:2 = DecimalSubtract(dst, src); + R0L = tmp:1; + setResultFlags(tmp:1); +} + +# (4) DSUB.W R1, R0 +:DSUB.W R1, R0 is R0 & R1 & b1_0007=0x7d; b2_0007=0xe5 { + src:4 = zext(R1); + dst:4 = zext(R0); + $(CARRY) = (dst >= src); + tmp:4 = DecimalSubtract(dst, src); + R0 = tmp:2; + setResultFlags(tmp:2); +} + +### ENTER ### + +:ENTER srcImm8 is b1_0007=0x7c; b2_0007=0xf2; srcImm8 { + push2(FB); + FB = SP; + SP = SP - zext(srcImm8); +} + +### EXITD ### + +:EXITD is b1_0007=0x7d; b2_0007=0xf2 { + SP = FB; + pop2(FB); + pc:3 = 0; + pop3(pc); + return [pc]; +} + +### EXTS ### + +# (1) EXTS.B dst +:EXTS.B dst4B is (b1_0007=0x7c; b2_0407=0x6) ... & dst4B & dst4W { + tmp:2 = sext(dst4B); + dst4W = tmp; + setResultFlags(tmp); +} + +# (1) EXTS.B Ax +:EXTS.B dst4Ax is (b1_0007=0x7c; b2_0407=0x6) & $(DST4AX) { + tmp:2 = sext(dst4Ax:1); + dst4Ax = tmp; + setResultFlags(tmp); +} + +# (2) EXTS.W R0 +:EXTS.W R0 is R0 & b1_0007=0x7c; b2_0007=0xf3 { + tmp:4 = sext(R0); + R2R0 = tmp; + setResultFlags(tmp); +} + +### FCLR ### + +:FCLR flagBit is b1_0007=0xeb; b2_0707=0 & flagBit & b2_0003=0x5 { + mask:2 = ~(1 << flagBit); + FLG = FLG & mask; +} + +### FSET ### + +:FSET flagBit is b1_0007=0xeb; b2_0707=0 & flagBit & b2_0003=0x4 { + mask:2 = (1 << flagBit); + FLG = FLG | mask; +} + +### INC ### + +# (1) INC.B dst +:INC.B dst3B is b1_0307=0x14 ... 
& $(DST3B) { + tmp:1 = dst3B + 1; + dst3B = tmp; + setResultFlags(tmp); +} + +# (2) INC.W dst +:INC.W b1_3_regAx is b1_0407=0xb & b1_0002=0x2 & b1_3_regAx { + tmp:2 = b1_3_regAx + 1; + b1_3_regAx = tmp; + setResultFlags(tmp); +} + +### INT ### + +:INT srcIntNum is b1_0007=0xeb; imm8_0607=3 & srcIntNum { + push1(FLG:1); + next:3 = inst_next; + push3(next); + ptr3:3 = (INTB + (zext(srcIntNum) * 0x4)); + pc:3 = *:3 ptr3; + $(STACK_SEL) = ((srcIntNum > 0x1f) * $(STACK_SEL)); + $(INTERRUPT) = 0x0; + $(DEBUG) = 0x0; + call [pc]; +} + +##### INTO ##### + +:INTO is b1_0007=0xf6 { + if ($(OVERFLOW) == 0) goto inst_next; + push1(FLG:1); + next:3 = inst_next; + push3(next); + $(STACK_SEL) = 0; + $(INTERRUPT) = 0x0; + $(DEBUG) = 0x0; + call 0x0fffe0; +} + +### JCnd ### + +# (1) JCnd3 dsp8 +:J^b1cnd3 rel8offset1 is b1_0307=0x0d & b1cnd3; rel8offset1 { + if (b1cnd3) goto rel8offset1; +} + +# (2) JCnd4 dsp8 +:J^b2cnd3 rel8offset2 is b1_0007=0x7d; b2_0407=0xc & b2_0303=1 & b2cnd3; rel8offset2 { + if (b2cnd3) goto rel8offset2; +} + +### JMP ### + +# (1) JMP.S dsp3 +:JMP.S rel3offset2 is b1_0307=0x0c & rel3offset2 { + goto rel3offset2; +} + +# (2) JMP.B dsp8 +:JMP.B rel8offset1 is b1_0007=0xfe; rel8offset1 { + goto rel8offset1; +} + +# (3) JMP.W dsp16 +:JMP.W rel16offset1 is b1_0007=0xf4; rel16offset1 { + goto rel16offset1; +} + +# (4) JMP.A abs20 +:JMP.A abs20offset is b1_0007=0xfc; abs20offset { + goto abs20offset; +} + +### JMPI ### + +# JMPI.W dst +:JMPI.W reloffset_dst4W is (b1_0007=0x7d; b2_0407=0x2) ... & reloffset_dst4W { + goto reloffset_dst4W; +} + +# JMPI.A dst (dst=register) +:JMPI.A reloffset_dst4L is (b1_0007=0x7d; b2_0407=0x0) ... & reloffset_dst4L { + goto reloffset_dst4L; +} + +# JMPI.A dst (dst=memory) +:JMPI.A reloffset_dst4T is (b1_0007=0x7d; b2_0407=0x0) ... 
& reloffset_dst4T { + goto reloffset_dst4T; +} + +### JMPS ### + +:JMPS srcImm8 is b1_0007=0xee; srcImm8 { + # 18 <= srcImm8 <= 255 (range restriction not enforced by pattern match) + ptr:3 = 0x0ffffe - (zext(srcImm8) << 1); + pc:3 = 0x0f0000 | zext(*:2 ptr); + goto [pc]; +} + +### JSR ### + +:JSR.W rel16offset1 is b1_0007=0xf5; rel16offset1 { + next:3 = inst_next; + push3(next); + call rel16offset1; +} + +:JSR.A abs20offset is b1_0007=0xfd; abs20offset { + next:3 = inst_next; + push3(next); + call abs20offset; +} + +### JSRI ### + +# JSRI.W dst +:JSRI.W reloffset_dst4W is (b1_0007=0x7d; b2_0407=0x3) ... & reloffset_dst4W { + next:3 = inst_next; + push3(next); + call reloffset_dst4W; +} + +# JSRI.A dst (dst=register) +:JSRI.A dst4L is (b1_0007=0x7d; b2_0407=0x1) ... & dst4L { + next:3 = inst_next; + push3(next); + pc:3 = dst4L:3; + call [pc]; +} + +# JSRI.A dst (dst=memory) +:JSRI.A dst4T is (b1_0007=0x7d; b2_0407=0x1) ... & $(DST4T) { + next:3 = inst_next; + push3(next); + pc:3 = dst4T; + call [pc]; +} + +### JSRS ### + +:JSRS srcImm8 is b1_0007=0xef; srcImm8 { + # 18 <= srcImm8 <= 255 (range restriction not enforced by pattern match) + next:3 = inst_next; + push3(next); + ptr:3 = 0x0ffffe - (zext(srcImm8) << 1); + pc:3 = 0x0f0000 | zext(*:2 ptr); + call [pc]; +} + +### LDC ### + +:LDC srcImm16, b2_creg16 is b1_0007=0xeb; b2_0707=0 & b2_creg16 & b2_0003=0x0; srcImm16 { + b2_creg16 = srcImm16; +} + +:LDC dst4W, b2_creg16 is (b1_0007=0x7a; b2_0707=1 & b2_creg16) ... 
& dst4W { + b2_creg16 = dst4W; +} + +### LDCTX ### + +:LDCTX abs16offset, abs20offset is b1_0007=0x7c; b2_0007=0xf0; abs16offset; imm20_dat & abs20offset { + + taskNum:1 = abs16offset; # load task number stored at abs16 + ptr:3 = imm20_dat + (zext(taskNum) * 2); # compute table entry address relative to abs20 + regInfo:1 = *:1 ptr; + ptr = ptr + 1; + spCorrect:1 = *:1 ptr; + + ptr = zext(SP); + + if ((regInfo & 1) == 0) goto ; + R0 = *:2 ptr; + ptr = ptr + 2; + + regInfo = regInfo >> 1; + if ((regInfo & 1) == 0) goto ; + R1 = *:2 ptr; + ptr = ptr + 2; + + regInfo = regInfo >> 1; + if ((regInfo & 1) == 0) goto ; + R2 = *:2 ptr; + ptr = ptr + 2; + + regInfo = regInfo >> 1; + if ((regInfo & 1) == 0) goto ; + R3 = *:2 ptr; + ptr = ptr + 2; + + regInfo = regInfo >> 1; + if ((regInfo & 1) == 0) goto ; + A0 = *:2 ptr; + ptr = ptr + 2; + + regInfo = regInfo >> 1; + if ((regInfo & 1) == 0) goto ; + A1 = *:2 ptr; + ptr = ptr + 2; + + regInfo = regInfo >> 1; + if ((regInfo & 1) == 0) goto ; + SB = *:2 ptr; + ptr = ptr + 2; + + regInfo = regInfo >> 1; + if ((regInfo & 1) == 0) goto ; + FB = *:2 ptr; + ptr = ptr + 2; + + SP = SP + zext(spCorrect); +} + +### LDE ### + +# (1) LDE.B abs20, dst +:LDE.B abs20offset, dst4B is ((b1_0107=0x3a & b1_size_0=0; b2_0407=0x8) ... & dst4B); abs20offset { + tmp:1 = abs20offset; + dst4B = tmp; + setResultFlags(tmp); +} + +# (1) LDE.B abs20, Ax +:LDE.B abs20offset, dst4Ax is ((b1_0107=0x3a & b1_size_0=0; b2_0407=0x8) & $(DST4AX)); abs20offset { + tmp:1 = abs20offset; + dst4Ax = zext(tmp); + setResultFlags(tmp); +} + +# (1) LDE.W abs20, dst +:LDE.W abs20offsetW, dst4W is ((b1_0107=0x3a & b1_size_0=1; b2_0407=0x8) ... & dst4W); abs20offsetW { + tmp:2 = abs20offsetW; + dst4W = tmp; + setResultFlags(tmp); +} + +# (2) LDE.B dsp20, dst +:LDE.B dsp20A0B, dst4B is ((b1_0107=0x3a & b1_size_0=0; b2_0407=0x9) ... 
& dst4B); dsp20A0B { + tmp:1 = dsp20A0B; + dst4B = tmp; + setResultFlags(tmp); +} + +# (2) LDE.B dsp20, Ax +:LDE.B dsp20A0B, dst4Ax is ((b1_0107=0x3a & b1_size_0=0; b2_0407=0x9) & $(DST4AX)); dsp20A0B { + tmp:1 = dsp20A0B; + dst4Ax = zext(tmp); + setResultFlags(tmp); +} + +# (2) LDE.W dsp20, dst +:LDE.W dsp20A0W, dst4W is ((b1_0107=0x3a & b1_size_0=1; b2_0407=0x9) ... & dst4W); dsp20A0W { + tmp:2 = dsp20A0W; + dst4W = tmp; + setResultFlags(tmp); +} + +# (3) LDE.B [A1A0], dst +:LDE.B [A1A0], dst4B is (A1A0 & b1_0107=0x3a & b1_size_0=0; b2_0407=0xa) ... & dst4B { + ptr:3 = A1A0:3; + tmp:1 = *:1 ptr; + dst4B = tmp; + setResultFlags(tmp); +} + +# (3) LDE.B [A1A0], Ax +:LDE.B [A1A0], dst4Ax is (A1A0 & b1_0107=0x3a & b1_size_0=0; b2_0407=0xa) & $(DST4AX) { + ptr:3 = A1A0:3; + tmp:1 = *:1 ptr; + dst4Ax = zext(tmp); + setResultFlags(tmp); +} + +# (3) LDE.W [A1A0], dst +:LDE.W [A1A0], dst4W is (A1A0 & b1_0107=0x3a & b1_size_0=1; b2_0407=0xa) ... & dst4W { + ptr:3 = A1A0:3; + tmp:2 = *:2 ptr; + dst4W = tmp; + setResultFlags(tmp); +} + +### LDINTB ### +# LDINTB operand value +ldIntbVal: "#"^val is b1_0007; b2_0007; b3_0003; b4_0007; b5_0007; b6_0007; imm16_dat [ val = (b3_0003 << 16) + imm16_dat; ] { + export *[const]:3 val; +} + +# NOTE: Although this is documented as a macro for two LDE instructions, the encoding is different ?? +:LDINTB ldIntbVal is (b1_0007=0xeb; b2_0007=0x20; b3_0407=0x0; b4_0007=0x0; b5_0007=0xeb; b6_0007=0x10) ... & ldIntbVal { + INTB = ldIntbVal; +} + +### LDIPL ### + +:LDIPL srcImm3 is b1_0007=0x7d; b2_0307=0x14 & srcImm3 { + $(IPL) = srcImm3; +} + +### MOV ### + +# (1) MOV.B:G #imm, dst +:MOV^".B:G" srcImm8, dst4B is ((b1_0107=0x3a & b1_size_0=0; b2_0407=0xc) ... 
& dst4B); srcImm8 { + val:1 = srcImm8; + dst4B = val; + setResultFlags(val); +} + +# (1) MOV.B:G #imm, Ax +:MOV^".B:G" srcImm8, dst4Ax is ((b1_0107=0x3a & b1_size_0=0; b2_0407=0xc) & $(DST4AX)); srcImm8 { + val:1 = srcImm8; + dst4Ax = zext(val); + setResultFlags(val); +} + +# (1) MOV.W:G #imm, dst +:MOV^".W:G" srcImm16, dst4W is ((b1_0107=0x3a & b1_size_0=1; b2_0407=0xc) ... & dst4W); srcImm16 { + val:2 = srcImm16; + dst4W = val; + setResultFlags(val); +} + +# (2) MOV.B:Q #simm4, dst +:MOV^".B:Q" srcSimm4_0407, dst4B is (b1_0107=0x6c & b1_size_0=0; srcSimm4_0407) ... & dst4B { + val:1 = srcSimm4_0407; + dst4B = val; + setResultFlags(val); +} + +# (2) MOV.B:Q #simm4, Ax +:MOV^".B:Q" srcSimm4_0407, dst4Ax is (b1_0107=0x6c & b1_size_0=0; srcSimm4_0407) & $(DST4AX) { + val:1 = srcSimm4_0407; + dst4Ax = zext(val); + setResultFlags(val); +} + +# (2) MOV.W:Q #simm4, dst +:MOV^".W:Q" srcSimm4_0407, dst4W is (b1_0107=0x6c & b1_size_0=1; srcSimm4_0407) ... & dst4W { + val:2 = sext(srcSimm4_0407); + dst4W = val; + setResultFlags(val); +} + +# (3) MOV.B:S #imm, dst +:MOV^".B:S" srcImm8, dst3B_afterDsp8 is (b1_0307=0x18; srcImm8) ... & $(DST3B_AFTER_DSP8) { + val:1 = srcImm8; + dst3B_afterDsp8 = val; + setResultFlags(val); +} + +# (4) MOV.B:S #imm, dst +:MOV^".B:S" srcImm8, b1_3_regAx is b1_0407=0xe & b1_3_regAx & b1_0002=0x2; srcImm8 { + val:1 = srcImm8; + b1_3_regAx = zext(val); + setResultFlags(val); +} + +# (4) MOV.W:S #imm, Ax +:MOV^".W:S" srcImm16, b1_3_regAx is b1_0407=0xa & b1_3_regAx & b1_0002=0x2; srcImm16 { + val:2 = srcImm16; + b1_3_regAx = val; + setResultFlags(val); +} + +# (5) MOV.B:Z #0, dst +:MOV^".B:Z" srcZero8, dst3B is (srcZero8 & b1_0307=0x16) ... & $(DST3B) { + dst3B = 0; + $(SIGN) = 0; + $(ZERO) = 1; +} + +# (6) MOV.B:G src, dst +:MOV^".B:G" src4B, dst4B_afterSrc4 is (b1_0107=0x39 & b1_size_0=0) ... & src4B ... & dst4B_afterSrc4 ... 
{ + val:1 = src4B; + dst4B_afterSrc4 = val; + setResultFlags(val); +} + +# (6) MOV.B:G src, Ax +:MOV^".B:G" src4B, dst4Ax is (b1_0107=0x39 & b1_size_0=0) ... & src4B & $(DST4AX) ... { + val:1 = src4B; + dst4Ax = zext(val); + setResultFlags(val); +} + +# (6) MOV.W:G src, dst +:MOV^".W:G" src4W, dst4W_afterSrc4 is (b1_0107=0x39 & b1_size_0=1) ... & src4W ... & dst4W_afterSrc4 ... { + val:2 = src4W; + dst4W_afterSrc4 = val; + setResultFlags(val); +} + +# (7) MOV.B:S src, Ax +:MOV^".B:S" dst2B, b1_2_regAx is (b1_0307=0x06 & b1_2_regAx) ... & dst2B { + val:1 = dst2B; + b1_2_regAx = zext(val); + setResultFlags(val); +} + +# (8) MOV.B:S R0H/R0L, dst +# TODO: Is it really necessary to exclude R0H/R0L as valid destination ?? +:MOV^".B:S" b1_2_reg8, dst2B is (b1_0307=0x0 & b1_2_reg8) ... & dst2B { + val:1 = b1_2_reg8; + dst2B = val; + setResultFlags(val); +} + +# (9) MOV.B:S src, R0H/R0L +:MOV^".B:S" dst2B, b1_2_reg8 is (b1_0307=0x1 & b1_2_reg8) ... & dst2B { + val:1 = dst2B; + b1_2_reg8 = val; + setResultFlags(val); +} + +# (10) MOV.B:G dsp:8[SP], dst +:MOV^".B:G" dsp8spB, dst4B is ((b1_0107=0x3a & b1_size_0=0; b2_0407=0xb) ... & dst4B); dsp8spB { + val:1 = dsp8spB; + dst4B = val; + setResultFlags(val); +} + +# (10) MOV.B:G dsp:8[SP], Ax +:MOV^".B:G" dsp8spB, dst4Ax is ((b1_0107=0x3a & b1_size_0=0; b2_0407=0xb) & $(DST4AX)); dsp8spB { + val:1 = dsp8spB; + dst4Ax = zext(val); + setResultFlags(val); +} + +# (10) MOV.W:G dsp:8[SP], dst +:MOV^".W:G" dsp8spW, dst4W is ((b1_0107=0x3a & b1_size_0=1; b2_0407=0xb) ... & dst4W); dsp8spW { + val:2 = dsp8spW; + dst4W = val; + setResultFlags(val); +} + +# (11) MOV.B:G src, dsp:8[SP] +:MOV^".B:G" dst4B, dsp8spB is ((b1_0107=0x3a & b1_size_0=0; b2_0407=0x3) ... & dst4B); dsp8spB { + val:1 = dst4B; + dsp8spB = val; + setResultFlags(val); +} + +# (11) MOV.W:G src, dsp:8[SP] +:MOV^".W:G" dst4W, dsp8spW is ((b1_0107=0x3a & b1_size_0=1; b2_0407=0x3) ... 
& dst4W); dsp8spW { + val:2 = dst4W; + dsp8spW = val; + setResultFlags(val); +} + +### MOVA ### + +:MOVA dst4A, b2_reg16 is (b1_0007=0xeb; b2_0707=0 & b2_reg16) ... & $(DST4A) { + b2_reg16 = dst4A:2; +} + +### MOVDir ### + +# TODO: dst4B=Ax/R0L cases will parse but are not valid + +# (1) MOVDir R0L, dst +:MOVLL R0L, dst4B is (R0L & b1_0007=0x7c; b2_0407=0x8) ... & dst4B { + dst4B = (R0L & 0x0f) | (dst4B & 0xf0); +} +:MOVHL R0L, dst4B is (R0L & b1_0007=0x7c; b2_0407=0x9) ... & dst4B { + dst4B = ((R0L & 0xf0) >> 4) | (dst4B & 0xf0); +} +:MOVLH R0L, dst4B is (R0L & b1_0007=0x7c; b2_0407=0xa) ... & dst4B { + dst4B = ((R0L & 0x0f) << 4) | (dst4B & 0x0f); +} +:MOVHH R0L, dst4B is (R0L & b1_0007=0x7c; b2_0407=0xb) ... & dst4B { + dst4B = (R0L & 0xf0) | (dst4B & 0x0f); +} + +# (1) MOVDir dst, R0L +:MOVLL dst4B, R0L is (R0L & b1_0007=0x7c; b2_0407=0x0) ... & dst4B { + R0L = (dst4B & 0x0f) | (R0L & 0xf0); +} +:MOVHL dst4B, R0L is (R0L & b1_0007=0x7c; b2_0407=0x1) ... & dst4B { + R0L = ((dst4B & 0xf0) >> 4) | (R0L & 0xf0); +} +:MOVLH dst4B, R0L is (R0L & b1_0007=0x7c; b2_0407=0x2) ... & dst4B { + R0L = ((dst4B & 0x0f) << 4) | (R0L & 0x0f); +} +:MOVHH dst4B, R0L is (R0L & b1_0007=0x7c; b2_0407=0x3) ... & dst4B { + R0L = (dst4B & 0xf0) | (R0L & 0x0f); +} + +### MUL ### + +# TODO: Illegal MUL destination cases will parse but are not valid (e.g., R0H, R2, R1H, R3) + +# (1) MUL.B #imm, dst +:MUL.B srcSimm8, dst4B is ((b1_0107=0x3e & b1_size_0=0; b2_0407=0x5) ... & dst4B & dst4W); srcSimm8 { + dst4W = sext(srcSimm8) * sext(dst4B); +} + +# (1) MUL.W #imm, dst +:MUL.W srcSimm16, dst4W is ((b1_0107=0x3e & b1_size_0=1; b2_0407=0x5) ... & dst4W & dst4L); srcSimm16 { + dst4L = sext(srcSimm16) * sext(dst4W); +} + +# (2) MUL.B src, dst +:MUL.B src4B, dst4B_afterSrc4 is (b1_0107=0x3c & b1_size_0=0) ... & src4B ... & dst4B_afterSrc4 ... & dst4W_afterSrc4 ... 
{ + dst4W_afterSrc4 = sext(src4B) * sext(dst4B_afterSrc4); +} + +# (2) MUL.W src, dst +:MUL.W src4W, dst4W_afterSrc4 is (b1_0107=0x3c & b1_size_0=1) ... & src4W ... & dst4W_afterSrc4 ... & dst4L_afterSrc4 ... { + dst4L_afterSrc4 = sext(src4W) * sext(dst4W_afterSrc4); +} + +### MULU ### + +# TODO: Illegal MULU destination cases will parse but are not valid (e.g., R0H, R2, R1H, R3) + +# (1) MULU.B #imm, dst +:MULU.B srcImm8, dst4B is ((b1_0107=0x3e & b1_size_0=0; b2_0407=0x4) ... & dst4B & dst4W); srcImm8 { + dst4W = zext(srcImm8) * zext(dst4B); +} + +# (1) MULU.W #imm, dst +:MULU.W srcImm16, dst4W is ((b1_0107=0x3e & b1_size_0=1; b2_0407=0x4) ... & dst4W & dst4L); srcImm16 { + dst4L = zext(srcImm16) * zext(dst4W); +} + +# (2) MULU.B src, dst +:MULU.B src4B, dst4B_afterSrc4 is (b1_0107=0x38 & b1_size_0=0) ... & src4B ... & dst4B_afterSrc4 ... & dst4W_afterSrc4 ... { + dst4W_afterSrc4 = zext(src4B) * zext(dst4B_afterSrc4); +} + +# (2) MULU.W src, dst +:MULU.W src4W, dst4W_afterSrc4 is (b1_0107=0x38 & b1_size_0=1) ... & src4W ... & dst4W_afterSrc4 ... & dst4L_afterSrc4 ... { + dst4L_afterSrc4 = zext(src4W) * zext(dst4W_afterSrc4); +} + +### NEG ### + +# (1) NEG.B dst +:NEG.B dst4B is (b1_0107=0x3a & b1_size_0=0; b2_0407=0x5) ... & dst4B { + tmp:1 = dst4B; + setSubtractFlags(0:1, tmp); + tmp = -tmp; + dst4B = tmp; + setResultFlags(tmp); +} + +# (1) NEG.W dst +:NEG.W dst4W is (b1_0107=0x3a & b1_size_0=1; b2_0407=0x5) ... & dst4W { + tmp:2 = dst4W; + setSubtractFlags(0:2, tmp); + tmp = -tmp; + dst4W = tmp; + setResultFlags(tmp); +} + +### NOP ### + +:NOP is b1_0007=0x04 { +} + +### NOT ### + +# (1) NOT.B dst +:NOT.B dst4B is (b1_0107=0x3a & b1_size_0=0; b2_0407=0x7) ... & dst4B { + tmp:1 = ~dst4B; + dst4B = tmp; + setResultFlags(tmp); +} + +# (1) NOT.W dst +:NOT.W dst4W is (b1_0107=0x3a & b1_size_0=1; b2_0407=0x7) ... & dst4W { + tmp:2 = ~dst4W; + dst4W = tmp; + setResultFlags(tmp); +} + +# (2) NOT.B:S dst +:NOT^".B:S" dst3B is (b1_0307=0x17) ... 
& $(DST3B) { + tmp:1 = ~dst3B; + dst3B = tmp; + setResultFlags(tmp); +} + +### OR ### + +# (1) OR.B:G #imm, dst +:OR^".B:G" srcImm8, dst4B is ((b1_0107=0x3b & b1_size_0=0; b2_0407=0x3) ... & dst4B); srcImm8 { + tmp:1 = dst4B | srcImm8; + dst4B = tmp; + setResultFlags(tmp); +} + +# (1) OR.B:G #imm, Ax +:OR^".B:G" srcImm8, dst4Ax is ((b1_0107=0x3b & b1_size_0=0; b2_0407=0x3) & $(DST4AX)); srcImm8 { + tmp:1 = dst4Ax:1 | srcImm8; + dst4Ax = zext(tmp); + setResultFlags(tmp); +} + +# (1) OR.W:G #imm, dst +:OR^".W:G" srcImm16, dst4W is ((b1_0107=0x3b & b1_size_0=1; b2_0407=0x3) ... & dst4W); srcImm16 { + tmp:2 = dst4W | srcImm16; + dst4W = tmp; + setResultFlags(tmp); +} + +# (2) OR.B:S #imm, dst +:OR^".B:S" srcImm8, dst3B_afterDsp8 is (b1_0307=0x13; srcImm8) ... & $(DST3B_AFTER_DSP8) { + tmp:1 = dst3B_afterDsp8 | srcImm8; + dst3B_afterDsp8 = tmp; + setResultFlags(tmp); +} + +# (3) OR.B:G src, dst +:OR^".B:G" src4B, dst4B_afterSrc4 is (b1_0107=0x4c & b1_size_0=0) ... & src4B ... & dst4B_afterSrc4 ... { + tmp:1 = dst4B_afterSrc4 | src4B; + dst4B_afterSrc4 = tmp; + setResultFlags(tmp); +} + +# (3) OR.B:G src, Ax +:OR^".B:G" src4B, dst4Ax is (b1_0107=0x4c & b1_size_0=0) ... & src4B & $(DST4AX) ... { + tmp:1 = dst4Ax:1 | src4B; + dst4Ax = zext(tmp); + setResultFlags(tmp); +} + +# (3) OR.W:G src, dst +:OR^".W:G" src4W, dst4W_afterSrc4 is (b1_0107=0x4c & b1_size_0=1) ... & src4W ... & dst4W_afterSrc4 ... { + tmp:2 = dst4W_afterSrc4 | src4W; + dst4W_afterSrc4 = tmp; + setResultFlags(tmp); +} + +# (4) OR.B:S src, R0L/R0H +:OR^".B:S" dst2B, b1_2_reg8 is (b1_0307=0x3 & b1_2_reg8) ... & dst2B { + tmp:1 = dst2B | b1_2_reg8; + b1_2_reg8 = tmp; + setResultFlags(tmp); +} + +### POP ### + +# (1) POP.B:G dst +:POP^".B:G" dst4B is (b1_0107=0x3a & b1_size_0=0; b2_0407=0xd) ... 
& dst4B { + pop1(dst4B); +} + +# (1) POP.B:G Ax +:POP^".B:G" dst4Ax is (b1_0107=0x3a & b1_size_0=0; b2_0407=0xd) & $(DST4AX) { + val:1 = 0; + pop1(val); + dst4Ax = zext(val); +} + +# (1) POP.W:G dst +:POP^".W:G" dst4W is (b1_0107=0x3a & b1_size_0=1; b2_0407=0xd) ... & dst4W { + pop2(dst4W); +} + +# (2) POP.B:S R0L/R0H +:POP^".B:S" b1_3_reg8 is b1_0407=0x9 & b1_3_reg8 & b1_0002=0x2 { + pop1(b1_3_reg8); +} + +# (3) POP.W:S Ax +:POP^".W:S" b1_3_regAx is b1_0407=0xd & b1_3_regAx & b1_0002=0x2 { + pop2(b1_3_regAx); +} + +### POPC ### + +:POPC b2_creg16 is b1_0007=0xeb; b2_0707=0 & b2_creg16 & b2_0003=0x3 { + pop2(b2_creg16); +} + +### POPM ### +popRegFB: FB is regBit7=1 & FB { pop2(FB); } +popRegFB: is regBit7=0 { } + +popRegSB: SB popRegFB is regBit6=1 & popRegFB & SB { pop2(SB); build popRegFB; } +popRegSB: popRegFB is popRegFB { build popRegFB; } + +popRegA1: A1 popRegSB is regBit5=1 & popRegSB & A1 { pop2(A1); build popRegSB; } +popRegA1: popRegSB is popRegSB { build popRegSB; } +popRegA0: A0 popRegA1 is regBit4=1 & popRegA1 & A0 { pop2(A0); build popRegA1; } +popRegA0: popRegA1 is popRegA1 { build popRegA1; } + +popRegR3: R3 popRegA0 is regBit3=1 & popRegA0 & R3 { pop2(R3); build popRegA0; } +popRegR3: popRegA0 is popRegA0 { build popRegA0; } +popRegR2: R2 popRegR3 is regBit2=1 & popRegR3 & R2 { pop2(R2); build popRegR3; } +popRegR2: popRegR3 is popRegR3 { build popRegR3; } +popRegR1: R1 popRegR2 is regBit1=1 & popRegR2 & R1 { pop2(R1); build popRegR2; } +popRegR1: popRegR2 is popRegR2 { build popRegR2; } +popRegR0: R0 popRegR1 is regBit0=1 & popRegR1 & R0 { pop2(R0); build popRegR1; } +popRegR0: popRegR1 is popRegR1 { build popRegR1; } + +popRegList: "( "^popRegR0^")" is popRegR0 { build popRegR0; } + +:POPM popRegList is b1_0007=0xed; popRegList { + build popRegList; +} + +### PUSH ### + +# (1) PUSH.B:G #imm +:PUSH^".B:G" srcImm8 is b1_0107=0x3e & b1_size_0=0; b2_0007=0xe2; srcImm8 { + push1(srcImm8); +} + +# (1) PUSH.W:G #imm +:PUSH^".W:G" srcImm16 is 
b1_0107=0x3e & b1_size_0=1; b2_0007=0xe2; srcImm16 { + push2(srcImm16); +} + +# (2) PUSH.B:G src +:PUSH^".B:G" dst4B is (b1_0107=0x3a & b1_size_0=0; b2_0407=0x4) ... & dst4B { + push1(dst4B); +} + +# (2) PUSH.W:G src +:PUSH^".W:G" dst4W is (b1_0107=0x3a & b1_size_0=1; b2_0407=0x4) ... & dst4W { + push2(dst4W); +} + +# (3) PUSH.B:S R0H/R0L +:PUSH^".B:S" b1_3_reg8 is b1_0407=0x8 & b1_3_reg8 & b1_0002=0x2 { + push1(b1_3_reg8); +} + +# (4) PUSH.W:S Ax +:PUSH^".W:S" b1_3_regAx is b1_0407=0xc & b1_3_regAx & b1_0002=0x2 { + push2(b1_3_regAx); +} + +### PUSHA ### + +:PUSHA dst4A is (b1_0007=0x7d; b2_0407=0x9) ... & $(DST4A) { + push2(dst4A:2); +} + +### PUSHC ### + +:PUSHC b2_creg16 is b1_0007=0xeb; b2_0707=0 & b2_creg16 & b2_0003=0x2 { + push2(b2_creg16); +} + +### PUSHM ### +pushRegR0: R0 is regBit7=1 & R0 { push2(R0); } +pushRegR0: is regBit7=0 { } +pushRegR1: pushRegR0 R1 is regBit6=1 & pushRegR0 & R1 { push2(R1); build pushRegR0; } +pushRegR1: pushRegR0 is pushRegR0 { build pushRegR0; } +pushRegR2: pushRegR1 R2 is regBit5=1 & pushRegR1 & R2 { push2(R2); build pushRegR1; } +pushRegR2: pushRegR1 is pushRegR1 { build pushRegR1; } +pushRegR3: pushRegR2 R3 is regBit4=1 & pushRegR2 & R3 { push2(R3); build pushRegR2; } +pushRegR3: pushRegR2 is pushRegR2 { build pushRegR2; } + +pushRegA0: pushRegR3 A0 is regBit3=1 & pushRegR3 & A0 { push3(A0); build pushRegR3; } +pushRegA0: pushRegR3 is pushRegR3 { build pushRegR3; } +pushRegA1: pushRegA0 A1 is regBit2=1 & pushRegA0 & A1 { push3(A1); build pushRegA0; } +pushRegA1: pushRegA0 is pushRegA0 { build pushRegA0; } + +pushRegSB: pushRegA1 SB is regBit1=1 & pushRegA1 & SB { push3(SB); build pushRegA1; } +pushRegSB: pushRegA1 is pushRegA1 { build pushRegA1; } + +pushRegFB: pushRegSB FB is regBit0=1 & pushRegSB & FB { push3(FB); build pushRegSB; } +pushRegFB: pushRegSB is pushRegSB { build pushRegSB; } + +pushRegList: "("^pushRegFB^" )" is pushRegFB { build pushRegFB; } + +:PUSHM pushRegList is b1_0007=0xec; pushRegList { + build 
pushRegList; +} + +### REIT ### + +:REIT is b1_0007=0xfb { + pc:3 = 0; + pop3(pc); + f:1 = 0; + pop1(f); + FLG = zext(f); # TODO: Not sure what state upper FLG bits should be in ?? + return [pc]; +} + +### RMPA ### + +:RMPA.B is b1_0107=0x3e & b1_size_0=0; b2_0007=0xf1 { + if (R3 == 0) goto inst_next; + ptr0:3 = zext(A0); + ptr1:3 = zext(A1); + a:1 = *:1 ptr0; + b:1 = *:1 ptr1; + A0 = A0 + 1; + A1 = A1 + 1; + prod:2 = sext(a) * sext(b); + o:1 = scarry(R0, prod); + $(OVERFLOW) = o | $(OVERFLOW); + R0 = R0 + prod; + R3 = R3 - 1; + goto inst_start; +} + +:RMPA.W is b1_0107=0x3e & b1_size_0=1; b2_0007=0xf1 { + if (R3 == 0) goto inst_next; + ptr0:3 = zext(A0); + ptr1:3 = zext(A1); + a:2 = *:2 ptr0; + b:2 = *:2 ptr1; + A0 = A0 + 2; + A1 = A1 + 2; + prod:4 = sext(a) * sext(b); + o:1 = scarry(R2R0, prod); + $(OVERFLOW) = o | $(OVERFLOW); + R2R0 = R2R0 + prod; + R3 = R3 - 1; + goto inst_start; +} + +### ROLC ### + +:ROLC.B dst4B is (b1_0107=0x3b & b1_size_0=0; b2_0407=0xa) ... & dst4B { + c:1 = $(CARRY); + tmp:1 = dst4B; + $(CARRY) = tmp s< 0; + tmp = (tmp << 1) | c; + dst4B = tmp; + setResultFlags(tmp); +} + +:ROLC.B dst4Ax is (b1_0107=0x3b & b1_size_0=0; b2_0407=0xa) & $(DST4AX) { + c:1 = $(CARRY); + tmp:1 = dst4Ax:1; + $(CARRY) = tmp s< 0; + tmp = (tmp << 1) | c; + dst4Ax = zext(tmp); + setResultFlags(tmp); +} + +:ROLC.W dst4W is (b1_0107=0x3b & b1_size_0=1; b2_0407=0xa) ... & dst4W { + c:2 = zext($(CARRY)); + tmp:2 = dst4W; + $(CARRY) = tmp s< 0; + tmp = (tmp << 1) | c; + dst4W = tmp; + setResultFlags(tmp); +} + +### RORC ### + +:RORC.B dst4B is (b1_0107=0x3b & b1_size_0=0; b2_0407=0xb) ... 
& dst4B { + c:1 = $(CARRY); + tmp:1 = dst4B; + $(CARRY) = (tmp & 1) == 1; + tmp = (tmp >> 1) | (c << 7); + dst4B = tmp; + setResultFlags(tmp); +} + +:RORC.B dst4Ax is (b1_0107=0x3b & b1_size_0=0; b2_0407=0xb) & $(DST4AX) { + c:1 = $(CARRY); + tmp:1 = dst4Ax:1; + $(CARRY) = (tmp & 1) == 1; + tmp = (tmp >> 1) | (c << 7); + dst4Ax = zext(tmp); + setResultFlags(tmp); +} + +:RORC.W dst4W is (b1_0107=0x3b & b1_size_0=1; b2_0407=0xb) ... & dst4W { + c:2 = zext($(CARRY)); + tmp:2 = dst4W; + $(CARRY) = (tmp & 1) == 1; + tmp = (tmp >> 1) | (c << 15); + dst4W = tmp; + setResultFlags(tmp); +} + +### ROT ### + +# (1) ROT.B #imm, dst (right) +:ROT.B srcSimm4Shift_0407, dst4B is (b1_0107=0x70 & b1_size_0=0; srcSimm4Shift_0407 & b2_shiftSign_7=1) ... & dst4B { + rightShift:1 = -srcSimm4Shift_0407; + tmp:1 = dst4B; + $(CARRY) = (tmp >> (rightShift - 1)) & 1; + tmp = (tmp >> rightShift) | (tmp << (8 - rightShift)); + dst4B = tmp; + setResultFlags(tmp); +} + +# (1) ROT.B #imm, Ax (right) +:ROT.B srcSimm4Shift_0407, dst4Ax is (b1_0107=0x70 & b1_size_0=0; srcSimm4Shift_0407 & b2_shiftSign_7=1) & $(DST4AX) { + rightShift:1 = -srcSimm4Shift_0407; + tmp:1 = dst4Ax:1; + $(CARRY) = (tmp >> (rightShift - 1)) & 1; + tmp = (tmp >> rightShift) | (tmp << (8 - rightShift)); + dst4Ax = zext(tmp); + setResultFlags(tmp); +} + +# (1) ROT.W #imm, dst (right) +:ROT.W srcSimm4Shift_0407, dst4W is (b1_0107=0x70 & b1_size_0=1; srcSimm4Shift_0407 & b2_shiftSign_7=1) ... & dst4W { + rightShift:1 = -srcSimm4Shift_0407; + tmp:2 = dst4W; + c:2 = (tmp >> (rightShift - 1)); + $(CARRY) = c:1 & 1; + tmp = (tmp >> rightShift) | (tmp << (16 - rightShift)); + dst4W = tmp; + setResultFlags(tmp); +} + +# (1) ROT.B #imm, dst (left) +:ROT.B srcSimm4Shift_0407, dst4B is (b1_0107=0x70 & b1_size_0=0; srcSimm4Shift_0407 & b2_shiftSign_7=0) ... 
& dst4B { + leftShift:1 = srcSimm4Shift_0407; + tmp:1 = dst4B; + $(CARRY) = (tmp >> (8 - leftShift)) & 1; + tmp = (tmp << leftShift) | (tmp >> (8 - leftShift)); + dst4B = tmp; + setResultFlags(tmp); +} + +# (1) ROT.B #imm, Ax (left) +:ROT.B srcSimm4Shift_0407, dst4Ax is (b1_0107=0x70 & b1_size_0=0; srcSimm4Shift_0407 & b2_shiftSign_7=0) & $(DST4AX) { + leftShift:1 = srcSimm4Shift_0407; + tmp:1 = dst4Ax:1; + $(CARRY) = (tmp >> (8 - leftShift)) & 1; + tmp = (tmp << leftShift) | (tmp >> (8 - leftShift)); + dst4Ax = zext(tmp); + setResultFlags(tmp); +} + +# (1) ROT.W #imm, dst (left) +:ROT.W srcSimm4Shift_0407, dst4W is (b1_0107=0x70 & b1_size_0=1; srcSimm4Shift_0407 & b2_shiftSign_7=0) ... & dst4W { + leftShift:1 = srcSimm4Shift_0407; + tmp:2 = dst4W; + c:2 = (tmp >> (16 - leftShift)); + $(CARRY) = c:1 & 1; + tmp = (tmp << leftShift) | (tmp >> (16 - leftShift)); + dst4W = tmp; + setResultFlags(tmp); +} + +# (2) ROT.B R1H, dst +:ROT.B R1H, dst4B is (R1H & b1_0107=0x3a & b1_size_0=0; b2_0407=0x6) ... & dst4B { + if (R1H == 0) goto inst_next; + shift:1 = R1H s% 8; + tmp:1 = dst4B; + if (shift s>= 0) goto ; + shift = -shift; + $(CARRY) = (tmp >> (shift - 1)) & 1; + tmp = (tmp >> shift) | (tmp << (8 - shift)); + goto ; + + $(CARRY) = (tmp >> (8 - shift)) & 1; + tmp = (tmp << shift) | (tmp >> (8 - shift)); + + dst4B = tmp; + setResultFlags(tmp); +} + +# (2) ROT.B R1H, Ax +:ROT.B R1H, dst4Ax is (R1H & b1_0107=0x3a & b1_size_0=0; b2_0407=0x6) & $(DST4AX) { + if (R1H == 0) goto inst_next; + shift:1 = R1H s% 8; + tmp:1 = dst4Ax:1; + if (shift s>= 0) goto ; + shift = -shift; + $(CARRY) = (tmp >> (shift - 1)) & 1; + tmp = (tmp >> shift) | (tmp << (8 - shift)); + goto ; + + $(CARRY) = (tmp >> (8 - shift)) & 1; + tmp = (tmp << shift) | (tmp >> (8 - shift)); + + dst4Ax = zext(tmp); + setResultFlags(tmp); +} + +# (2) ROT.W R1H, dst +:ROT.W R1H, dst4W is (R1H & b1_0107=0x3a & b1_size_0=1; b2_0407=0x6) ... 
& dst4W { + if (R1H == 0) goto inst_next; + shift:1 = R1H s% 16; + tmp:2 = dst4W; + if (shift s>= 0) goto ; + shift = -shift; + c:2 = (tmp >> (shift - 1)); + tmp = (tmp >> shift) | (tmp << (16 - shift)); + goto ; + + c = (tmp >> (16 - shift)); + tmp = (tmp << shift) | (tmp >> (16 - shift)); + + $(CARRY) = c:1 & 1; + dst4W = tmp; + setResultFlags(tmp); +} + +### RTS ### + +:RTS is b1_0007=0xf3 { + pc:3 = 0; + pop3(pc); + return [pc]; +} + +### SBB ### + +# (1) SBB.B #imm, dst +:SBB.B srcSimm8, dst4B is ((b1_0107=0x3b & b1_size_0=0; b2_0407=0x7) ... & dst4B); srcSimm8 { + tmp:1 = dst4B; + c:1 = $(CARRY); + setSubtract3Flags(tmp, srcSimm8, c); + tmp = tmp - srcSimm8 - c; + dst4B = tmp; + setResultFlags(tmp); +} + +# (1) SBB.B #imm, Ax +:SBB.B srcSimm8, dst4Ax is ((b1_0107=0x3b & b1_size_0=0; b2_0407=0x7) & $(DST4AX)); srcSimm8 { + tmp:1 = dst4Ax:1; + c:1 = $(CARRY); + setSubtract3Flags(tmp, srcSimm8, c); + tmp = tmp - srcSimm8 - c; + dst4Ax = zext(tmp); + setResultFlags(tmp); +} + +# (1) SBB.W #imm, dst +:SBB.W srcSimm16, dst4W is ((b1_0107=0x3b & b1_size_0=1; b2_0407=0x7) ... & dst4W); srcSimm16 { + tmp:2 = dst4W; + c:2 = zext($(CARRY)); + setSubtract3Flags(tmp, srcSimm16, c); + tmp = tmp - srcSimm16 - c; + dst4W = tmp; + setResultFlags(tmp); +} + +# (2) SBB.B src, dst +:SBB.B src4B, dst4B_afterSrc4 is (b1_0107=0x5c & b1_size_0=0) ... & src4B ... & dst4B_afterSrc4 ... { + tmp:1 = dst4B_afterSrc4; + s:1 = src4B; + c:1 = $(CARRY); + setSubtract3Flags(tmp, s, c); + tmp = tmp - s - c; + dst4B_afterSrc4 = tmp; + setResultFlags(tmp); +} + +# (2) SBB.B src, Ax +:SBB.B src4B, dst4Ax is (b1_0107=0x5c & b1_size_0=0) ... & src4B & $(DST4AX) ... { + tmp:1 = dst4Ax:1; + s:1 = src4B; + c:1 = $(CARRY); + setSubtract3Flags(tmp, s, c); + tmp = tmp - s - c; + dst4Ax = zext(tmp); + setResultFlags(tmp); +} + +# (2) SBB.W src, dst +:SBB.W src4W, dst4W_afterSrc4 is (b1_0107=0x5c & b1_size_0=1) ... & src4W ... & dst4W_afterSrc4 ... 
{ + tmp:2 = dst4W_afterSrc4; + s:2 = src4W; + c:2 = zext($(CARRY)); + setSubtract3Flags(tmp, s, c); + tmp = tmp - s - c; + dst4W_afterSrc4 = tmp; + setResultFlags(tmp); +} + +##### SBJNZ - PSUEDO-OP! SAME AS ADJNZ ##### +### SHA ### +macro SHAsetShiftRightFlags(val,shift,result) { + local c = (val >> (shift - 1)) & 1; + $(CARRY) = c:1; + local mask = ~(-(1 << shift)); + allOnes:1 = (mask & val) == mask; + allZeros:1 = (mask & val) == 0; + $(OVERFLOW) = (result s< 0 && allOnes) || (result s>= 0 && allZeros); + setResultFlags(result); +} + +macro SHAsetShiftLeftFlags(val,shift,result,sze) { + local c = (val >> (sze - shift)) & 1; + $(CARRY) = c:1; + local mask = -(1 << shift); + allOnes:1 = (mask & val) == mask; + allZeros:1 = (mask & val) == 0; + $(OVERFLOW) = (result s< 0 && allOnes) || (result s>= 0 && allZeros); + setResultFlags(result); +} + +# (1) SHA.B #imm4, dst (right) +:SHA.B srcSimm4Shift_0407, dst4B is (b1_0107=0x78 & b1_size_0=0; srcSimm4Shift_0407 & b2_shiftSign_7=1) ... & dst4B { + val:1 = dst4B; + shift:1 = -srcSimm4Shift_0407; + tmp:1 = val s>> shift; + dst4B = tmp; + SHAsetShiftRightFlags(val, shift, tmp); +} + +# (1) SHA.B #imm4, Ax (right) +:SHA.B srcSimm4Shift_0407, dst4Ax is (b1_0107=0x78 & b1_size_0=0; srcSimm4Shift_0407 & b2_shiftSign_7=1) & $(DST4AX) { + val:1 = dst4Ax:1; + shift:1 = -srcSimm4Shift_0407; + tmp:1 = val s>> shift; + dst4Ax = zext(tmp); + SHAsetShiftRightFlags(val, shift, tmp); +} + +# (1) SHA.W #imm4, dst (right) +:SHA.W srcSimm4Shift_0407, dst4W is (b1_0107=0x78 & b1_size_0=1; srcSimm4Shift_0407 & b2_shiftSign_7=1) ... & dst4W { + val:2 = dst4W; + shift:1 = -srcSimm4Shift_0407; + tmp:2 = val s>> shift; + dst4W = tmp; + SHAsetShiftRightFlags(val, shift, tmp); +} + +# (1) SHA.B #imm4, dst (left) +:SHA.B srcSimm4Shift_0407, dst4B is (b1_0107=0x78 & b1_size_0=0; srcSimm4Shift_0407 & b2_shiftSign_7=0) ... 
& dst4B { + val:1 = dst4B; + shift:1 = srcSimm4Shift_0407; + tmp:1 = val << shift; + dst4B = tmp; + SHAsetShiftLeftFlags(val, shift, tmp, 8); +} + +# (1) SHA.B #imm4, Ax (left) +:SHA.B srcSimm4Shift_0407, dst4Ax is (b1_0107=0x78 & b1_size_0=0; srcSimm4Shift_0407 & b2_shiftSign_7=0) & $(DST4AX) { + val:1 = dst4Ax:1; + shift:1 = srcSimm4Shift_0407; + tmp:1 = val << shift; + dst4Ax = zext(tmp); + SHAsetShiftLeftFlags(val, shift, tmp, 8); +} + +# (1) SHA.W #imm4, dst (left) +:SHA.W srcSimm4Shift_0407, dst4W is (b1_0107=0x78 & b1_size_0=1; srcSimm4Shift_0407 & b2_shiftSign_7=0) ... & dst4W { + val:2 = dst4W; + shift:1 = srcSimm4Shift_0407; + tmp:2 = val << shift; + dst4W = tmp; + SHAsetShiftLeftFlags(val, shift, tmp, 16); +} + +# (2) SHA.B R1H, dst +:SHA.B R1H, dst4B is (R1H & b1_0107=0x3a & b1_size_0=0; b2_0407=0xf) ... & dst4B { + if (R1H == 0) goto inst_next; + shift:1 = R1H; + val:1 = dst4B; + if (shift s> 0) goto <left>; + shift = -shift; + tmp:1 = val s>> shift; + dst4B = tmp; + SHAsetShiftRightFlags(val, shift, tmp); + goto inst_next; + <left> + tmp = val << shift; + dst4B = tmp; + SHAsetShiftLeftFlags(val, shift, tmp, 8); +} + +# (2) SHA.B R1H, Ax +:SHA.B R1H, dst4Ax is (R1H & b1_0107=0x3a & b1_size_0=0; b2_0407=0xf) & $(DST4AX) { + if (R1H == 0) goto inst_next; + shift:1 = R1H; + val:1 = dst4Ax:1; + if (shift s> 0) goto <left>; + shift = -shift; + tmp:1 = val s>> shift; + dst4Ax = zext(tmp); + SHAsetShiftRightFlags(val, shift, tmp); + goto inst_next; + <left> + tmp = val << shift; + dst4Ax = zext(tmp); + SHAsetShiftLeftFlags(val, shift, tmp, 8); +} + +# (2) SHA.W R1H, dst +:SHA.W R1H, dst4W is (R1H & b1_0107=0x3a & b1_size_0=1; b2_0407=0xf) ... 
& dst4W { + if (R1H == 0) goto inst_next; + shift:1 = R1H; + val:2 = dst4W; + if (shift s> 0) goto <left>; + shift = -shift; + tmp:2 = val s>> shift; + dst4W = tmp; + SHAsetShiftRightFlags(val, shift, tmp); + goto inst_next; + <left> + tmp = val << shift; + dst4W = tmp; + SHAsetShiftLeftFlags(val, shift, tmp, 16); +} + +# (3) SHA.L #imm4, R2R0/R3R1 (right) +:SHA.L srcSimm4Shift_0003, b2_reg32 is b1_0007=0xeb; b2_0507=0x5 & b2_reg32 & srcSimm4Shift_0003 & b2_shiftSign_3=1 { + val:4 = b2_reg32; + shift:1 = -srcSimm4Shift_0003; + tmp:4 = val s>> shift; + b2_reg32 = tmp; + SHAsetShiftRightFlags(val, shift, tmp); +} + +# (3) SHA.L #imm4, R2R0/R3R1 (left) +:SHA.L srcSimm4Shift_0003, b2_reg32 is b1_0007=0xeb; b2_0507=0x5 & b2_reg32 & srcSimm4Shift_0003 & b2_shiftSign_3=0 { + val:4 = b2_reg32; + shift:1 = srcSimm4Shift_0003; + tmp:4 = val << shift; + b2_reg32 = tmp; + SHAsetShiftLeftFlags(val, shift, tmp, 32); +} + +# (4) SHA.L R1H, R2R0/R3R1 +:SHA.L R1H, b2_reg32 is R1H & b1_0007=0xeb; b2_0507=0x1 & b2_reg32 & b2_0003=0x1 { + if (R1H == 0) goto inst_next; + shift:1 = R1H; + val:4 = b2_reg32; + if (shift s> 0) goto <left>; + shift = -shift; + tmp:4 = val s>> shift; + b2_reg32 = tmp; + SHAsetShiftRightFlags(val, shift, tmp); + goto inst_next; + <left> + tmp = val << shift; + b2_reg32 = tmp; + SHAsetShiftLeftFlags(val, shift, tmp, 32); +} + +### SHL ### +macro SHLsetShiftRightFlags(val,shift,result) { + local c = (val >> (shift - 1)) & 1; + $(CARRY) = c:1; + setResultFlags(result); +} + +macro SHLsetShiftLeftFlags(val,shift,result,sze) { + local c = (val >> (sze - shift)) & 1; + $(CARRY) = c:1; + setResultFlags(result); +} + +# (1) SHL.B #imm4, dst (right) +:SHL.B srcSimm4Shift_0407, dst4B is (b1_0107=0x74 & b1_size_0=0; srcSimm4Shift_0407 & b2_shiftSign_7=1) ... 
& dst4B { + val:1 = dst4B; + shift:1 = -srcSimm4Shift_0407; + tmp:1 = val >> shift; + dst4B = tmp; + SHLsetShiftRightFlags(val, shift, tmp); +} + +# (1) SHL.B #imm4, Ax (right) +:SHL.B srcSimm4Shift_0407, dst4Ax is (b1_0107=0x74 & b1_size_0=0; srcSimm4Shift_0407 & b2_shiftSign_7=1) & $(DST4AX) { + val:1 = dst4Ax:1; + shift:1 = -srcSimm4Shift_0407; + tmp:1 = val >> shift; + dst4Ax = zext(tmp); + SHLsetShiftRightFlags(val, shift, tmp); +} + +# (1) SHL.W #imm4, dst (right) +:SHL.W srcSimm4Shift_0407, dst4W is (b1_0107=0x74 & b1_size_0=1; srcSimm4Shift_0407 & b2_shiftSign_7=1) ... & dst4W { + val:2 = dst4W; + shift:1 = -srcSimm4Shift_0407; + tmp:2 = val >> shift; + dst4W = tmp; + SHLsetShiftRightFlags(val, shift, tmp); +} + +# (1) SHL.B #imm4, dst (left) +:SHL.B srcSimm4Shift_0407, dst4B is (b1_0107=0x74 & b1_size_0=0; srcSimm4Shift_0407 & b2_shiftSign_7=0) ... & dst4B { + val:1 = dst4B; + shift:1 = srcSimm4Shift_0407; + tmp:1 = val << shift; + dst4B = tmp; + SHLsetShiftLeftFlags(val, shift, tmp, 8); +} + +# (1) SHL.B #imm4, Ax (left) +:SHL.B srcSimm4Shift_0407, dst4Ax is (b1_0107=0x74 & b1_size_0=0; srcSimm4Shift_0407 & b2_shiftSign_7=0) & $(DST4AX) { + val:1 = dst4Ax:1; + shift:1 = srcSimm4Shift_0407; + tmp:1 = val << shift; + dst4Ax = zext(tmp); + SHLsetShiftLeftFlags(val, shift, tmp, 8); +} + +# (1) SHL.W #imm4, dst (left) +:SHL.W srcSimm4Shift_0407, dst4W is (b1_0107=0x74 & b1_size_0=1; srcSimm4Shift_0407 & b2_shiftSign_7=0) ... & dst4W { + val:2 = dst4W; + shift:1 = srcSimm4Shift_0407; + tmp:2 = val << shift; + dst4W = tmp; + SHLsetShiftLeftFlags(val, shift, tmp, 16); +} + +# (2) SHL.B R1H, dst +:SHL.B R1H, dst4B is (R1H & b1_0107=0x3a & b1_size_0=0; b2_0407=0xe) ... 
& dst4B { + if (R1H == 0) goto inst_next; + shift:1 = R1H; + val:1 = dst4B; + if (shift s> 0) goto <left>; + shift = -shift; + tmp:1 = val >> shift; + dst4B = tmp; + SHLsetShiftRightFlags(val, shift, tmp); + goto inst_next; + <left> + tmp = val << shift; + dst4B = tmp; + SHLsetShiftLeftFlags(val, shift, tmp, 8); +} + +# (2) SHL.B R1H, Ax +:SHL.B R1H, dst4Ax is (R1H & b1_0107=0x3a & b1_size_0=0; b2_0407=0xe) & $(DST4AX) { + if (R1H == 0) goto inst_next; + shift:1 = R1H; + val:1 = dst4Ax:1; + if (shift s> 0) goto <left>; + shift = -shift; + tmp:1 = val >> shift; + dst4Ax = zext(tmp); + SHLsetShiftRightFlags(val, shift, tmp); + goto inst_next; + <left> + tmp = val << shift; + dst4Ax = zext(tmp); + SHLsetShiftLeftFlags(val, shift, tmp, 8); +} + +# (2) SHL.W R1H, dst +:SHL.W R1H, dst4W is (R1H & b1_0107=0x3a & b1_size_0=1; b2_0407=0xe) ... & dst4W { + if (R1H == 0) goto inst_next; + shift:1 = R1H; + val:2 = dst4W; + if (shift s> 0) goto <left>; + shift = -shift; + tmp:2 = val >> shift; + dst4W = tmp; + SHLsetShiftRightFlags(val, shift, tmp); + goto inst_next; + <left> + tmp = val << shift; + dst4W = tmp; + SHLsetShiftLeftFlags(val, shift, tmp, 16); +} + +# (3) SHL.L #imm4, R2R0/R3R1 (right) +:SHL.L srcSimm4Shift_0003, b2_reg32 is b1_0007=0xeb; b2_0507=0x4 & b2_reg32 & srcSimm4Shift_0003 & b2_shiftSign_3=1 { + val:4 = b2_reg32; + shift:1 = -srcSimm4Shift_0003; + tmp:4 = val >> shift; + b2_reg32 = tmp; + SHLsetShiftRightFlags(val, shift, tmp); +} + +# (3) SHL.L #imm4, R2R0/R3R1 (left) +:SHL.L srcSimm4Shift_0003, b2_reg32 is b1_0007=0xeb; b2_0507=0x4 & b2_reg32 & srcSimm4Shift_0003 & b2_shiftSign_3=0 { + val:4 = b2_reg32; + shift:1 = srcSimm4Shift_0003; + tmp:4 = val << shift; + b2_reg32 = tmp; + SHLsetShiftLeftFlags(val, shift, tmp, 32); +} + +# (4) SHL.L R1H, R2R0/R3R1 +:SHL.L R1H, b2_reg32 is R1H & b1_0007=0xeb; b2_0507=0x0 & b2_reg32 & b2_0003=0x1 { + if (R1H == 0) goto inst_next; + shift:1 = R1H; + val:4 = b2_reg32; + if (shift s> 0) goto <left>; + shift = -shift; + tmp:4 = val >> shift; + b2_reg32 = tmp; + 
SHLsetShiftRightFlags(val, shift, tmp); + goto inst_next; + <left> + tmp = val << shift; + b2_reg32 = tmp; + SHLsetShiftLeftFlags(val, shift, tmp, 32); +} + +### SMOVB ### + +:SMOVB.B is b1_0107=0x3e & b1_size_0=0; b2_0007=0xe9 { + if (R3 == 0) goto inst_next; + ptr0:3 = (zext(R1H) << 16) + zext(A0); + ptr1:3 = zext(A1); + *:1 ptr1 = *:1 ptr0; + A1 = A1 - 1; + ptr0 = ptr0 - 1; + A0 = ptr0:2; + R1H = ptr0(2); + R3 = R3 - 1; + goto inst_start; +} + +:SMOVB.W is b1_0107=0x3e & b1_size_0=1; b2_0007=0xe9 { + if (R3 == 0) goto inst_next; + ptr0:3 = (zext(R1H) << 16) + zext(A0); + ptr1:3 = zext(A1); + *:2 ptr1 = *:2 ptr0; + A1 = A1 - 2; + ptr0 = ptr0 - 2; + A0 = ptr0:2; + R1H = ptr0(2); + R3 = R3 - 1; + goto inst_start; +} + +### SMOVF ### + +:SMOVF.B is b1_0107=0x3e & b1_size_0=0; b2_0007=0xe8 { + if (R3 == 0) goto inst_next; + ptr0:3 = (zext(R1H) << 16) + zext(A0); + ptr1:3 = zext(A1); + *:1 ptr1 = *:1 ptr0; + A1 = A1 + 1; + ptr0 = ptr0 + 1; + A0 = ptr0:2; + R1H = ptr0(2); + R3 = R3 - 1; + goto inst_start; +} + +:SMOVF.W is b1_0107=0x3e & b1_size_0=1; b2_0007=0xe8 { + if (R3 == 0) goto inst_next; + ptr0:3 = (zext(R1H) << 16) + zext(A0); + ptr1:3 = zext(A1); + *:2 ptr1 = *:2 ptr0; + A1 = A1 + 2; + ptr0 = ptr0 + 2; + A0 = ptr0:2; + R1H = ptr0(2); + R3 = R3 - 1; + goto inst_start; +} + +### SSTR ### + +:SSTR.B is b1_0107=0x3e & b1_size_0=0; b2_0007=0xea { + if (R3 == 0) goto inst_next; + ptr1:3 = zext(A1); + *:1 ptr1 = R0L; + A1 = A1 + 1; + R3 = R3 - 1; + goto inst_start; +} + +:SSTR.W is b1_0107=0x3e & b1_size_0=1; b2_0007=0xea { + if (R3 == 0) goto inst_next; + ptr1:3 = zext(A1); + *:2 ptr1 = R0; + A1 = A1 + 2; + R3 = R3 - 1; + goto inst_start; +} + +### STC ### + +# (1) STC src, dst +:STC b2_creg16, dst4W is (b1_0007=0x7b; b2_0707=1 & b2_creg16) ... & dst4W { + dst4W = b2_creg16; +} + +# (2) STC PC, dst (dst=register) +:STC PC, dst4L is (PC & b1_0007=0x7c; b2_0407=0xc) ... 
& dst4L { + dst4L = zext(PC); +} + +# (2) STC PC, dst (dst=memory) +:STC PC, dst4T is (PC & b1_0007=0x7c; b2_0407=0xc) ... & $(DST4T) { + dst4T = inst_next; # PC value refers to next instruction address +} + +### STCTX ### + +:STCTX abs16offset, abs20offset is b1_0007=0xb6; b2_0007=0xd3; abs16offset; imm20_dat & abs20offset { + + taskNum:1 = abs16offset; # load task number stored at abs16 + ptr:3 = imm20_dat + (zext(taskNum) * 2); # compute table entry address relative to abs20 + regInfo:1 = *:1 ptr; + ptr = ptr + 1; + spCorrect:1 = *:1 ptr; + + ptr = zext(SP); + + if ((regInfo & 0x80) == 0) goto <noFB>; + ptr = ptr - 2; + *:2 ptr = FB; + <noFB> + regInfo = regInfo << 1; + if ((regInfo & 0x80) == 0) goto <noSB>; + ptr = ptr - 2; + *:2 ptr = SB; + <noSB> + regInfo = regInfo << 1; + if ((regInfo & 0x80) == 0) goto <noA1>; + ptr = ptr - 2; + *:2 ptr = A1; + <noA1> + regInfo = regInfo << 1; + if ((regInfo & 0x80) == 0) goto <noA0>; + ptr = ptr - 2; + *:2 ptr = A0; + <noA0> + regInfo = regInfo << 1; + if ((regInfo & 0x80) == 0) goto <noR3>; + ptr = ptr - 2; + *:2 ptr = R3; + <noR3> + regInfo = regInfo << 1; + if ((regInfo & 0x80) == 0) goto <noR2>; + ptr = ptr - 2; + *:2 ptr = R2; + <noR2> + regInfo = regInfo << 1; + if ((regInfo & 0x80) == 0) goto <noR1>; + ptr = ptr - 2; + *:2 ptr = R1; + <noR1> + regInfo = regInfo << 1; + if ((regInfo & 0x80) == 0) goto <noR0>; + ptr = ptr - 2; + *:2 ptr = R0; + <noR0> + SP = SP - zext(spCorrect); +} + +### STE ### + +# (1) STE.B src, abs20 +:STE.B dst4B, abs20offset is ((b1_0107=0x3a & b1_size_0=0; b2_0407=0) ... & dst4B); abs20offset { + val:1 = dst4B; + abs20offset = val; + setResultFlags(val); +} + +# (1) STE.W src, abs20 +:STE.W dst4W, abs20offsetW is ((b1_0107=0x3a & b1_size_0=1; b2_0407=0) ... & dst4W); abs20offsetW { + val:2 = dst4W; + abs20offsetW = val; + setResultFlags(val); +} + +# (2) STE.B src, dsp:20[A0] +:STE.B dst4B, dsp20A0B is ((b1_0107=0x3a & b1_size_0=0; b2_0407=0x1) ... 
& dst4B); dsp20A0B { + val:1 = dst4B; + dsp20A0B = val; + setResultFlags(val); +} + +# (2) STE.W src, dsp:20[A0] +:STE.W dst4W, dsp20A0W is ((b1_0107=0x3a & b1_size_0=1; b2_0407=0x1) ... & dst4W); dsp20A0W { + val:2 = dst4W; + dsp20A0W = val; + setResultFlags(val); +} + +steA1A0B: "["^A1A0^"]" is A1A0 { ptr:3 = A1A0:3; export *:1 ptr; } + +steA1A0W: "["^A1A0^"]" is A1A0 { ptr:3 = A1A0:3; export *:2 ptr; } + +# (3) STE.B src, [A1A0] +:STE.B dst4B, steA1A0B is (steA1A0B & b1_0107=0x3a & b1_size_0=0; b2_0407=0x2) ... & dst4B { + val:1 = dst4B; + steA1A0B = val; + setResultFlags(val); +} + +# (3) STE.W src, [A1A0] +:STE.W dst4W, steA1A0W is (steA1A0W & b1_0107=0x3a & b1_size_0=1; b2_0407=0x2) ... & dst4W { + val:2 = dst4W; + steA1A0W = val; + setResultFlags(val); +} + +### STNZ ### + +:STNZ srcImm8, dst3B_afterDsp8 is (b1_0307=0x1a; srcImm8) ... & $(DST3B_AFTER_DSP8) { + if ($(ZERO) != 0) goto inst_next; + dst3B_afterDsp8 = srcImm8; +} + +### STZ ### + +:STZ srcImm8, dst3B_afterDsp8 is (b1_0307=0x19; srcImm8) ... & $(DST3B_AFTER_DSP8) { + if ($(ZERO) == 0) goto inst_next; + dst3B_afterDsp8 = srcImm8; +} + +### STZX ### +skipBytesBeforeImm82: is b1_0007; imm8_dat { } # imm81 +skipBytesBeforeImm82: is b1_d3=0x5; imm16_dat { } # imm81; dsp8 +skipBytesBeforeImm82: is b1_d3=0x6; imm16_dat { } # imm81; dsp8 +skipBytesBeforeImm82: is b1_d3=0x7; imm24_dat { } # imm81; abs16 + +stzxImm82: "#"^imm8_dat is skipBytesBeforeImm82; imm8_dat { export *[const]:1 imm8_dat; } + +:STZX srcImm8, stzxImm82, dst3B_afterDsp8 is (b1_0307=0x1b; srcImm8) ... & $(DST3B_AFTER_DSP8) ... & stzxImm82 { + z:1 = $(ZERO); + dst3B_afterDsp8 = (z * srcImm8) + (!z * stzxImm82); +} + +### SUB ### + +# (1) SUB.B:G #simm, dst +:SUB^".B:G" srcSimm8, dst4B is ((b1_0107=0x3b & b1_size_0=0; b2_0407=0x5) ... 
& dst4B); srcSimm8 { + tmp:1 = dst4B; + setSubtractFlags(tmp, srcSimm8); + tmp = tmp - srcSimm8; + dst4B = tmp; + setResultFlags(tmp); +} + +# (1) SUB.B:G #simm, Ax +:SUB^".B:G" srcSimm8, dst4Ax is ((b1_0107=0x3b & b1_size_0=0; b2_0407=0x5) & $(DST4AX)); srcSimm8 { + tmp:1 = dst4Ax:1; + setSubtractFlags(tmp, srcSimm8); + tmp = tmp - srcSimm8; + dst4Ax = zext(tmp); + setResultFlags(tmp); +} + +# (1) SUB.W:G #simm, dst +:SUB^".W:G" srcSimm16, dst4W is ((b1_0107=0x3b & b1_size_0=1; b2_0407=0x5) ... & dst4W); srcSimm16 { + tmp:2 = dst4W; + setSubtractFlags(tmp, srcSimm16); + tmp = tmp - srcSimm16; + dst4W = tmp; + setResultFlags(tmp); +} + +# (2) SUB.B:S #simm, dst +:SUB^".B:S" srcSimm8, dst3B_afterDsp8 is (b1_0307=0x11; srcSimm8) ... & $(DST3B_AFTER_DSP8) { + tmp:1 = dst3B_afterDsp8; + setSubtractFlags(tmp, srcSimm8); + tmp = tmp - srcSimm8; + dst3B_afterDsp8 = tmp; + setResultFlags(tmp); +} + +# (3) SUB.B:G src, dst +:SUB^".B:G" src4B, dst4B_afterSrc4 is (b1_0107=0x54 & b1_size_0=0) ... & src4B ... & dst4B_afterSrc4 ... { + tmp:1 = dst4B_afterSrc4; + src:1 = src4B; + setSubtractFlags(tmp, src); + tmp = tmp - src; + dst4B_afterSrc4 = tmp; + setResultFlags(tmp); +} + +# (3) SUB.B:G src, Ax +:SUB^".B:G" src4B, dst4Ax is (b1_0107=0x54 & b1_size_0=0) ... & src4B & $(DST4AX) ... { + tmp:1 = dst4Ax:1; + src:1 = src4B; + setSubtractFlags(tmp, src); + tmp = tmp - src; + dst4Ax = zext(tmp); + setResultFlags(tmp); +} + +# (3) SUB.W:G src, dst +:SUB^".W:G" src4W, dst4W_afterSrc4 is (b1_0107=0x54 & b1_size_0=1) ... & src4W ... & dst4W_afterSrc4 ... { + tmp:2 = dst4W_afterSrc4; + src:2 = src4W; + setSubtractFlags(tmp, src); + tmp = tmp - src; + dst4W_afterSrc4 = tmp; + setResultFlags(tmp); +} + +# (4) SUB.B:S src, R0H/R0L +:SUB^".B:S" dst2B, b1_2_reg8 is (b1_0307=0x5 & b1_2_reg8) ... 
& dst2B { + tmp:1 = b1_2_reg8; + src:1 = dst2B; + setSubtractFlags(tmp, src); + tmp = tmp - src; + b1_2_reg8 = tmp; + setResultFlags(tmp); +} + +### TST ### + +# (1) TST.B #imm, dst +:TST.B srcImm8, dst4B is ((b1_0107=0x3b & b1_size_0=0; b2_0407=0x0) ... & dst4B); srcImm8 { + tmp:1 = dst4B & srcImm8; + setResultFlags(tmp); +} + +# (1) TST.W #imm, dst +:TST.W srcImm16, dst4W is ((b1_0107=0x3b & b1_size_0=1; b2_0407=0x0) ... & dst4W); srcImm16 { + tmp:2 = dst4W & srcImm16; + setResultFlags(tmp); +} + +# (2) TST.B src, dst +:TST.B src4B, dst4B_afterSrc4 is (b1_0107=0x40 & b1_size_0=0) ... & src4B ... & dst4B_afterSrc4 ... { + tmp:1 = dst4B_afterSrc4 & src4B; + setResultFlags(tmp); +} + +# (2) TST.W src, dst +:TST.W src4W, dst4W_afterSrc4 is (b1_0107=0x40 & b1_size_0=1) ... & src4W ... & dst4W_afterSrc4 ... { + tmp:2 = dst4W_afterSrc4 & src4W; + setResultFlags(tmp); +} + +##### UND ##### +# Don't implement this "Undefined" instruction +# :UND is b1_0007=0xff + +### WAIT ### + +:WAIT is b1_0007=0x7d; b2_0007=0xf3 { + Wait(); +} + +### XCHG ### + +:XCHG.B b2_s4_reg8, dst4B is (b1_0107=0x3d & b1_size_0=0; b2_0607=0 & b2_s4_reg8) ... & dst4B { + tmp:1 = dst4B; + dst4B = b2_s4_reg8; + b2_s4_reg8 = tmp; +} + +:XCHG.B b2_s4_reg8, dst4Ax is (b1_0107=0x3d & b1_size_0=0; b2_0607=0 & b2_s4_reg8) & $(DST4AX) { + tmp:1 = dst4Ax:1; + dst4Ax = zext(b2_s4_reg8); + b2_s4_reg8 = tmp; +} + +:XCHG.W b2_s4_reg16, dst4W is (b1_0107=0x3d & b1_size_0=1; b2_0607=0 & b2_s4_reg16) ... & dst4W { + tmp:2 = dst4W; + dst4W = b2_s4_reg16; + b2_s4_reg16 = tmp; +} + +### XOR ### + +# (1) XOR.B:G #imm, dst +:XOR^".B:G" srcImm8, dst4B is ((b1_0107=0x3b & b1_size_0=0; b2_0407=0x1) ... 
& dst4B); srcImm8 { + tmp:1 = dst4B ^ srcImm8; + dst4B = tmp; + setResultFlags(tmp); +} + +# (1) XOR.B:G #imm, Ax +:XOR^".B:G" srcImm8, dst4Ax is ((b1_0107=0x3b & b1_size_0=0; b2_0407=0x1) & $(DST4AX)); srcImm8 { + tmp:1 = dst4Ax:1 ^ srcImm8; + dst4Ax = zext(tmp); + setResultFlags(tmp); +} + +# (1) XOR.W:G #imm, dst +:XOR^".W:G" srcImm16, dst4W is ((b1_0107=0x3b & b1_size_0=1; b2_0407=0x1) ... & dst4W); srcImm16 { + tmp:2 = dst4W ^ srcImm16; + dst4W = tmp; + setResultFlags(tmp); +} + +# (2) XOR.B:G src, dst +:XOR^".B:G" src4B, dst4B_afterSrc4 is (b1_0107=0x44 & b1_size_0=0) ... & src4B ... & dst4B_afterSrc4 ... { + tmp:1 = dst4B_afterSrc4 ^ src4B; + dst4B_afterSrc4 = tmp; + setResultFlags(tmp); +} + +# (2) XOR.B:G src, Ax +:XOR^".B:G" src4B, dst4Ax is (b1_0107=0x44 & b1_size_0=0) ... & src4B & $(DST4AX) ... { + tmp:1 = dst4Ax:1 ^ src4B; + dst4Ax = zext(tmp); + setResultFlags(tmp); +} + +# (2) XOR.W:G src, dst +:XOR^".W:G" src4W, dst4W_afterSrc4 is (b1_0107=0x44 & b1_size_0=1) ... & src4W ... & dst4W_afterSrc4 ... 
{ + tmp:2 = dst4W_afterSrc4 ^ src4W; + dst4W_afterSrc4 = tmp; + setResultFlags(tmp); +} diff --git a/pypcode/processors/M16C/data/languages/M16C_80.cspec b/pypcode/processors/M16C/data/languages/M16C_80.cspec new file mode 100644 index 00000000..dda14808 --- /dev/null +++ b/pypcode/processors/M16C/data/languages/M16C_80.cspec @@ -0,0 +1,31 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/pypcode/processors/M16C/data/languages/M16C_80.ldefs b/pypcode/processors/M16C/data/languages/M16C_80.ldefs new file mode 100644 index 00000000..87812174 --- /dev/null +++ b/pypcode/processors/M16C/data/languages/M16C_80.ldefs @@ -0,0 +1,20 @@ + + + + + + Renesas M16C/80 16-Bit MicroComputer + + + + diff --git a/pypcode/processors/M16C/data/languages/M16C_80.pspec b/pypcode/processors/M16C/data/languages/M16C_80.pspec new file mode 100644 index 00000000..caf732d1 --- /dev/null +++ b/pypcode/processors/M16C/data/languages/M16C_80.pspec @@ -0,0 +1,296 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/pypcode/processors/M16C/data/languages/M16C_80.slaspec b/pypcode/processors/M16C/data/languages/M16C_80.slaspec new file mode 100644 index 00000000..9acb2b71 --- /dev/null +++ b/pypcode/processors/M16C/data/languages/M16C_80.slaspec @@ -0,0 +1,5050 @@ +# +# Renesas M16C/80 16-Bit MicroComputer +# + +# +# Memory Architecture +# +define endian=little; + +define alignment=1; + +define 
space RAM type=ram_space size=3 default; +define space register type=register_space size=2; + +# +# General Registers +# +define register offset=0x0000 size=2 [ + R0 R2 R1 R3 +]; + +define register offset=0x0000 size=1 [ + R0L R0H _ _ R1L R1H _ _ +]; + +define register offset=0x0000 size=4 [ + R2R0 R3R1 +]; + +define register offset=0x0000 size=6 [ + R1R2R0 +]; + +define register offset=0x2000 size=3 [ + A0 A1 +]; + +define register offset=0x3000 size=3 [ + PC # Program Counter + SVP # Save PC Register + VCT # Vector Register + byteIndexOffset # Byte offset for memory (see useByteIndex) + bitIndex # Index offset for bit operations (see useBitIndex) +]; + +define register offset=0x4000 size=3 [ + INTB +]; + +define register offset=0x4000 size=2 [ + INTBL INTBH +]; + +define register offset=0x5000 size=3 [ + SP # Stack Pointer (Represents active stack pointer: ISP or USP) + FB # Frame Base Register + SB # Static Base Register + ISP # Interrupt Stack Pointer +]; + +define register offset=0x6000 size=2 [ + FLG # Flag Register + SVF # Save Flag Register +]; + +@define CARRY "FLG[0,1]" +@define DEBUG "FLG[1,1]" +@define ZERO "FLG[2,1]" +@define SIGN "FLG[3,1]" +@define REG_BANK "FLG[4,1]" +@define OVERFLOW "FLG[5,1]" +@define INTERRUPT "FLG[6,1]" +@define STACK_SEL "FLG[7,1]" +@define IPL "FLG[12,3]" + +define register offset=0x7000 size=2 [ + # These are really 1-Byte registers + DMD0 # DMA mode register + DMD1 # DMA mode register +]; + +define register offset=0x8000 size=2 [ + DCT0 # DMA transfer count register + DCT1 # DMA transfer count register + DRC0 # DMA transfer count reload register + DRC1 # DMA transfer count reload register +]; + +define register offset=0x9000 size=3 [ + DMA0 # DMA memory address register + DMA1 # DMA memory address register + DSA0 # DMA SFR address register + DSA1 # DMA SFR address register + DRA0 # DMA memory address reload register + DRA1 # DMA memory address reload register +]; + +# Define context bits +define register offset=0xA000 
size=4 contextreg; + +define context contextreg + useBitIndex = (0, 0) noflow # =1 use bitIndex instead of bit specified by instruction + useByteIndexOffset = (1, 2) noflow + useSrcByteIndexOffset = (1, 1) noflow + useDstByteIndexOffset = (2, 2) noflow + + # transient context: + phase = (3, 4) # guard for saving off modes before starting instructions + indDst = (5, 5) # =1 indirect destination + indSrc = (6, 6) # =1 indirect source + dstFollowsSrc = (7, 8) # =1 destination add-on data follows 5-bit encoded source add-on data + # =2 destination add-on data follows 8-bit data +; + +define token b0(8) + b0_0007 = (0,7) +; + +define token b1(8) + b1_s5 = (4,6) + b1_s5_4 = (6,6) + b1_d5 = (1,3) + b1_d5_4 = (3,3) + b1_d2 = (4,5) + b1_d1_regAx = (0,0) + b1_size_5 = (5,5) + b1_size_4 = (4,4) + b1_size_0 = (0,0) + b1_0707 = (7,7) + b1_0607 = (6,7) + b1_0507 = (5,7) + b1_0505 = (5,5) + b1_0407 = (4,7) + b1_0406 = (4,6) + b1_0405 = (4,5) + b1_0104 = (1,4) + b1_0103 = (1,3) + b1_0007 = (0,7) + b1_0000 = (0,0) +; + +define token b2(8) + b2_d5_reg8 = (6,7) + b2_s5_reg8 = (4,5) + b2_d5_reg16 = (6,7) + b2_s5_reg16 = (4,5) + b2_d5_reg32 = (6,6) # only d0 used to select double register + b2_s5_reg32 = (4,4) # only s0 used to select double register + b2_d5_regAxSF = (6,7) # selects A0, A1, SB or FB + b2_s5_regAxSF = (4,5) # selects A0, A1, SB or FB + b2_d5_regAx = (6,6) + b2_s5_regAx = (4,4) + b2_d5 = (6,7) + b2_s5 = (4,5) + b2_d5_1 = (7,7) + b2_d5_0 = (6,6) + b2_s5_1 = (5,5) + b2_s5_0 = (4,4) + b2_0707 = (7,7) + b2_0606 = (6,6) + b2_0405 = (4,5) + b2_0307 = (3,7) + b2_0305 = (3,5) + b2_0105 = (1,5) + b2_0102 = (1,2) + b2_0101 = (1,1) + b2_0007 = (0,7) + b2_0005 = (0,5) + b2_0003 = (0,3) + b2_0002 = (0,2) + b2_simm4 = (0,3) signed + b2_shiftSign = (3,3) + b2_bit = (0,2) + b2_reg8 = (0,2) + b2_reg16 = (0,2) + b2_creg16 = (0,2) + b2_creg24 = (0,2) + b2_dreg24 = (0,2) + b2_reg32 = (0,0) + b2_regAx = (0,0) +; + +define token imm8(8) + simm8_dat = (0,7) signed + imm8_dat = (0,7) + 
imm6_dat = (2,7) + cnd_dat = (0,3) + imm8_0001 = (0,1) + regBit7 = (7,7) + regBit6 = (6,6) + regBit5 = (5,5) + regBit4 = (4,4) + regBit3 = (3,3) + regBit2 = (2,2) + regBit1 = (1,1) + regBit0 = (0,0) +; + +define token imm16(16) + simm16_dat = (0,15) signed + imm16_dat = (0,15) +; + +define token imm24(24) + simm24_dat = (0,23) signed + imm24_dat = (0,23) +; + +define token imm32(32) + simm32_dat = (0,31) signed + imm32_dat = (0,31) +; + +attach variables [ b2_s5_reg32 b2_d5_reg32 ] [ R2R0 R3R1 ]; +attach variables [ b2_s5_reg16 b2_d5_reg16 ] [ R2 R3 R0 R1 ]; +attach variables [ b2_s5_reg8 b2_d5_reg8 ] [ R0H R1H R0L R1L ]; +attach variables [ b2_s5_regAx b2_d5_regAx b1_d1_regAx b2_regAx ] [ A0 A1 ]; +attach variables [ b2_s5_regAxSF b2_d5_regAxSF ] [ A0 A1 SB FB ]; +attach variables [ b2_creg16 ] [ DCT0 DCT1 FLG SVF DRC0 DRC1 DMD0 DMD1 ]; +attach variables [ b2_creg24 ] [ INTB SP SB FB SVP VCT _ ISP ]; +attach variables [ b2_dreg24 ] [ _ _ DMA0 DMA1 DRA0 DRA1 DSA0 DSA1 ]; +attach variables [ b2_reg32 ] [ R2R0 R3R1 ]; + +# XCHG register attach +attach variables [ b2_reg8 ] [ R0L R1L _ _ R0H R1H _ _ ]; +attach variables [ b2_reg16 ] [ R0 R1 _ _ R2 R3 _ _ ]; + +# +# PCode Op +# +define pcodeop Break; # BRK +define pcodeop Break2; # BRK2 +define pcodeop DecimalAdd; # DADD +define pcodeop DecimalAddWithCarry; # DADC +define pcodeop DecimalSubtractWithBorrow; # DSBB +define pcodeop DecimalSubtract; # DSUB +define pcodeop Wait; # WAIT + +# +# FLAG MACROS... 
+# +# Set zero and sign flags from result +macro setResultFlags(result) { + $(SIGN) = (result s< 0x0); + $(ZERO) = (result == 0x0); +} + +# Set carry and overflow flags for addition +macro setAdd3Flags(v1, v2, v3) { + local add13 = v1 + v3; + $(CARRY) = carry(v1,v3) || carry(v2,add13); + $(OVERFLOW) = scarry(v1,v3) || scarry(v2,add13); +} + +# Set carry and overflow flags for addition +macro setAddFlags(v1, v2) { + $(CARRY) = carry(v1, v2); + $(OVERFLOW) = scarry(v1, v2); +} + +# Set overflow flags for subtraction of op3,op2 from op1 (op1-op2-op3) +macro setSubtract3Flags(v1, v2, v3) { + local add12 = v1 - v2; + $(CARRY) = (v1 >= v2) || (add12 >= v3); + $(OVERFLOW) = sborrow(v1, v2) || sborrow(add12, v3); +} + +# Set overflow flags for subtraction of op2 from op1 (op1-op2) +macro setSubtractFlags(v1, v2) { + $(CARRY) = (v1 s>= v2); + $(OVERFLOW) = sborrow(v1, v2); +} + +macro push1(val) { + SP = SP - 2; + *:1 SP = val; +} + +macro push2(val) { + SP = SP - 2; + *:2 SP = val; +} + +macro push3(val) { + SP = SP - 4; + *:3 SP = val; +} + +macro push4(val) { + SP = SP - 4; + *:4 SP = val; +} + +macro pop1(val) { + val = *:1 SP; + SP = SP + 2; +} + +macro pop2(val) { + val = *:2 SP; + SP = SP + 2; +} + +macro pop3(val) { + val = *:3 SP; + SP = SP + 4; +} + +macro pop4(val) { + val = *:4 SP; + SP = SP + 4; +} + +:^instruction is phase=0 & b0_0007 & instruction [ phase=1; ] {} +:^instruction is phase=0 & b0_0007=0x09; instruction [ indDst=1; phase=1; ] {} # indirect destination prefix +:^instruction is phase=0 & b0_0007=0x41; instruction [ indSrc=1; phase=1; ] {} # indirect source prefix +:^instruction is phase=0 & b0_0007=0x49; instruction [ indDst=1; indSrc=1; phase=1; ] {} # indirect source and destination prefix + +# +# Source operand location data +# +# Obtain additional source byte offset as a result of an INDEX instruction (flagged by useSrcByteIndexOffset context bit) +srcIndexOffset: is useSrcByteIndexOffset=0 { export 0:3; } +srcIndexOffset: is 
useSrcByteIndexOffset=1 { export byteIndexOffset; } + +# Obtain base offset displacement for [AX | SB | FB] - AX and SB uses unsigned displacements, FB uses signed displacement +src5dsp8: imm8_dat^":8" is b1_s5; b2_s5; imm8_dat { export *[const]:3 imm8_dat; } +src5dsp8: simm8_dat^":8" is b1_s5; b2_s5=0x3; simm8_dat { export *[const]:3 simm8_dat; } + +src5dsp16: imm16_dat^":16" is b1_s5; b2_s5; imm16_dat { export *[const]:3 imm16_dat; } +src5dsp16: simm16_dat^":16" is b1_s5; b2_s5=0x3; simm16_dat { export *[const]:3 simm16_dat; } + +src5dsp24: imm24_dat^":24" is b1_s5; b2_s5; imm24_dat { export *[const]:3 imm24_dat; } +src5dsp24: simm24_dat^":24" is b1_s5; b2_s5=0x3; simm24_dat { export *[const]:3 simm24_dat; } + +# src5... Handle 5-bit encoded Source specified by b1_s(3-bits) and b2_s(2-bits) +# Variable length pattern starting at instruction byte b1 +# associated src5 add-on data immediately follows instruction byte b2 +# abs16 and abs24 cases are broken out differently to facilitate export of constant addresses in certain cases +# 1-Byte source value/location specified by 5-bit encoding (b1_d5/b2_d5) - supports indirect prefix and byteIndexOffset +src5B: b2_s5_reg8 is b1_s5=0x4; b2_s5_reg8 { export b2_s5_reg8; } # Rx +src5B: b2_s5_regAx is b1_s5=0x0; b2_s5_1=1 & b2_s5_regAx { tmp:1 = b2_s5_regAx:1; export tmp; } # Ax +src5B: [b2_s5_regAx] is indSrc=1 & b1_s5=0x0; b2_s5_1=1 & b2_s5_regAx { ptr:3 = b2_s5_regAx; export *:1 ptr; } # [Ax] - w/ indirect prefix +src5B: [b2_s5_regAx] is srcIndexOffset & b1_s5=0x0; b2_s5_1=0 & b2_s5_regAx { ptr:3 = b2_s5_regAx + srcIndexOffset; export *:1 ptr; } # [Ax] +src5B: [[b2_s5_regAx]] is indSrc=1 & srcIndexOffset & b1_s5=0x0; b2_s5_1=0 & b2_s5_regAx { ptr:3 = b2_s5_regAx + srcIndexOffset; ptr = *:3 ptr; export *:1 ptr; } # [[Ax]] +src5B: src5dsp8^[b2_s5_regAxSF] is (srcIndexOffset & b1_s5=0x1; b2_s5_regAxSF) ... 
& src5dsp8 { ptr:3 = b2_s5_regAxSF + src5dsp8 + srcIndexOffset; export *:1 ptr; } # dsp:8[Ax|SB|FB] +src5B: [src5dsp8^[b2_s5_regAxSF]] is (indSrc=1 & srcIndexOffset & b1_s5=0x1; b2_s5_regAxSF) ... & src5dsp8 { ptr:3 = b2_s5_regAxSF + src5dsp8 + srcIndexOffset; ptr = *:3 ptr; export *:1 ptr; } # [dsp:8[Ax|SB|FB]] +src5B: src5dsp16^[b2_s5_regAxSF] is (srcIndexOffset & b1_s5=0x2; b2_s5_regAxSF) ... & src5dsp16 { ptr:3 = b2_s5_regAxSF + src5dsp16 + srcIndexOffset; export *:1 ptr; } # dsp:16[Ax|SB|FB] +src5B: [src5dsp16^[b2_s5_regAxSF]] is (indSrc=1 & srcIndexOffset & b1_s5=0x2; b2_s5_regAxSF) ... & src5dsp16 { ptr:3 = b2_s5_regAxSF + src5dsp16 + srcIndexOffset; ptr = *:3 ptr; export *:1 ptr; } # [dsp:16[Ax|SB|FB]] +src5B: src5dsp24^[b2_s5_regAx] is (srcIndexOffset & b1_s5=0x3; b2_s5_1=0 & b2_s5_regAx) ... & src5dsp24 { ptr:3 = b2_s5_regAx + src5dsp24 + srcIndexOffset; export *:1 ptr; } # dsp:24[Ax] +src5B: [src5dsp24^[b2_s5_regAx]] is (indSrc=1 & srcIndexOffset & b1_s5=0x3; b2_s5_1=0 & b2_s5_regAx) ... 
& src5dsp24 { ptr:3 = b2_s5_regAx + src5dsp24 + srcIndexOffset; ptr = *:3 ptr; export *:1 ptr; } # [dsp:24[Ax]] +src5B: imm16_dat is indSrc=0 & useSrcByteIndexOffset=1 & b1_s5=0x3; b2_s5=0x3; imm16_dat { ptr:3 = imm16_dat + byteIndexOffset; export *:1 ptr; } # abs16 (+byteIndexOffset) +src5B: imm16_dat is indSrc=0 & b1_s5=0x3; b2_s5=0x3; imm16_dat { export *:1 imm16_dat; } # abs16 (special constant address case) +src5B: [imm16_dat] is indSrc=1 & srcIndexOffset & b1_s5=0x3; b2_s5=0x3; imm16_dat { ptr:3 = imm16_dat + srcIndexOffset; ptr = *:3 ptr; export *:1 ptr; } # [abs16] +src5B: imm24_dat is indSrc=0 & useSrcByteIndexOffset=1 & b1_s5=0x3; b2_s5=0x2; imm24_dat { ptr:3 = imm24_dat + byteIndexOffset; export *:1 ptr; } # abs24 (+byteIndexOffset) +src5B: imm24_dat is indSrc=0 & b1_s5=0x3; b2_s5=0x2; imm24_dat { export *:1 imm24_dat; } # abs24 (special constant address case) +src5B: [imm24_dat] is indSrc=1 & srcIndexOffset & b1_s5=0x3; b2_s5=0x2; imm24_dat { ptr:3 = imm24_dat + srcIndexOffset; ptr = *:3 ptr; export *:1 ptr; } # [abs24] + +# 2-Byte source value/location specified by 5-bit encoding (b1_d5/b2_d5) - supports indirect prefix and byteIndexOffset +src5W: b2_s5_reg16 is b1_s5=0x4; b2_s5_reg16 { export b2_s5_reg16; } # Rx +src5W: b2_s5_regAx is b1_s5=0x0; b2_s5_1=1 & b2_s5_regAx { tmp:2 = b2_s5_regAx:2; export tmp; } # Ax +src5W: [b2_s5_regAx] is indSrc=1 & b1_s5=0x0; b2_s5_1=1 & b2_s5_regAx { ptr:3 = b2_s5_regAx; export *:2 ptr; } # [Ax] - w/ indirect prefix +src5W: [b2_s5_regAx] is srcIndexOffset & b1_s5=0x0; b2_s5_1=0 & b2_s5_regAx { ptr:3 = b2_s5_regAx + srcIndexOffset; export *:2 ptr; } # [Ax] +src5W: [[b2_s5_regAx]] is indSrc=1 & srcIndexOffset & b1_s5=0x0; b2_s5_1=0 & b2_s5_regAx { ptr:3 = b2_s5_regAx + srcIndexOffset; ptr = *:3 ptr; export *:2 ptr; } # [[Ax]] +src5W: src5dsp8^[b2_s5_regAxSF] is (srcIndexOffset & b1_s5=0x1; b2_s5_regAxSF) ... 
& src5dsp8 { ptr:3 = b2_s5_regAxSF + src5dsp8 + srcIndexOffset; export *:2 ptr; } # dsp:8[Ax|SB|FB] +src5W: [src5dsp8^[b2_s5_regAxSF]] is (indSrc=1 & srcIndexOffset & b1_s5=0x1; b2_s5_regAxSF) ... & src5dsp8 { ptr:3 = b2_s5_regAxSF + src5dsp8 + srcIndexOffset; ptr = *:3 ptr; export *:2 ptr; } # [dsp:8[Ax|SB|FB]] +src5W: src5dsp16^[b2_s5_regAxSF] is (srcIndexOffset & b1_s5=0x2; b2_s5_regAxSF) ... & src5dsp16 { ptr:3 = b2_s5_regAxSF + src5dsp16 + srcIndexOffset; export *:2 ptr; } # dsp:16[Ax|SB|FB] +src5W: [src5dsp16^[b2_s5_regAxSF]] is (indSrc=1 & srcIndexOffset & b1_s5=0x2; b2_s5_regAxSF) ... & src5dsp16 { ptr:3 = b2_s5_regAxSF + src5dsp16 + srcIndexOffset; ptr = *:3 ptr; export *:2 ptr; } # [dsp:16[Ax|SB|FB]] +src5W: src5dsp24^[b2_s5_regAx] is (srcIndexOffset & b1_s5=0x3; b2_s5_1=0 & b2_s5_regAx) ... & src5dsp24 { ptr:3 = b2_s5_regAx + src5dsp24 + srcIndexOffset; export *:2 ptr; } # dsp:24[Ax] +src5W: [src5dsp24^[b2_s5_regAx]] is (indSrc=1 & srcIndexOffset & b1_s5=0x3; b2_s5_1=0 & b2_s5_regAx) ... 
& src5dsp24 { ptr:3 = b2_s5_regAx + src5dsp24 + srcIndexOffset; ptr = *:3 ptr; export *:2 ptr; } # [dsp:24[Ax]] +src5W: imm16_dat is indSrc=0 & useSrcByteIndexOffset=1 & b1_s5=0x3; b2_s5=0x3; imm16_dat { ptr:3 = imm16_dat + byteIndexOffset; export *:2 ptr; } # abs16 (+byteIndexOffset) +src5W: imm16_dat is indSrc=0 & b1_s5=0x3; b2_s5=0x3; imm16_dat { export *:2 imm16_dat; } # abs16 (special constant address case) +src5W: [imm16_dat] is indSrc=1 & srcIndexOffset & b1_s5=0x3; b2_s5=0x3; imm16_dat { ptr:3 = imm16_dat + srcIndexOffset; ptr = *:3 ptr; export *:2 ptr; } # [abs16] +src5W: imm24_dat is indSrc=0 & useSrcByteIndexOffset=1 & b1_s5=0x3; b2_s5=0x2; imm24_dat { ptr:3 = imm24_dat + byteIndexOffset; export *:2 ptr; } # abs24 (+byteIndexOffset) +src5W: imm24_dat is indSrc=0 & b1_s5=0x3; b2_s5=0x2; imm24_dat { export *:2 imm24_dat; } # abs24 (special constant address case) +src5W: [imm24_dat] is indSrc=1 & srcIndexOffset & b1_s5=0x3; b2_s5=0x2; imm24_dat { ptr:3 = imm24_dat + srcIndexOffset; ptr = *:3 ptr; export *:2 ptr; } # [abs24] + +# 4-Byte source value/location specified by 5-bit encoding (b1_d5/b2_d5) - supports indirect prefix and byteIndexOffset +src5L: b2_s5_reg32 is b1_s5=0x4; b2_s5_1=1 & b2_s5_reg32 { export b2_s5_reg32; } # Rx +src5L: b2_s5_regAx is b1_s5=0x0; b2_s5_1=1 & b2_s5_regAx { tmp:4 = zext(b2_s5_regAx); export tmp; } # Ax +src5L: [b2_s5_regAx] is indSrc=1 & b1_s5=0x0; b2_s5_1=1 & b2_s5_regAx { ptr:3 = b2_s5_regAx; export *:4 ptr; } # [Ax] - w/ indirect prefix +src5L: [b2_s5_regAx] is srcIndexOffset & b1_s5=0x0; b2_s5_1=0 & b2_s5_regAx { ptr:3 = b2_s5_regAx + srcIndexOffset; export *:4 ptr; } # [Ax] +src5L: [[b2_s5_regAx]] is indSrc=1 & srcIndexOffset & b1_s5=0x0; b2_s5_1=0 & b2_s5_regAx { ptr:3 = b2_s5_regAx + srcIndexOffset; ptr = *:3 ptr; export *:4 ptr; } # [[Ax]] +src5L: src5dsp8^[b2_s5_regAxSF] is (srcIndexOffset & b1_s5=0x1; b2_s5_regAxSF) ... 
& src5dsp8 { ptr:3 = b2_s5_regAxSF + src5dsp8 + srcIndexOffset; export *:4 ptr; } # dsp:8[Ax|SB|FB] +src5L: [src5dsp8^[b2_s5_regAxSF]] is (indSrc=1 & srcIndexOffset & b1_s5=0x1; b2_s5_regAxSF) ... & src5dsp8 { ptr:3 = b2_s5_regAxSF + src5dsp8 + srcIndexOffset; ptr = *:3 ptr; export *:4 ptr; } # [dsp:8[Ax|SB|FB]] +src5L: src5dsp16^[b2_s5_regAxSF] is (srcIndexOffset & b1_s5=0x2; b2_s5_regAxSF) ... & src5dsp16 { ptr:3 = b2_s5_regAxSF + src5dsp16 + srcIndexOffset; export *:4 ptr; } # dsp:16[Ax|SB|FB] +src5L: [src5dsp16^[b2_s5_regAxSF]] is (indSrc=1 & srcIndexOffset & b1_s5=0x2; b2_s5_regAxSF) ... & src5dsp16 { ptr:3 = b2_s5_regAxSF + src5dsp16 + srcIndexOffset; ptr = *:3 ptr; export *:4 ptr; } # [dsp:16[Ax|SB|FB]] +src5L: src5dsp24^[b2_s5_regAx] is (srcIndexOffset & b1_s5=0x3; b2_s5_1=0 & b2_s5_regAx) ... & src5dsp24 { ptr:3 = b2_s5_regAx + src5dsp24 + srcIndexOffset; export *:4 ptr; } # dsp:24[Ax] +src5L: [src5dsp24^[b2_s5_regAx]] is (indSrc=1 & srcIndexOffset & b1_s5=0x3; b2_s5_1=0 & b2_s5_regAx) ... 
& src5dsp24 { ptr:3 = b2_s5_regAx + src5dsp24 + srcIndexOffset; ptr = *:3 ptr; export *:4 ptr; } # [dsp:24[Ax]] +src5L: imm16_dat is indSrc=0 & useSrcByteIndexOffset=1 & b1_s5=0x3; b2_s5=0x3; imm16_dat { ptr:3 = imm16_dat + byteIndexOffset; export *:4 ptr; } # abs16 (+byteIndexOffset) +src5L: imm16_dat is indSrc=0 & b1_s5=0x3; b2_s5=0x3; imm16_dat { export *:4 imm16_dat; } # abs16 (special constant address case) +src5L: [imm16_dat] is indSrc=1 & srcIndexOffset & b1_s5=0x3; b2_s5=0x3; imm16_dat { ptr:3 = imm16_dat + srcIndexOffset; ptr = *:3 ptr; export *:4 ptr; } # [abs16] +src5L: imm24_dat is indSrc=0 & useSrcByteIndexOffset=1 & b1_s5=0x3; b2_s5=0x2; imm24_dat { ptr:3 = imm24_dat + byteIndexOffset; export *:4 ptr; } # abs24 (+byteIndexOffset) +src5L: imm24_dat is indSrc=0 & b1_s5=0x3; b2_s5=0x2; imm24_dat { export *:4 imm24_dat; } # abs24 (special constant address case) +src5L: [imm24_dat] is indSrc=1 & srcIndexOffset & b1_s5=0x3; b2_s5=0x2; imm24_dat { ptr:3 = imm24_dat + srcIndexOffset; ptr = *:3 ptr; export *:4 ptr; } # [abs24] + +# +# The following macros are used to elliminate illegal bit patterns when using src5 +# These should be used by constructor pattern matching instead of the corresponding src5 subconstructor +# +@define SRC5B "((b1_s5=4 | b1_s5_4=0) ... & src5B)" +@define SRC5W "((b1_s5=4 | b1_s5_4=0) ... & src5W)" +@define SRC5L "((b1_s5=4 | b1_s5_4=0) ... 
& src5L)" + +# +# Destination operand location data (may also be used as a source in certain cases) +# +# Skip instruction and source add-on bytes which occur before destination add-on bytes +# Starting position is at b1 +skipBytesBeforeDst5: is b1_s5; b2_s5 { } +skipBytesBeforeDst5: is dstFollowsSrc=1 & b1_s5=1; b2_s5; imm8_dat { } # src5: dsp8 +skipBytesBeforeDst5: is dstFollowsSrc=1 & b1_s5=2; b2_s5; imm16_dat { } # src5: dsp16 +skipBytesBeforeDst5: is dstFollowsSrc=1 & b1_s5=3; b2_s5; imm24_dat { } # src5: dsp24/abs24 +skipBytesBeforeDst5: is dstFollowsSrc=1 & b1_s5=3; b2_s5=3; imm16_dat { } # src5: abs16 +skipBytesBeforeDst5: is dstFollowsSrc=2 & b1_d5; b2_d5; imm8_dat { } # dsp8 + +# Obtain additional destination byte offset as a result of an INDEX instruction (flagged by useDstByteIndexOffset context bit) +dstIndexOffset: is useDstByteIndexOffset=0 { export 0:3; } +dstIndexOffset: is useDstByteIndexOffset=1 { export byteIndexOffset; } + +# Obtain base offset displacement for [AX | SB | FB] - AX and SB uses unsigned displacements, FB uses signed displacement +dst5dsp8: imm8_dat^":8" is (skipBytesBeforeDst5; imm8_dat) { export *[const]:3 imm8_dat; } +dst5dsp8: simm8_dat^":8" is (b1_d5; b2_d5=0x3) ... & (skipBytesBeforeDst5; simm8_dat) { export *[const]:3 simm8_dat; } +dst5dsp16: imm16_dat^":16" is (skipBytesBeforeDst5; imm16_dat) { export *[const]:3 imm16_dat; } +dst5dsp16: simm16_dat^":16" is (b1_d5; b2_d5=0x3) ... & (skipBytesBeforeDst5; simm16_dat) { export *[const]:3 simm16_dat; } +dst5dsp24: imm24_dat^":24" is (skipBytesBeforeDst5; imm24_dat) { export *[const]:3 imm24_dat; } + +# dst5... Handle 5-bit encoded Destination specified by b1_d5(3-bits) and b2_d5(2-bits) +# Ax direct case is read-only! 
Instruction must use dst5Ax for write/update case +# Variable length pattern starting at instruction byte b1 +# abs16 and abs24 cases are broken out differently to facilitate export of constant addresses in certain cases +# 1-Byte destination value/location specified by 5-bit encoding (b1_d5/b2_d5) - supports indirect prefix and byteIndexOffset +dst5B: b2_d5_reg8 is b1_d5=0x4; b2_d5_reg8 { export b2_d5_reg8; } # Rx +dst5B: b2_d5_regAx is b1_d5=0x0; b2_d5_1=1 & b2_d5_regAx { tmp:1 = b2_d5_regAx:1; export tmp; } # Ax - read-only use ! +dst5B: [b2_d5_regAx] is indDst=1 & b1_d5=0x0; b2_d5_1=1 & b2_d5_regAx { ptr:3 = b2_d5_regAx; export *:1 ptr; } # [Ax] - w/ indirect prefix +dst5B: [b2_d5_regAx] is dstIndexOffset & b1_d5=0x0; b2_d5_1=0 & b2_d5_regAx { ptr:3 = b2_d5_regAx + dstIndexOffset; export *:1 ptr; } # [Ax] +dst5B: [[b2_d5_regAx]] is indDst=1 & dstIndexOffset & b1_d5=0x0; b2_d5_1=0 & b2_d5_regAx { ptr:3 = b2_d5_regAx + dstIndexOffset; ptr = *:3 ptr; export *:1 ptr; } # [[Ax]] +dst5B: dst5dsp8^[b2_d5_regAxSF] is (dstIndexOffset & b1_d5=0x1; b2_d5_regAxSF) ... & dst5dsp8 { ptr:3 = b2_d5_regAxSF + dst5dsp8 + dstIndexOffset; export *:1 ptr; } # dsp:8[Ax|SB|FB] +dst5B: [dst5dsp8^[b2_d5_regAxSF]] is (indDst=1 & dstIndexOffset & b1_d5=0x1; b2_d5_regAxSF) ... & dst5dsp8 { ptr:3 = b2_d5_regAxSF + dst5dsp8 + dstIndexOffset; ptr = *:3 ptr; export *:1 ptr; } # [dsp:8[Ax|SB|FB]] +dst5B: dst5dsp16^[b2_d5_regAxSF] is (dstIndexOffset & b1_d5=0x2; b2_d5_regAxSF) ... & dst5dsp16 { ptr:3 = b2_d5_regAxSF + dst5dsp16 + dstIndexOffset; export *:1 ptr; } # dsp:16[Ax|SB|FB] +dst5B: [dst5dsp16^[b2_d5_regAxSF]] is (indDst=1 & dstIndexOffset & b1_d5=0x2; b2_d5_regAxSF) ... & dst5dsp16 { ptr:3 = b2_d5_regAxSF + dst5dsp16 + dstIndexOffset; ptr = *:3 ptr; export *:1 ptr; } # [dsp:16[Ax|SB|FB]] +dst5B: dst5dsp24^[b2_d5_regAx] is (dstIndexOffset & b1_d5=0x3; b2_d5_1=0 & b2_d5_regAx) ... 
& dst5dsp24 { ptr:3 = b2_d5_regAx + dst5dsp24 + dstIndexOffset; export *:1 ptr; } # dsp:24[Ax] +dst5B: [dst5dsp24^[b2_d5_regAx]] is (indDst=1 & dstIndexOffset & b1_d5=0x3; b2_d5_1=0 & b2_d5_regAx) ... & dst5dsp24 { ptr:3 = b2_d5_regAx + dst5dsp24 + dstIndexOffset; ptr = *:3 ptr; export *:1 ptr; } # [dsp:24[Ax]] +dst5B: imm16_dat is (indDst=0 & useDstByteIndexOffset=1 & b1_d5=0x3; b2_d5=0x3) ... & (skipBytesBeforeDst5; imm16_dat) { ptr:3 = imm16_dat + byteIndexOffset; export *:1 ptr; } # abs16 (+byteIndexOffset) +dst5B: imm16_dat is (indDst=0 & b1_d5=0x3; b2_d5=0x3) ... & (skipBytesBeforeDst5; imm16_dat) { export *:1 imm16_dat; } # abs16 (special constant address case) +dst5B: [imm16_dat] is (indDst=1 & dstIndexOffset & b1_d5=0x3; b2_d5=0x3) ... & (skipBytesBeforeDst5; imm16_dat) { ptr:3 = imm16_dat + dstIndexOffset; ptr = *:3 ptr; export *:1 ptr; } # [abs16] +dst5B: imm24_dat is (indDst=0 & useDstByteIndexOffset=1 & b1_d5=0x3; b2_d5=0x2) ... & (skipBytesBeforeDst5; imm24_dat) { ptr:3 = imm24_dat + byteIndexOffset; export *:1 ptr; } # abs24 +dst5B: imm24_dat is (indDst=0 & b1_d5=0x3; b2_d5=0x2) ... & (skipBytesBeforeDst5; imm24_dat) { export *:1 imm24_dat; } # abs24 (special constant address case) +dst5B: [imm24_dat] is (indDst=1 & dstIndexOffset & b1_d5=0x3; b2_d5=0x2) ... & (skipBytesBeforeDst5; imm24_dat) { ptr:3 = imm24_dat + dstIndexOffset; ptr = *:3 ptr; export *:1 ptr; } # [abs24] + +# 2-Byte destination value/location specified by 5-bit encoding (b1_d5/b2_d5) - supports indirect prefix and byteIndexOffset +dst5W: b2_d5_reg16 is b1_d5=0x4; b2_d5_reg16 { export b2_d5_reg16; } # Rx +dst5W: b2_d5_regAx is b1_d5=0x0; b2_d5_1=1 & b2_d5_regAx { tmp:2 = b2_d5_regAx:2; export tmp; } # Ax - read-only use ! 
+dst5W: [b2_d5_regAx] is indDst=1 & b1_d5=0x0; b2_d5_1=1 & b2_d5_regAx { ptr:3 = b2_d5_regAx; export *:2 ptr; } # [Ax] - w/ indirect prefix +dst5W: [b2_d5_regAx] is dstIndexOffset & b1_d5=0x0; b2_d5_1=0 & b2_d5_regAx { ptr:3 = b2_d5_regAx + dstIndexOffset; export *:2 ptr; } # [Ax] +dst5W: [[b2_d5_regAx]] is indDst=1 & dstIndexOffset & b1_d5=0x0; b2_d5_1=0 & b2_d5_regAx { ptr:3 = b2_d5_regAx + dstIndexOffset; ptr = *:3 ptr; export *:2 ptr; } # [[Ax]] +dst5W: dst5dsp8^[b2_d5_regAxSF] is (dstIndexOffset & b1_d5=0x1; b2_d5_regAxSF) ... & dst5dsp8 { ptr:3 = b2_d5_regAxSF + dst5dsp8 + dstIndexOffset; export *:2 ptr; } # dsp:8[Ax|SB|FB] +dst5W: [dst5dsp8^[b2_d5_regAxSF]] is (indDst=1 & dstIndexOffset & b1_d5=0x1; b2_d5_regAxSF) ... & dst5dsp8 { ptr:3 = b2_d5_regAxSF + dst5dsp8 + dstIndexOffset; ptr = *:3 ptr; export *:2 ptr; } # [dsp:8[Ax|SB|FB]] +dst5W: dst5dsp16^[b2_d5_regAxSF] is (dstIndexOffset & b1_d5=0x2; b2_d5_regAxSF) ... & dst5dsp16 { ptr:3 = b2_d5_regAxSF + dst5dsp16 + dstIndexOffset; export *:2 ptr; } # dsp:16[Ax|SB|FB] +dst5W: [dst5dsp16^[b2_d5_regAxSF]] is (indDst=1 & dstIndexOffset & b1_d5=0x2; b2_d5_regAxSF) ... & dst5dsp16 { ptr:3 = b2_d5_regAxSF + dst5dsp16 + dstIndexOffset; ptr = *:3 ptr; export *:2 ptr; } # [dsp:16[Ax|SB|FB]] +dst5W: dst5dsp24^[b2_d5_regAx] is (dstIndexOffset & b1_d5=0x3; b2_d5_1=0 & b2_d5_regAx) ... & dst5dsp24 { ptr:3 = b2_d5_regAx + dst5dsp24 + dstIndexOffset; export *:2 ptr; } # dsp:24[Ax] +dst5W: [dst5dsp24^[b2_d5_regAx]] is (indDst=1 & dstIndexOffset & b1_d5=0x3; b2_d5_1=0 & b2_d5_regAx) ... & dst5dsp24 { ptr:3 = b2_d5_regAx + dst5dsp24 + dstIndexOffset; ptr = *:3 ptr; export *:2 ptr; } # [dsp:24[Ax]] +dst5W: imm16_dat is (indDst=0 & useDstByteIndexOffset=1 & b1_d5=0x3; b2_d5=0x3) ... & (skipBytesBeforeDst5; imm16_dat) { ptr:3 = imm16_dat + byteIndexOffset; export *:2 ptr; } # abs16 (+byteIndexOffset) +dst5W: imm16_dat is (indDst=0 & b1_d5=0x3; b2_d5=0x3) ... 
& (skipBytesBeforeDst5; imm16_dat) { export *:2 imm16_dat; } # abs16 (special constant address case) +dst5W: [imm16_dat] is (indDst=1 & dstIndexOffset & b1_d5=0x3; b2_d5=0x3) ... & (skipBytesBeforeDst5; imm16_dat) { ptr:3 = imm16_dat + dstIndexOffset; ptr = *:3 ptr; export *:2 ptr; } # [abs16] +dst5W: imm24_dat is (indDst=0 & useDstByteIndexOffset=1 & b1_d5=0x3; b2_d5=0x2) ... & (skipBytesBeforeDst5; imm24_dat) { ptr:3 = imm24_dat + byteIndexOffset; export *:2 ptr; } # abs24 +dst5W: imm24_dat is (indDst=0 & b1_d5=0x3; b2_d5=0x2) ... & (skipBytesBeforeDst5; imm24_dat) { export *:2 imm24_dat; } # abs24 (special constant address case) +dst5W: [imm24_dat] is (indDst=1 & dstIndexOffset & b1_d5=0x3; b2_d5=0x2) ... & (skipBytesBeforeDst5; imm24_dat) { ptr:3 = imm24_dat + dstIndexOffset; ptr = *:3 ptr; export *:2 ptr; } # [abs24] + +# 4-Byte destination value/location specified by 5-bit encoding (b1_d5/b2_d5) - supports indirect prefix and byteIndexOffset +dst5L: b2_d5_reg32 is b1_d5=0x4; b2_d5_1=1 & b2_d5_reg32 { export b2_d5_reg32; } # Rx +dst5L: b2_d5_regAx is b1_d5=0x0; b2_d5_1=1 & b2_d5_regAx { tmp:4 = zext(b2_d5_regAx); export tmp; } # Ax - read-only use ! +dst5L: [b2_d5_regAx] is indDst=1 & b1_d5=0x0; b2_d5_1=1 & b2_d5_regAx { ptr:3 = b2_d5_regAx; export *:4 ptr; } # [Ax] - w/ indirect prefix +dst5L: [b2_d5_regAx] is dstIndexOffset & b1_d5=0x0; b2_d5_1=0 & b2_d5_regAx { ptr:3 = b2_d5_regAx + dstIndexOffset; export *:4 ptr; } # [Ax] +dst5L: [[b2_d5_regAx]] is indDst=1 & dstIndexOffset & b1_d5=0x0; b2_d5_1=0 & b2_d5_regAx { ptr:3 = b2_d5_regAx + dstIndexOffset; ptr = *:3 ptr; export *:4 ptr; } # [[Ax]] +dst5L: dst5dsp8^[b2_d5_regAxSF] is (dstIndexOffset & b1_d5=0x1; b2_d5_regAxSF) ... & dst5dsp8 { ptr:3 = b2_d5_regAxSF + dst5dsp8 + dstIndexOffset; export *:4 ptr; } # dsp:8[Ax|SB|FB] +dst5L: [dst5dsp8^[b2_d5_regAxSF]] is (indDst=1 & dstIndexOffset & b1_d5=0x1; b2_d5_regAxSF) ... 
& dst5dsp8 { ptr:3 = b2_d5_regAxSF + dst5dsp8 + dstIndexOffset; ptr = *:3 ptr; export *:4 ptr; } # [dsp:8[Ax|SB|FB]] +dst5L: dst5dsp16^[b2_d5_regAxSF] is (dstIndexOffset & b1_d5=0x2; b2_d5_regAxSF) ... & dst5dsp16 { ptr:3 = b2_d5_regAxSF + dst5dsp16 + dstIndexOffset; export *:4 ptr; } # dsp:16[Ax|SB|FB] +dst5L: [dst5dsp16^[b2_d5_regAxSF]] is (indDst=1 & dstIndexOffset & b1_d5=0x2; b2_d5_regAxSF) ... & dst5dsp16 { ptr:3 = b2_d5_regAxSF + dst5dsp16 + dstIndexOffset; ptr = *:3 ptr; export *:4 ptr; } # [dsp:16[Ax|SB|FB]] +dst5L: dst5dsp24^[b2_d5_regAx] is (dstIndexOffset & b1_d5=0x3; b2_d5_1=0 & b2_d5_regAx) ... & dst5dsp24 { ptr:3 = b2_d5_regAx + dst5dsp24 + dstIndexOffset; export *:4 ptr; } # dsp:24[Ax] +dst5L: [dst5dsp24^[b2_d5_regAx]] is (indDst=1 & dstIndexOffset & b1_d5=0x3; b2_d5_1=0 & b2_d5_regAx) ... & dst5dsp24 { ptr:3 = b2_d5_regAx + dst5dsp24 + dstIndexOffset; ptr = *:3 ptr; export *:4 ptr; } # [dsp:24[Ax]] +dst5L: imm16_dat is (indDst=0 & useDstByteIndexOffset=1 & b1_d5=0x3; b2_d5=0x3) ... & (skipBytesBeforeDst5; imm16_dat) { ptr:3 = imm16_dat + byteIndexOffset; export *:4 ptr; } # abs16 (+byteIndexOffset) +dst5L: imm16_dat is (indDst=0 & b1_d5=0x3; b2_d5=0x3) ... & (skipBytesBeforeDst5; imm16_dat) { export *:4 imm16_dat; } # abs16 (special constant address case) +dst5L: [imm16_dat] is (indDst=1 & dstIndexOffset & b1_d5=0x3; b2_d5=0x3) ... & (skipBytesBeforeDst5; imm16_dat) { ptr:3 = imm16_dat + dstIndexOffset; ptr = *:3 ptr; export *:4 ptr; } # [abs16] +dst5L: imm24_dat is (indDst=0 & useDstByteIndexOffset=1 & b1_d5=0x3; b2_d5=0x2) ... & (skipBytesBeforeDst5; imm24_dat) { ptr:3 = imm24_dat + byteIndexOffset; export *:4 ptr; } # abs24 +dst5L: imm24_dat is (indDst=0 & b1_d5=0x3; b2_d5=0x2) ... & (skipBytesBeforeDst5; imm24_dat) { export *:4 imm24_dat; } # abs24 (special constant address case) +dst5L: [imm24_dat] is (indDst=1 & dstIndexOffset & b1_d5=0x3; b2_d5=0x2) ... 
& (skipBytesBeforeDst5; imm24_dat) { ptr:3 = imm24_dat + dstIndexOffset; ptr = *:3 ptr; export *:4 ptr; } # [abs24] + +# 3-Byte destination effective address specified by 5-bit encoding (b1_d5/b2_d5) +dst5A: dst5dsp8^[b2_d5_regAxSF] is (b1_d5=0x1; b2_d5_regAxSF) ... & dst5dsp8 { ptr:3 = b2_d5_regAxSF + dst5dsp8; export ptr; } # dsp:8[Ax|SB|FB] +dst5A: dst5dsp16^[b2_d5_regAxSF] is (b1_d5=0x2; b2_d5_regAxSF) ... & dst5dsp16 { ptr:3 = b2_d5_regAxSF + dst5dsp16; export ptr; } # dsp:16[Ax|SB|FB] +dst5A: dst5dsp24^[b2_d5_regAx] is (b1_d5=0x3; b2_d5_1=0 & b2_d5_regAx) ... & dst5dsp24 { ptr:3 = b2_d5_regAx + dst5dsp24; export ptr; } # dsp:24[Ax] +dst5A: imm16_dat is (b1_d5=0x3; b2_d5=0x3) ... & (skipBytesBeforeDst5; imm16_dat) { export *[const]:3 imm16_dat; } # abs16 (special constant address case) +dst5A: imm24_dat is (b1_d5=0x3; b2_d5=0x2) ... & (skipBytesBeforeDst5; imm24_dat) { export *[const]:3 imm24_dat; } # abs24 (special constant address case) + +# Ax destination specified by 5-bit encoding (b1_d5/b2_d5) +# NOTE! 
Ax destination is special case and must be handled seperately by each instruction +# Starting position is at instruction b1 +dst5Ax: b2_d5_regAx is b1_d5; b2_d5_regAx { export b2_d5_regAx; } + +# 1/2/4-Byte destination value/location specified by 5-bit encoding (b1_d5/b2_d5) +# This handles the case for dst5B, dst5W and dst5L where 5-bit encoded Source (src5) add-on bytes may exist before Destination add-on bytes +# Variable length pattern starting at instruction byte b1 +dst5B_afterSrc5: dst5B is dst5B [ dstFollowsSrc=1; ] { export dst5B; } + +dst5W_afterSrc5: dst5W is dst5W [ dstFollowsSrc=1; ] { export dst5W; } + +dst5L_afterSrc5: dst5L is dst5L [ dstFollowsSrc=1; ] { export dst5L; } + +# 1/2/4-Byte destination value/location specified by 5-bit encoding (b1_d5/b2_d5) +# This handles the case for dst5B, dst5W and dst5L where Dsp8 add-on bytes always exist before Destination add-on bytes +# Variable length pattern starting at instruction byte b1 +dst5B_afterDsp8: dst5B is dst5B [ dstFollowsSrc=2; ] { export dst5B; } + +dst5W_afterDsp8: dst5W is dst5W [ dstFollowsSrc=2; ] { export dst5W; } + +# +# The following macros are used to elliminate illegal bit patterns when using dst5 +# These should be used by constructor pattern matching instead of the corresponding dst5 subconstructor +# +@define DST5B "((b1_d5=4 | b1_d5_4=0) ... & dst5B)" +@define DST5W "((b1_d5=4 | b1_d5_4=0) ... & dst5W)" +@define DST5L "((b1_d5=4 | b1_d5_4=0) ... & dst5L)" +@define DST5A "((b1_d5_4=0) ... & dst5A)" +@define DST5AX "((b1_d5=0x0; b2_d5_1=1) & dst5Ax)" +@define DST5B_AFTER_SRC5 "((b1_d5=4 | b1_d5_4=0) ... & dst5B_afterSrc5)" +@define DST5W_AFTER_SRC5 "((b1_d5=4 | b1_d5_4=0) ... & dst5W_afterSrc5)" +@define DST5L_AFTER_SRC5 "((b1_d5=4 | b1_d5_4=0) ... & dst5L_afterSrc5)" +@define DST5B_AFTER_DSP8 "((b1_d5=4 | b1_d5_4=0) ... & dst5B_afterDsp8)" +@define DST5W_AFTER_DSP8 "((b1_d5=4 | b1_d5_4=0) ... & dst5W_afterDsp8)" +@define DST5L_AFTER_DSP8 "((b1_d5=4 | b1_d5_4=0) ... 
& dst5L_afterDsp8)" + +# dst2... Handle 2-bit encoded Destination specified by b1_d2 +# Variable length pattern starting at instruction byte b1 +# TODO? Certain uses of dst2 should exclude the R0 case (b1_d2=0) +# 1-Byte destination value/location specified by 2-bit encoding (b1_d2) +dst2B: R0L is b1_d2=0 & R0L { export R0L; } +dst2B: imm16_dat is b1_d2=1; imm16_dat { export *:1 imm16_dat; } +dst2B: [imm16_dat] is indDst=1 & b1_d2=1; imm16_dat { ptr:3 = imm16_dat; ptr = *:3 ptr; export *:1 ptr; } +dst2B: imm8_dat^":8"^[SB] is b1_d2=2 & SB; imm8_dat { ptr:3 = SB + imm8_dat; export *:1 ptr; } +dst2B: [imm8_dat^":8"^[SB]] is indDst=1 & b1_d2=2 & SB; imm8_dat { ptr:3 = SB + imm8_dat; ptr = *:3 ptr; export *:1 ptr; } +dst2B: simm8_dat^":8"^[FB] is b1_d2=3 & FB; simm8_dat { ptr:3 = FB + simm8_dat; export *:1 ptr; } +dst2B: [simm8_dat^":8"^[FB]] is indDst=1 & b1_d2=3 & FB; simm8_dat { ptr:3 = FB + simm8_dat; ptr = *:3 ptr; export *:1 ptr; } + +# 2-Byte destination value/location specified by 2-bit encoding (b1_d2) +dst2W: R0 is b1_d2=0 & R0 { export R0; } +dst2W: imm16_dat is b1_d2=1; imm16_dat { export *:2 imm16_dat; } +dst2W: [imm16_dat] is indDst=1 & b1_d2=1; imm16_dat { ptr:3 = imm16_dat; ptr = *:3 ptr; export *:2 ptr; } +dst2W: imm8_dat^":8"^[SB] is b1_d2=2 & SB; imm8_dat { ptr:3 = SB + imm8_dat; export *:2 ptr; } +dst2W: [imm8_dat^":8"^[SB]] is indDst=1 & b1_d2=2 & SB; imm8_dat { ptr:3 = SB + imm8_dat; ptr = *:3 ptr; export *:2 ptr; } +dst2W: simm8_dat^":8"^[FB] is b1_d2=3 & FB; simm8_dat { ptr:3 = FB + simm8_dat; export *:2 ptr; } +dst2W: [simm8_dat^":8"^[FB]] is indDst=1 & b1_d2=3 & FB; simm8_dat { ptr:3 = FB + simm8_dat; ptr = *:3 ptr; export *:2 ptr; } + +# 4-Byte destination value/location specified by 2-bit encoding (b1_d2) +dst2L: R2R0 is b1_d2=0 & R2R0 { export R2R0; } +dst2L: imm16_dat is b1_d2=1; imm16_dat { export *:4 imm16_dat; } +dst2L: [imm16_dat] is indDst=1 & b1_d2=1; imm16_dat { ptr:3 = imm16_dat; ptr = *:3 ptr; export *:4 ptr; } +dst2L: 
imm8_dat^":8"^[SB] is b1_d2=2 & SB; imm8_dat { ptr:3 = SB + imm8_dat; export *:4 ptr; } +dst2L: [imm8_dat^":8"^[SB]] is indDst=1 & b1_d2=2 & SB; imm8_dat { ptr:3 = SB + imm8_dat; ptr = *:3 ptr; export *:4 ptr; } +dst2L: simm8_dat^":8"^[FB] is b1_d2=3 & FB; simm8_dat { ptr:3 = FB + simm8_dat; export *:4 ptr; } +dst2L: [simm8_dat^":8"^[FB]] is indDst=1 & b1_d2=3 & FB; simm8_dat { ptr:3 = FB + simm8_dat; ptr = *:3 ptr; export *:4 ptr; } + +dsp8spB: simm8_dat^":8"^[SP] is simm8_dat & SP { ptr:3 = SP + simm8_dat; export *:1 ptr; } + +dsp8spW: simm8_dat^":8"^[SP] is simm8_dat & SP { ptr:3 = SP + simm8_dat; export *:2 ptr; } + +# +# Bit base - associated add-on data immediately follows instruction byte b2 +# (Ax destination case must be handled seperately) +# +# Obtain bitbase offset displacement for [AX | SB | FB] - AX and SB uses unsigned displacements, FB uses signed displacement +bitbaseDsp8: imm8_dat^":11" is b1_d5; b2_d5; imm8_dat { export *[const]:3 imm8_dat; } +bitbaseDsp8: simm8_dat^":11" is b1_d5; b2_d5=0x3; simm8_dat { export *[const]:3 simm8_dat; } + +bitbaseDsp16: imm16_dat^":19" is b1_d5; b2_d5; imm16_dat { export *[const]:3 imm16_dat; } +bitbaseDsp16: simm16_dat^":19" is b1_d5; b2_d5=0x3; simm16_dat { export *[const]:3 simm16_dat; } + +bitbaseDsp24: imm24_dat^":27" is b1_d5; b2_d5; imm24_dat { export *[const]:3 imm24_dat; } +bitbaseDsp24: simm24_dat^":27" is b1_d5; b2_d5=0x3; simm24_dat { export *[const]:3 simm24_dat; } + +bitbase: b2_d5_reg8 is useBitIndex=0 & b1_d5=0x4; b2_d5_reg8 { export b2_d5_reg8; } # Rx +bitbase: b2_d5_regAx is useBitIndex=0 & b1_d5=0x0; b2_d5_1=1 & b2_d5_regAx { tmp:1 = b2_d5_regAx:1; export tmp; } # Ax - read-only case +bitbase: [b2_d5_regAx] is useBitIndex=0 & b1_d5=0x0; b2_d5_1=0 & b2_d5_regAx { ptr:3 = b2_d5_regAx; export *:1 ptr; } # [Ax] +bitbase: bitbaseDsp8^[b2_d5_regAxSF] is (useBitIndex=0 & b1_d5=0x1; b2_d5_regAxSF) ... 
& bitbaseDsp8 { ptr:3 = b2_d5_regAxSF + bitbaseDsp8; export *:1 ptr; } # base:11[Ax|SB|FB] +bitbase: bitbaseDsp16^[b2_d5_regAxSF] is (useBitIndex=0 & b1_d5=0x2; b2_d5_regAxSF) ... & bitbaseDsp16 { ptr:3 = b2_d5_regAxSF + bitbaseDsp16; export *:1 ptr; } # base:19[Ax|SB|FB] +bitbase: bitbaseDsp24^[b2_d5_regAx] is (useBitIndex=0 & b1_d5=0x3; b2_d5_1=0 & b2_d5_regAx) ... & bitbaseDsp24 { ptr:3 = b2_d5_regAx + bitbaseDsp24; export *:1 ptr; } # base:27[Ax] +bitbase: imm16_dat^":19" is useBitIndex=0 & b1_d5=0x3; b2_d5=0x3; imm16_dat { export *:1 imm16_dat; } # base:19 +bitbase: imm24_dat^":27" is useBitIndex=0 & b1_d5=0x3; b2_d5=0x2; imm24_dat { export *:1 imm24_dat; } # base:27 + +bitbase: [b2_d5_regAx] is useBitIndex=1 & b1_d5=0x0; b2_d5_1=0 & b2_d5_regAx { ptr:3 = b2_d5_regAx + (bitIndex / 8); export *:1 ptr; } # [Ax] w/bitIndex +bitbase: bitbaseDsp8^[b2_d5_regAxSF] is (useBitIndex=1 & b1_d5=0x1; b2_d5_regAxSF) ... & bitbaseDsp8 { ptr:3 = b2_d5_regAxSF + bitbaseDsp8 + (bitIndex / 8); export *:1 ptr; } # base:11[Ax|SB|FB] w/bitIndex +bitbase: bitbaseDsp16^[b2_d5_regAxSF] is (useBitIndex=1 & b1_d5=0x2; b2_d5_regAxSF) ... & bitbaseDsp16 { ptr:3 = b2_d5_regAxSF + bitbaseDsp16 + (bitIndex / 8); export *:1 ptr; } # base:19[Ax|SB|FB] w/bitIndex +bitbase: bitbaseDsp24^[b2_d5_regAx] is (useBitIndex=1 & b1_d5=0x3; b2_d5_1=0 & b2_d5_regAx) ... & bitbaseDsp24 { ptr:3 = b2_d5_regAx + bitbaseDsp24 + (bitIndex / 8); export *:1 ptr; } # base:27[Ax] w/bitIndex +bitbase: imm16_dat^":19" is useBitIndex=1 & b1_d5=0x3; b2_d5=0x3; imm16_dat { ptr:3 = imm16_dat + (bitIndex / 8); export *:1 ptr; } # base:19 w/bitIndex +bitbase: imm24_dat^":27" is useBitIndex=1 & b1_d5=0x3; b2_d5=0x2; imm24_dat { ptr:3 = imm24_dat + (bitIndex / 8); export *:1 ptr; } # base:27 w/bitIndex + +# Ax bitbase destination specified by 5-bit encoding (b1_d5/b2_d5) +# NOTE! 
Ax destination is special case and must be handled seperately by each instruction +# Starting position is at instruction b1 +bitbaseAx: b2_d5_regAx is b1_d5; b2_d5_regAx { export b2_d5_regAx; } + +bitbaseAbs16: imm16_dat is imm16_dat { export *:1 imm16_dat; } + +# +# The following macros are used to elliminate illegal bit patterns when using dst5 +# These should be used by constructor pattern matching instead of the corresponding dst5 subconstructor +# +@define BITBASE "((b1_d5=4 | b1_d5_4=0) ... & bitbase)" +@define BITBASE_AX "((b1_d5=0x0; b2_d5_1=1) & bitbaseAx)" + +# Bit identifier (may be overriden if useBitIndex has been set by BINDEX instruction +bit: b2_bit is useBitIndex=0 & b2_bit { export *[const]:1 b2_bit; } +bit: [bitIndex] is useBitIndex=1 & bitIndex { val:3 = bitIndex % 8; b:1 = val:1; export b; } + +# +# Immediate data operand +# Fixed length - current position is at start of immediate data +# +srcImm3: "#"^b2_0002 is b2_0002 { export *[const]:1 b2_0002; } +srcImm8: "#"^imm8_dat is imm8_dat { export *[const]:1 imm8_dat; } + +srcImm8a: "#"^imm8_dat is imm8_dat { export *[const]:1 imm8_dat; } # used when two imm8 are needed + +srcImm16: "#"^imm16_dat is imm16_dat { export *[const]:2 imm16_dat; } + +srcImm16a: "#"^imm16_dat is imm16_dat { export *[const]:2 imm16_dat; } # used when two imm16 are needed + +srcImm24: "#"^imm24_dat is imm24_dat { export *[const]:3 imm24_dat; } +srcImm32: "#"^imm32_dat is imm32_dat { export *[const]:4 imm32_dat; } + +# Unsigned immediate data from 1-bit value: 1 <= value <= 2 (1 added to unsigned bit value) +srcImm1p: "#"^val is b1_0505 [ val = b1_0505 + 1; ] { export *[const]:1 val; } + +# Unsigned immediate data from 2-bit value: 1 <= value <= 8 (1 added to unsigned bit value) +srcImm3p: "#"^val is b1_0405 & b1_0000 [ val = (b1_0405 << 1) + b1_0000 + 1; ] { export *[const]:1 val; } + +srcSimm8: "#"^simm8_dat is simm8_dat { export *[const]:1 simm8_dat; } +srcSimm16: "#"^simm16_dat is simm16_dat { export *[const]:2 
simm16_dat; } +srcSimm32: "#"^simm32_dat is simm32_dat { export *[const]:4 simm32_dat; } + +# Signed immediate data from signed 4-bit value: -8 <= value <= 7 +srcSimm4: "#"^b2_simm4 is b2_simm4 { export *[const]:1 b2_simm4; } + +srcSimm8a: srcSimm8 is srcSimm8 { export srcSimm8; } + +srcSimm16a: srcSimm16 is srcSimm16 { export srcSimm16; } + +# Signed immediate shift amount from 4-bit value: -8 <= value <= -1 || 1 <= value <= 8 +srcSimm4Shift: "#"^val is b2_shiftSign=0 & b2_0002 [ val = b2_0002 + 1; ] { export *[const]:1 val; } +srcSimm4Shift: "#"^val is b2_shiftSign=1 & b2_0002 [ val = -(b2_0002 + 1); ] { export *[const]:1 val; } + +srcZero8: "#0" is b1_0007 { export 0:1; } +srcZero16: "#0" is b1_0007 { export 0:2; } + +# special 6-bit immediate for INT number +srcIntNum: "#"^imm6_dat is imm6_dat { export *[const]:1 imm6_dat; } + +# +# Offset label operand +# +abs24offset: imm24_dat is imm24_dat { export *:1 imm24_dat; } + +abs16offset: imm16_dat is imm16_dat { export *:1 imm16_dat; } + +# Relative address offsets +rel16offset1: offs is simm16_dat [ offs = inst_start + 1 + simm16_dat; ] { export *:1 offs; } + +rel8offset1: offs is simm8_dat [ offs = inst_start + 1 + simm8_dat; ] { export *:1 offs; } +rel8offset2: offs is simm8_dat [ offs = inst_start + 2 + simm8_dat; ] { export *:1 offs; } + +rel3offset2: offs is b1_0405 & b1_0000 [ offs = inst_start + 2 + ((b1_0405 << 1) + b1_0000); ] { export *:1 offs; } + +reloffset_dst5W: dst5W is $(DST5W) { local reladdr = inst_start + dst5W; export *:3 reladdr; } + +reloffset_dst5L: dst5L is $(DST5L) { local reladdr = inst_start + dst5L; export *:3 reladdr; } + +reloffset_dst5Ax: dst5Ax is $(DST5AX) { local reladdr = inst_start + dst5Ax; export *:3 reladdr; } + +# +# Conditionals (see BMcnd) +# +# TODO!! 
Need to verify conditional logic pulled from old slaspec +# TODO: the 'cnd' subconstructor should really constrain the bits 4-7 to 0x0, however this exposes a sleigh compiler problem +cnd: "LTU" is cnd_dat=0x0 { tstCnd:1 = ($(CARRY) == 0); export tstCnd; } # less than (>), C flag is 0 +cnd: "LEU" is cnd_dat=0x1 { tstCnd:1 = (($(CARRY) & (!$(ZERO))) == 0); export tstCnd; } # Equal to or less than (>=) +cnd: "NE" is cnd_dat=0x2 { tstCnd:1 = ($(ZERO) == 0); export tstCnd; } # Not Equal to (=), Z flag is 0 +cnd: "PZ" is cnd_dat=0x3 { tstCnd:1 = ($(SIGN) == 0); export tstCnd; } # Positive or zero (0<=) +cnd: "NO" is cnd_dat=0x4 { tstCnd:1 = ($(OVERFLOW) == 0); export tstCnd; } # O flag is 0 +cnd: "GT" is cnd_dat=0x5 { tstCnd:1 = ((($(SIGN) ^ $(OVERFLOW)) | $(ZERO)) == 0); export tstCnd; } # Greater than (signed value) (<) +cnd: "GE" is cnd_dat=0x6 { tstCnd:1 = (($(SIGN) ^ $(OVERFLOW)) == 0); export tstCnd; } # Equal to or greater than (signed value) (<=) +cnd: "GEU" is cnd_dat=0x8 { tstCnd:1 = ($(CARRY) == 1); export tstCnd; } # Equal to or greater than (<=), C flag is 1 +cnd: "GTU" is cnd_dat=0x9 { tstCnd:1 = (($(CARRY) & (!$(ZERO))) == 1); export tstCnd; } # Greater than (<) +cnd: "EQ" is cnd_dat=0xa { tstCnd:1 = ($(ZERO) == 1); export tstCnd; } # Equal to (=), Z flag is 1 +cnd: "N" is cnd_dat=0xb { tstCnd:1 = ($(SIGN) == 1); export tstCnd; } # Negative (0>) +cnd: "O" is cnd_dat=0xc { tstCnd:1 = ($(OVERFLOW) == 1); export tstCnd; } # O flag is 1 +cnd: "LE" is cnd_dat=0xd { tstCnd:1 = ((($(SIGN) ^ $(OVERFLOW)) | $(ZERO)) == 1); export tstCnd; } # Equal to or less than (signed value) (>=) +cnd: "LT" is cnd_dat=0xe { tstCnd:1 = (($(SIGN) ^ $(OVERFLOW)) == 1); export tstCnd; } # less than (signed value) (<=) + +b2cnd: "LTU" is b2_0606=0 & b2_0002=0 { tstCnd:1 = ($(CARRY) == 0); export tstCnd; } # less than (>), C flag is 0 +b2cnd: "LEU" is b2_0606=0 & b2_0002=1 { tstCnd:1 = (($(CARRY) & (!$(ZERO))) == 0); export tstCnd; } # Equal to or less than (>=) +b2cnd: "NE" is 
b2_0606=0 & b2_0002=2 { tstCnd:1 = ($(ZERO) == 0); export tstCnd; } # Not Equal to (=), Z flag is 0 +b2cnd: "PZ" is b2_0606=0 & b2_0002=3 { tstCnd:1 = ($(SIGN) == 0); export tstCnd; } # Positive or zero (0<=) +b2cnd: "NO" is b2_0606=0 & b2_0002=4 { tstCnd:1 = ($(OVERFLOW) == 0); export tstCnd; } # O flag is 0 +b2cnd: "GT" is b2_0606=0 & b2_0002=5 { tstCnd:1 = ((($(SIGN) ^ $(OVERFLOW)) | $(ZERO)) == 0); export tstCnd; } # Greater than (signed value) (<) +b2cnd: "GE" is b2_0606=0 & b2_0002=6 { tstCnd:1 = (($(SIGN) ^ $(OVERFLOW)) == 0); export tstCnd; } # Equal to or greater than (signed value) (<=) +b2cnd: "GEU" is b2_0606=1 & b2_0002=0 { tstCnd:1 = ($(CARRY) == 1); export tstCnd; } # Equal to or greater than (<=), C flag is 1 +b2cnd: "GTU" is b2_0606=1 & b2_0002=1 { tstCnd:1 = (($(CARRY) & (!$(ZERO))) == 1); export tstCnd; } # Greater than (<) +b2cnd: "EQ" is b2_0606=1 & b2_0002=2 { tstCnd:1 = ($(ZERO) == 1); export tstCnd; } # Equal to (=), Z flag is 1 +b2cnd: "N" is b2_0606=1 & b2_0002=3 { tstCnd:1 = ($(SIGN) == 1); export tstCnd; } # Negative (0>) +b2cnd: "O" is b2_0606=1 & b2_0002=4 { tstCnd:1 = ($(OVERFLOW) == 1); export tstCnd; } # O flag is 1 +b2cnd: "LE" is b2_0606=1 & b2_0002=5 { tstCnd:1 = ((($(SIGN) ^ $(OVERFLOW)) | $(ZERO)) == 1); export tstCnd; } # Equal to or less than (signed value) (>=) +b2cnd: "LT" is b2_0606=1 & b2_0002=6 { tstCnd:1 = (($(SIGN) ^ $(OVERFLOW)) == 1); export tstCnd; } # less than (signed value) (<=) + +b1cnd: "LTU" is b1_0406=0 & b1_0000=0 { tstCnd:1 = ($(CARRY) == 0); export tstCnd; } # less than (>), C flag is 0 +b1cnd: "LEU" is b1_0406=0 & b1_0000=1 { tstCnd:1 = (($(CARRY) & (!$(ZERO))) == 0); export tstCnd; } # Equal to or less than (>=) +b1cnd: "NE" is b1_0406=1 & b1_0000=0 { tstCnd:1 = ($(ZERO) == 0); export tstCnd; } # Not Equal to (=), Z flag is 0 +b1cnd: "PZ" is b1_0406=1 & b1_0000=1 { tstCnd:1 = ($(SIGN) == 0); export tstCnd; } # Positive or zero (0<=) +b1cnd: "NO" is b1_0406=2 & b1_0000=0 { tstCnd:1 = ($(OVERFLOW) == 0); 
export tstCnd; } # O flag is 0 +b1cnd: "GT" is b1_0406=2 & b1_0000=1 { tstCnd:1 = ((($(SIGN) ^ $(OVERFLOW)) | $(ZERO)) == 0); export tstCnd; } # Greater than (signed value) (<) +b1cnd: "GE" is b1_0406=3 & b1_0000=0 { tstCnd:1 = (($(SIGN) ^ $(OVERFLOW)) == 0); export tstCnd; } # Equal to or greater than (signed value) (<=) +b1cnd: "GEU" is b1_0406=4 & b1_0000=0 { tstCnd:1 = ($(CARRY) == 1); export tstCnd; } # Equal to or greater than (<=), C flag is 1 +b1cnd: "GTU" is b1_0406=4 & b1_0000=1 { tstCnd:1 = (($(CARRY) & (!$(ZERO))) == 1); export tstCnd; } # Greater than (<) +b1cnd: "EQ" is b1_0406=5 & b1_0000=0 { tstCnd:1 = ($(ZERO) == 1); export tstCnd; } # Equal to (=), Z flag is 1 +b1cnd: "N" is b1_0406=5 & b1_0000=1 { tstCnd:1 = ($(SIGN) == 1); export tstCnd; } # Negative (0>) +b1cnd: "O" is b1_0406=6 & b1_0000=0 { tstCnd:1 = ($(OVERFLOW) == 1); export tstCnd; } # O flag is 1 +b1cnd: "LE" is b1_0406=6 & b1_0000=1 { tstCnd:1 = ((($(SIGN) ^ $(OVERFLOW)) | $(ZERO)) == 1); export tstCnd; } # Equal to or less than (signed value) (>=) +b1cnd: "LT" is b1_0406=7 & b1_0000=0 { tstCnd:1 = (($(SIGN) ^ $(OVERFLOW)) == 1); export tstCnd; } # less than (signed value) (<=) + +# +# Flag bit operand +# +flagBit: "C" is b2_0002=0 { export 0:2; } +flagBit: "D" is b2_0002=1 { export 1:2; } +flagBit: "Z" is b2_0002=2 { export 2:2; } +flagBit: "S" is b2_0002=3 { export 3:2; } +flagBit: "B" is b2_0002=4 { export 4:2; } +flagBit: "O" is b2_0002=5 { export 5:2; } +flagBit: "I" is b2_0002=6 { export 6:2; } +flagBit: "U" is b2_0002=7 { export 7:2; } + +with: phase=1 { +# +# Instruction Constructors +# +##### ABS ##### +# (1) ABS.B dst +# 1010 0100 1001 1111 0011 0100 0001 0010 ABS.B 0x1234:16[SB] +# 0000 1001 1010 0100 1001 1111 0011 0100 0001 0010 ABS.B [0x1234:16[SB]] +:ABS.B dst5B is (b1_0407=0xa & b1_size_0=0; b2_0005=0x1f) ... & $(DST5B) ... 
{ + tmp:1 = dst5B; + $(OVERFLOW) = (tmp == 0x80); + if (tmp s>= 0) goto ; + tmp = -tmp; + dst5B = tmp; + + setResultFlags(tmp); +} + +# (1) ABS.B Ax +:ABS.B dst5Ax is (b1_0407=0xa & b1_size_0=0; b2_0005=0x1f) ... & $(DST5AX) ... { + tmp:1 = dst5Ax:1; + $(OVERFLOW) = (tmp == 0x80); + if (tmp s>= 0) goto ; + tmp = -tmp; + dst5Ax = zext(tmp); + + setResultFlags(tmp); +} + +# (1) ABS.W dst +# 1010 0101 1001 1111 0011 0100 0001 0010 ABS.W 0x1234:16[SB] +# 0000 1001 1010 0101 1001 1111 0011 0100 0001 0010 ABS.W [0x1234:16[SB]] +:ABS.W dst5W is (b1_0407=0xa & b1_size_0=1; b2_0005=0x1f) ... & $(DST5W) ... { + tmp:2 = dst5W; + $(OVERFLOW) = (tmp == 0x8000); + if (tmp s>= 0) goto ; + tmp = -tmp; + dst5W = tmp; + + setResultFlags(tmp); +} + +# (1) ABS.W Ax +:ABS.W dst5Ax is (b1_0407=0xa & b1_size_0=1; b2_0005=0x1f) ... & $(DST5AX) ... { + tmp:2 = dst5Ax:2; + $(OVERFLOW) = (tmp == 0x8000); + if (tmp s>= 0) goto ; + tmp = -tmp; + dst5Ax = zext(tmp); + + setResultFlags(tmp); +} + +##### ADC ##### + +# (1) ADC.B #simm, dst +# 0000 0001 1000 0100 1010 1110 0011 0100 0001 0010 0101 0110 ADC.B 0x56, 0x1234:16[SB] +# 0000 1001 0000 0001 1000 0100 1010 1110 0011 0100 0001 0010 0101 0110 ABS.B 0x56, [0x1234:16[SB]] +:ADC.B srcSimm8, dst5B is b0_0007=0x1; ((b1_0407=0x8 & b1_size_0=0; b2_0005=0x2e) ... & $(DST5B)); srcSimm8 { + tmp:1 = dst5B; + c:1 = $(CARRY); + setAdd3Flags(tmp, srcSimm8, c); + tmp = tmp + srcSimm8 + c; + dst5B = tmp; + setResultFlags(tmp); +} + +# (1) ADC.B #simm, Ax +:ADC.B srcSimm8, dst5Ax is b0_0007=0x1; ((b1_0407=0x8 & b1_size_0=0; b2_0005=0x2e) & $(DST5AX)); srcSimm8 { + tmp:1 = dst5Ax:1; + c:1 = $(CARRY); + setAdd3Flags(tmp, srcSimm8, c); + tmp = tmp + srcSimm8 + c; + dst5Ax = zext(tmp); + setResultFlags(tmp); +} + +# (1) ADC.W #simm, dst +:ADC.W srcSimm16, dst5W is b0_0007=0x1; ((b1_0407=0x8 & b1_size_0=1; b2_0005=0x2e) ... 
& $(DST5W)); srcSimm16 { + tmp:2 = dst5W; + c:2 = zext($(CARRY)); + setAdd3Flags(tmp, srcSimm16, c); + tmp = tmp + srcSimm16 + c; + dst5W = tmp; + setResultFlags(tmp); +} + +# (1) ADC.B #simm, Ax +:ADC.W srcSimm16, dst5Ax is b0_0007=0x1; ((b1_0407=0x8 & b1_size_0=1; b2_0005=0x2e) & $(DST5AX)); srcSimm16 { + tmp:2 = dst5Ax:2; + c:2 = zext($(CARRY)); + setAdd3Flags(tmp, srcSimm16, c); + tmp = tmp + srcSimm16 + c; + dst5Ax = zext(tmp); + setResultFlags(tmp); +} + +# (2) ADC.B src5, dst5 +:ADC.B src5B, dst5B_afterSrc5 is b0_0007=0x1; ((b1_0707=1 & b1_size_0=0; b2_0003=0x4) ... & $(SRC5B) ... & $(DST5B_AFTER_SRC5) ...) { + tmp:1 = dst5B_afterSrc5; + s:1 = src5B; + c:1 = $(CARRY); + setAdd3Flags(tmp, s, c); + tmp = tmp + s + c; + dst5B_afterSrc5 = tmp; + setResultFlags(tmp); +} + +# (2) ADC.B src5, Ax +:ADC.B src5B, dst5Ax is b0_0007=0x1; ((b1_0707=1 & b1_size_0=0; b2_0003=0x4) ... & $(SRC5B) & $(DST5AX) ...) { + tmp:1 = dst5Ax:1; + s:1 = src5B; + c:1 = $(CARRY); + setAdd3Flags(tmp, s, c); + tmp = tmp + s + c; + dst5Ax = zext(tmp); + setResultFlags(tmp); +} + +# (2) ADC.W src5, dst5 +:ADC.W src5W, dst5W_afterSrc5 is b0_0007=0x1; ((b1_0707=1 & b1_size_0=1; b2_0003=0x4) ... & $(SRC5W) ... & $(DST5W_AFTER_SRC5) ...) { + tmp:2 = dst5W_afterSrc5; + s:2 = src5W; + c:2 = zext($(CARRY)); + setAdd3Flags(tmp, s, c); + tmp = tmp + s + c; + dst5W_afterSrc5 = tmp; + setResultFlags(tmp); +} + +# (2) ADC.W src5, Ax +:ADC.W src5W, dst5Ax is b0_0007=0x1; ((b1_0707=1 & b1_size_0=1; b2_0003=0x4) ... & $(SRC5W) & $(DST5AX) ...) { + tmp:2 = dst5Ax:2; + s:2 = src5W; + c:2 = zext($(CARRY)); + setAdd3Flags(tmp, s, c); + tmp = tmp + s + c; + dst5Ax = zext(tmp); + setResultFlags(tmp); +} + +##### ADCF ##### + +# (1) ADCF.B dst +:ADCF.B dst5B is (b1_0407=0xb & b1_size_0=0; b2_0005=0x1e) ... 
& $(DST5B) { + tmp:1 = dst5B; + c:1 = $(CARRY); + setAddFlags(tmp, c); + tmp = tmp + c; + dst5B = tmp; + setResultFlags(tmp); +} + +# (1) ADCF.B Ax +:ADCF.B dst5Ax is (b1_0407=0xb & b1_size_0=0; b2_0005=0x1e) & $(DST5AX) { + tmp:1 = dst5Ax:1; + c:1 = $(CARRY); + setAddFlags(tmp, c); + tmp = tmp + c; + dst5Ax = zext(tmp); + setResultFlags(tmp); +} + +# (1) ADCF.W dst +:ADCF.W dst5W is (b1_0407=0xb & b1_size_0=1; b2_0005=0x1e) ... & $(DST5W) { + tmp:2 = dst5W; + c:2 = zext($(CARRY)); + setAddFlags(tmp, c); + tmp = tmp + c; + dst5W = tmp; + setResultFlags(tmp); +} + +# (1) ADCF.B Ax +:ADCF.W dst5Ax is (b1_0407=0xb & b1_size_0=1; b2_0005=0x1e) & $(DST5AX) { + tmp:2 = dst5Ax:2; + c:2 = zext($(CARRY)); + setAddFlags(tmp, c); + tmp = tmp + c; + dst5Ax = zext(tmp); + setResultFlags(tmp); +} + +##### ADD ##### + +# (1) ADD.B:G #simm, dst +:ADD^".B:G" srcSimm8, dst5B is ((b1_0407=0x8 & b1_size_0=0; b2_0005=0x2e) ... & $(DST5B)); srcSimm8 { + tmp:1 = dst5B; + setAddFlags(tmp, srcSimm8); + tmp = tmp + srcSimm8; + dst5B = tmp; + setResultFlags(tmp); +} + +# (1) ADD.B:G #simm, Ax +:ADD^".B:G" srcSimm8, dst5Ax is ((b1_0407=0x8 & b1_size_0=0; b2_0005=0x2e) & $(DST5AX)); srcSimm8 { + tmp:1 = dst5Ax:1; + setAddFlags(tmp, srcSimm8); + tmp = tmp + srcSimm8; + dst5Ax = zext(tmp); + setResultFlags(tmp); +} + +# (1) ADD.W:G #simm, dst +:ADD^".W:G" srcSimm16, dst5W is ((b1_0407=0x8 & b1_size_0=1; b2_0005=0x2e) ... & $(DST5W)); srcSimm16 { + tmp:2 = dst5W; + setAddFlags(tmp, srcSimm16); + tmp = tmp + srcSimm16; + dst5W = tmp; + setResultFlags(tmp); +} + +# (1) ADD.W:G #simm, Ax +:ADD^".W:G" srcSimm16, dst5Ax is ((b1_0407=0x8 & b1_size_0=1; b2_0005=0x2e) & $(DST5AX)); srcSimm16 { + tmp:2 = dst5Ax:2; + setAddFlags(tmp, srcSimm16); + tmp = tmp + srcSimm16; + dst5Ax = zext(tmp); + setResultFlags(tmp); +} + +# (2) ADD.L:G #simm, dst +:ADD^".L:G" srcSimm32, dst5L is ((b1_0407=0x8 & b1_size_0=0; b2_0005=0x31) ... 
& $(DST5L)); srcSimm32 { + tmp:4 = dst5L; + setAddFlags(tmp, srcSimm32); + tmp = tmp + srcSimm32; + dst5L = tmp; + setResultFlags(tmp); +} + +# (2) ADD.L:G #simm, Ax +:ADD^".L:G" srcSimm32, dst5Ax is ((b1_0407=0x8 & b1_size_0=0; b2_0005=0x31) & $(DST5AX)); srcSimm32 { + tmp:4 = zext(dst5Ax); + setAddFlags(tmp, srcSimm32); + tmp = tmp + srcSimm32; + dst5Ax = tmp:3; + setResultFlags(tmp); +} + +# (3) ADD.B:G #simm4, dst +:ADD^".B:G" srcSimm4, dst5B is (b1_0507=0x7 & b1_size_4=0 & b1_size_0=0; b2_0405=3 & srcSimm4) ... & $(DST5B) { + tmp:1 = dst5B; + setAddFlags(tmp, srcSimm4); + tmp = tmp + srcSimm4; + dst5B = tmp; + setResultFlags(tmp); +} + +# (3) ADD.B:G #simm4, Ax +:ADD^".B:G" srcSimm4, dst5Ax is (b1_0507=0x7 & b1_d5=0x0 & b1_size_4=0 & b1_size_0=0; b2_0405=3 & srcSimm4) & $(DST5AX) { + tmp:1 = dst5Ax:1; + setAddFlags(tmp, srcSimm4); + tmp = tmp + srcSimm4; + dst5Ax = zext(tmp); + setResultFlags(tmp); +} + +# (3) ADD.W:Q #simm4, dst +:ADD^".W:Q" srcSimm4, dst5W is (b1_0507=0x7 & b1_size_4=0 & b1_size_0=1; b2_0405=3 & srcSimm4) ... & $(DST5W) { + tmp:2 = dst5W; + imm:2 = sext(srcSimm4); + setAddFlags(tmp, imm); + tmp = tmp + imm; + dst5W = tmp; + setResultFlags(tmp); +} + +# (3) ADD.W:Q #simm4, Ax +:ADD^".W:Q" srcSimm4, dst5Ax is (b1_0507=0x7 & b1_d5=0x0 & b1_size_4=0 & b1_size_0=1; b2_0405=3 & srcSimm4) & $(DST5AX) { + tmp:2 = dst5Ax:2; + imm:2 = sext(srcSimm4); + setAddFlags(tmp, imm); + tmp = tmp + imm; + dst5Ax = zext(tmp); + setResultFlags(tmp); +} + +# (3) ADD.L:Q #simm4, dst +:ADD^".L:Q" srcSimm4, dst5L is (b1_0507=0x7 & b1_size_4=1 & b1_size_0=0; b2_0405=3 & srcSimm4) ... 
& $(DST5L) { + tmp:4 = dst5L; + imm:4 = sext(srcSimm4); + setAddFlags(tmp, imm); + tmp = tmp + imm; + dst5L = tmp; + setResultFlags(tmp); +} + +# (3) ADD.L:Q #simm4, Ax +:ADD^".L:Q" srcSimm4, dst5Ax is (b1_0507=0x7 & b1_d5=0x0 & b1_size_4=1 & b1_size_0=0; b2_0405=3 & srcSimm4) & $(DST5AX) { + tmp:4 = sext(dst5Ax); + imm:4 = sext(srcSimm4); + setAddFlags(tmp, imm); + tmp = tmp + imm; + dst5Ax = tmp:3; + setResultFlags(tmp); +} + +# (4) ADD.B:S #simm, dst +:ADD^".B:S" srcSimm8, dst2B is ((b1_0607=0 & b1_0103=3 & b1_size_0=0) ... & dst2B); srcSimm8 { + tmp:1 = dst2B; + setAddFlags(tmp, srcSimm8); + tmp = tmp + srcSimm8; + dst2B = tmp; + setResultFlags(tmp); +} + +# (4) ADD.W:S #simm, dst +# 0010 0111 0101 0110 0011 0100 0001 0010 ADD.W:S #0x1234, 0x56:8[SB] +:ADD^".W:S" srcSimm16, dst2W is ((b1_0607=0 & b1_0103=3 & b1_size_0=1) ... & dst2W); srcSimm16 { + tmp:2 = dst2W; + setAddFlags(tmp, srcSimm16); + tmp = tmp + srcSimm16; + dst2W = tmp; + setResultFlags(tmp); +} + +# (5) ADD.L:S #imm1, Ax +:ADD^".L:S" srcImm1p, b1_d1_regAx is b1_0607=2 & srcImm1p & b1_0104=0x6 & b1_d1_regAx { + tmp:4 = sext(b1_d1_regAx); + imm:4 = zext(srcImm1p); + setAddFlags(tmp, imm); + tmp = tmp + imm; + b1_d1_regAx = tmp:3; + setResultFlags(tmp); +} + +# (6) ADD.B:G src, dst +# 1011 0110 0001 1000 0101 0110 0011 0100 0001 0010 0011 0011 0010 0010 0001 0001 ADD.B:G 0x123456:24[A0], 112233[A1] +# 1100 0101 1111 1000 0011 0100 0001 0010 ADD.W:G R1, 0x1234:16[FB] +:ADD^".B:G" src5B, dst5B_afterSrc5 is (b1_0707=1 & b1_size_0=0; b2_0003=0x8) ... & $(SRC5B) ... & $(DST5B_AFTER_SRC5) ... { + tmp:1 = dst5B_afterSrc5; + src:1 = src5B; + setAddFlags(tmp, src); + tmp = tmp + src; + dst5B_afterSrc5 = tmp; + setResultFlags(tmp); +} + +# (6) ADD.B:G src, Ax - Ax destination case +:ADD^".B:G" src5B, dst5Ax is (b1_0707=1 & b1_size_0=0; b2_0003=0x8) ... & $(SRC5B) & $(DST5AX) ... 
{ + tmp:1 = dst5Ax:1; + src:1 = src5B; + setAddFlags(tmp, src); + tmp = tmp + src; + dst5Ax = zext(tmp); + setResultFlags(tmp); +} + +# (6) ADD.W:G src, dst +:ADD^".W:G" src5W, dst5W_afterSrc5 is (b1_0707=1 & b1_size_0=1; b2_0003=0x8) ... & $(SRC5W) ... & $(DST5W_AFTER_SRC5) ... { + tmp:2 = dst5W_afterSrc5; + src:2 = src5W; + setAddFlags(tmp, src); + tmp = tmp + src; + dst5W_afterSrc5 = tmp; + setResultFlags(tmp); +} + +# (6) ADD.W:G src, Ax - Ax destination case +:ADD^".W:G" src5W, dst5Ax is (b1_0707=1 & b1_size_0=1; b2_0003=0x8) ... & $(SRC5W) & $(DST5AX) ... { + tmp:2 = dst5Ax:2; + src:2 = src5W; + setAddFlags(tmp, src); + tmp = tmp + src; + dst5Ax = zext(tmp); + setResultFlags(tmp); +} + +# (7) ADD.L:G src, dst +:ADD^".L:G" src5L, dst5L_afterSrc5 is (b1_0707=1 & b1_size_0=1; b2_0003=0x2) ... & $(SRC5L) ... & $(DST5L_AFTER_SRC5) ... { + tmp:4 = dst5L_afterSrc5; + src:4 = src5L; + setAddFlags(tmp, src); + tmp = tmp + src; + dst5L_afterSrc5 = tmp; + setResultFlags(tmp); +} + +# (7) ADD.L:G src, Ax - Ax destination case +:ADD^".L:G" src5L, dst5Ax is (b1_0707=1 & b1_size_0=1; b2_0003=0x2) ... & $(SRC5L) & $(DST5AX) ... 
{ + tmp:4 = zext(dst5Ax); + src:4 = src5L; + setAddFlags(tmp, src); + tmp = tmp + src; + dst5Ax = tmp:3; + setResultFlags(tmp); +} + +# (8) ADD.l:G #simm16, SP +:ADD^".L:G" srcSimm16, SP is b1_0007=0xb6 & SP; b2_0007=0x13; srcSimm16 { + # not done as 32-bit calculation to simplify stack analysis + imm:3 = sext(srcSimm16); + setAddFlags(SP, imm); + SP = SP + imm; + setResultFlags(SP); +} + +# (9) ADD.L:Q #imm3, SP +:ADD^".L:Q" srcImm3p, SP is b1_0607=1 & srcImm3p & b1_0103=1 & SP { + # not done as 32-bit calculation to simplify stack analysis + imm:3 = zext(srcImm3p); + setAddFlags(SP, imm); + SP = SP + imm; + setResultFlags(SP); +} + +# (10) ADD.L:S #simm8, SP +:ADD^".L:S" srcSimm8, SP is b1_0007=0xb6 & SP; b2_0007=0x03; srcSimm8 { + # not done as 32-bit calculation to simplify stack analysis + imm:3 = sext(srcSimm8); + setAddFlags(SP, imm); + SP = SP + imm; + setResultFlags(SP); +} + +##### ADDX ##### + +# (1) ADDX #simm, dst5 +:ADDX srcSimm8, dst5L is ((b1_0407=0x8 & b1_size_0=0; b2_0005=0x11) ... & $(DST5L)); srcSimm8 { + tmp:4 = dst5L; + src:4 = sext(srcSimm8); + setAddFlags(tmp, src); + tmp = tmp + src; + dst5L = tmp; + setResultFlags(tmp); +} + +# (1) ADDX #simm, Ax +:ADDX srcSimm8, dst5Ax is ((b1_0407=0x8 & b1_size_0=0; b2_0005=0x11) & $(DST5AX)); srcSimm8 { + tmp:4 = zext(dst5Ax); + src:4 = sext(srcSimm8); + setAddFlags(tmp, src); + tmp = tmp + src; + dst5Ax = tmp:3; + setResultFlags(tmp); +} + +# (2) ADDX src5, dst5 +:ADDX src5B, dst5L_afterSrc5 is (b1_0707=1 & b1_size_0=0; b2_0003=0x2) ... & $(SRC5B) ... & $(DST5L_AFTER_SRC5) ... { + tmp:4 = dst5L_afterSrc5; + src:4 = sext(src5B); + setAddFlags(tmp, src); + tmp = tmp + src; + dst5L_afterSrc5 = tmp; + setResultFlags(tmp); +} + +# (2) ADDX src5, Ax +:ADDX src5B, dst5Ax is (b1_0707=1 & b1_size_0=0; b2_0003=0x2) ... & $(SRC5B) & $(DST5AX) ... 
{ + tmp:4 = zext(dst5Ax); + src:4 = sext(src5B); + setAddFlags(tmp, src); + tmp = tmp + src; + dst5Ax = tmp:3; + setResultFlags(tmp); +} + +##### ADJNZ ##### + +# ADJNZ.B #simm4, dst, rel8offset2 +# 1111 1000 1001 1111 0000 0110 ADJNZ #-0x1,R0L, +:ADJNZ.B srcSimm4, dst5B, rel8offset2 is ((b1_0407=0xf & b1_size_0=0; b2_0405=1 & srcSimm4) ... & $(DST5B)); rel8offset2 { + tmp:1 = dst5B + srcSimm4; + dst5B = tmp; + if (tmp != 0) goto rel8offset2; +} + +# ADJNZ.B #simm4, Ax, , rel8offset2 +:ADJNZ.B srcSimm4, dst5Ax, rel8offset2 is ((b1_0407=0xf & b1_size_0=0; b2_0405=1 & srcSimm4) & $(DST5AX)); rel8offset2 { + tmp:1 = dst5Ax:1 + srcSimm4; + dst5Ax = zext(tmp); + if (tmp != 0) goto rel8offset2; +} + +# ADJNZ.W #simm4, dst, rel8offset2 +:ADJNZ.W srcSimm4, dst5W, rel8offset2 is ((b1_0407=0xf & b1_size_0=1; b2_0405=1 & srcSimm4) ... & $(DST5W)); rel8offset2 { + tmp:2 = dst5W + sext(srcSimm4); + dst5W = tmp; + if (tmp != 0) goto rel8offset2; +} + +# ADJNZ.W #simm4, Ax, rel8offset2 +:ADJNZ.W srcSimm4, dst5Ax, rel8offset2 is ((b1_0407=0xf & b1_size_0=1; b2_0405=1 & srcSimm4) & $(DST5AX)); rel8offset2 { + tmp:2 = dst5Ax:2 + sext(srcSimm4); + dst5Ax = zext(tmp); + if (tmp != 0) goto rel8offset2; +} + +##### AND ##### + +# (1) AND.B:G #imm, dst +:AND^".B:G" srcImm8, dst5B is ((b1_0407=0x8 & b1_size_0=0; b2_0005=0x3f) ... & $(DST5B)); srcImm8 { + tmp:1 = dst5B & srcImm8; + dst5B = tmp; + setResultFlags(tmp); +} + +# (1) AND.B:G #imm, Ax +:AND^".B:G" srcImm8, dst5Ax is ((b1_0407=0x8 & b1_size_0=0; b2_0005=0x3f) & $(DST5AX)); srcImm8 { + tmp:1 = dst5Ax:1 & srcImm8; + dst5Ax = zext(tmp); + setResultFlags(tmp); +} + +# (1) AND.W:G #imm, dst +:AND^".W:G" srcImm16, dst5W is ((b1_0407=0x8 & b1_size_0=1; b2_0005=0x3f) ... 
& $(DST5W)); srcImm16 { + tmp:2 = dst5W & srcImm16; + dst5W = tmp; + setResultFlags(tmp); +} + +# (1) AND.W:G #imm, Ax +:AND^".W:G" srcImm16, dst5Ax is ((b1_0407=0x8 & b1_size_0=1; b2_0005=0x3f) & $(DST5AX)); srcImm16 { + tmp:2 = dst5Ax:2 & srcImm16; + dst5Ax = zext(tmp); + setResultFlags(tmp); +} + +# (2) AND.B:S #imm, dst +:AND^".B:S" srcImm8, dst2B is ((b1_0607=1 & b1_0103=6 & b1_size_0=0) ... & dst2B); srcImm8 { + tmp:1 = dst2B & srcImm8; + dst2B = tmp; + setResultFlags(tmp); +} + +# (2) AND.W:S #imm, dst +:AND^".W:S" srcImm16, dst2W is ((b1_0607=1 & b1_0103=6 & b1_size_0=1) ... & dst2W); srcImm16 { + tmp:2 = dst2W & srcImm16; + dst2W = tmp; + setResultFlags(tmp); +} + +# (3) AND.B:G src5, dst5 +:AND^".B:G" src5B, dst5B_afterSrc5 is (b1_0707=1 & b1_size_0=0; b2_0003=0xd) ... & $(SRC5B) ... & $(DST5B_AFTER_SRC5) ... { + tmp:1 = dst5B_afterSrc5 & src5B; + dst5B_afterSrc5 = tmp; + setResultFlags(tmp); +} + +# (3) AND.B:G src5, Ax +:AND^".B:G" src5B, dst5Ax is (b1_0707=1 & b1_size_0=0; b2_0003=0xd) ... & $(SRC5B) & $(DST5AX) ... { + tmp:1 = dst5Ax:1 & src5B; + dst5Ax = zext(tmp); + setResultFlags(tmp); +} + +# (3) AND.W:G src5, dst5 +:AND^".W:G" src5W, dst5W_afterSrc5 is (b1_0707=1 & b1_size_0=1; b2_0003=0xd) ... & $(SRC5W) ... & $(DST5W_AFTER_SRC5) ... { + tmp:2 = dst5W_afterSrc5 & src5W; + dst5W_afterSrc5 = tmp; + setResultFlags(tmp); +} + +# (3) AND.W:G src5, Ax +:AND^".W:G" src5W, dst5Ax is (b1_0707=1 & b1_size_0=1; b2_0003=0xd) ... & $(SRC5W) & $(DST5AX) ... { + tmp:2 = dst5Ax:2 & src5W; + dst5Ax = zext(tmp); + setResultFlags(tmp); +} + +##### BAND ##### + +# BAND bit,bitbase +# 0000 0001 1101 0110 0000 1011 0101 0110 0011 0100 0001 0010 BAND 0x3,0x123456[A0] +:BAND bit, bitbase is b0_0007=0x1; ((b1_0407=0xd & b1_size_0=0; b2_0305=0x1 & bit) ... 
& $(BITBASE)) { + bitValue:1 = (bitbase >> bit) & 1; + $(CARRY) = $(CARRY) & bitValue; +} + +##### BCLR ##### + +# BCLR bit,bitbase +# 1101 0110 0011 0011 0101 0110 0011 0100 0001 0010 BCLR 0x3,0x123456[A0] +:BCLR bit, bitbase is (b1_0407=0xd & b1_size_0=0; b2_0305=0x6 & bit) ... & $(BITBASE) { + mask:1 = ~(1 << bit); + bitbase = bitbase & mask; +} + +# BCLR bit,Ax +:BCLR b2_bit, bitbaseAx is (b1_0407=0xd & b1_size_0=0; b2_0305=0x6 & b2_bit) & $(BITBASE_AX) { + mask:3 = ~(1 << b2_bit); + bitbaseAx = bitbaseAx & mask; +} + +##### BITINDEX ##### + +# BITINDEX.B src -- dst5B used as source +# 1100 1000 1010 1110 BINDEX.B R0L +:BITINDEX.B dst5B is (b1_0407=0xc & b1_size_0=0; b2_0005=0x2e) ... & $(DST5B) + [ useBitIndex=1; globalset(inst_next,useBitIndex); useBitIndex=0; ] { + bitIndex = zext(dst5B); +} + +# BITINDEX.W src -- dst5W used as source +:BITINDEX.W dst5W is (b1_0407=0xc & b1_size_0=1; b2_0005=0x2e) ... & $(DST5W) + [ useBitIndex=1; globalset(inst_next,useBitIndex); useBitIndex=0; ] { + bitIndex = zext(dst5W); +} + +##### BMCnd ##### + +# (1) BMcnd bit, bitbase +:BM^cnd bit, bitbase is ((b1_0407=0xd & b1_size_0=0; b2_0305=0x2 & bit) ... & $(BITBASE)); cnd { + mask:1 = ~(1 << bit); + bitbase = ((cnd << bit) | (bitbase & mask)); +} + +# (1) BMcnd bit, Ax +:BM^cnd b2_bit, bitbaseAx is ((b1_0407=0xd & b1_size_0=0; b2_0305=0x2 & b2_bit) & $(BITBASE_AX)); cnd { + mask:3 = ~(1 << b2_bit); + bitbaseAx = ((zext(cnd) << b2_bit) | (bitbaseAx & mask)); +} + +# (2) BMcnd C +:BM^b2cnd "C" is b1_0007=0xd9; b2_0707=0 & b2_0305=5 & b2cnd { + $(CARRY) = b2cnd; +} + +##### BNAND ##### + +:BNAND bit, bitbase is b0_0007=0x1; ((b1_0407=0xd & b1_size_0=0; b2_0305=0x3 & bit) ... 
& $(BITBASE)) { + mask:1 = (1 << bit); + bitValue:1 = (bitbase & mask); + $(CARRY) = $(CARRY) && (bitValue == 0); +} + +:BNAND b2_bit, bitbaseAx is b0_0007=0x1; ((b1_0407=0xd & b1_size_0=0; b2_0305=0x3 & b2_bit) & $(BITBASE_AX)) { + mask:3 = (1 << b2_bit); + bitValue:3 = (bitbaseAx & mask); + $(CARRY) = $(CARRY) && (bitValue == 0); +} + +##### BNOR ##### + +:BNOR bit, bitbase is b0_0007=0x1; ((b1_0407=0xd & b1_size_0=0; b2_0305=0x6 & bit) ... & $(BITBASE)) { + mask:1 = (1 << bit); + bitValue:1 = (bitbase & mask); + $(CARRY) = $(CARRY) || (bitValue == 0); +} + +:BNOR b2_bit, bitbaseAx is b0_0007=0x1; ((b1_0407=0xd & b1_size_0=0; b2_0305=0x6 & b2_bit) & $(BITBASE_AX)) { + mask:3 = (1 << b2_bit); + bitValue:3 = (bitbaseAx & mask); + $(CARRY) = $(CARRY) || (bitValue == 0); +} + +##### BNOT ##### + +# BNOT bit,bitbase +:BNOT bit, bitbase is (b1_0407=0xd & b1_size_0=0; b2_0305=0x3 & bit) ... & $(BITBASE) { + mask:1 = (1 << bit); + val:1 = bitbase; + bitValue:1 = (~val & mask); + bitbase = (val & ~mask) | bitValue; +} + +# BNOT bit,Ax +:BNOT b2_bit, bitbaseAx is (b1_0407=0xd & b1_size_0=0; b2_0305=0x3 & b2_bit) & $(BITBASE_AX) { + mask:3 = (1 << b2_bit); + bitValue:3 = (~bitbaseAx & mask); + bitbaseAx = (bitbaseAx & ~mask) | bitValue; +} + +##### BNTST ##### + +:BNTST bit, bitbase is b0_0007=0x1; ((b1_0407=0xd & b1_size_0=0; b2_0305=0x0 & bit) ... & $(BITBASE)) { + mask:1 = (1 << bit); + bitValue:1 = (bitbase & mask); + z:1 = (bitValue == 0); + $(CARRY) = z; + $(ZERO) = z; +} + +:BNTST b2_bit, bitbaseAx is b0_0007=0x1; ((b1_0407=0xd & b1_size_0=0; b2_0305=0x0 & b2_bit) & $(BITBASE_AX)) { + mask:3 = (1 << b2_bit); + bitValue:3 = (bitbaseAx & mask); + z:1 = (bitValue == 0); + $(CARRY) = z; + $(ZERO) = z; +} + +##### BNXOR ##### + +:BNXOR bit, bitbase is b0_0007=0x1; ((b1_0407=0xd & b1_size_0=0; b2_0305=0x7 & bit) ... 
& $(BITBASE)) { + mask:1 = (1 << bit); + bitValue:1 = (bitbase & mask); + $(CARRY) = $(CARRY) ^ (bitValue == 0); +} + +:BNXOR b2_bit, bitbaseAx is b0_0007=0x1; ((b1_0407=0xd & b1_size_0=0; b2_0305=0x7 & b2_bit) & $(BITBASE_AX)) { + mask:3 = (1 << b2_bit); + bitValue:3 = (bitbaseAx & mask); + $(CARRY) = $(CARRY) ^ (bitValue == 0); +} + +##### BOR ##### + +:BOR bit, bitbase is b0_0007=0x1; ((b1_0407=0xd & b1_size_0=0; b2_0305=0x4 & bit) ... & $(BITBASE)) { + mask:1 = (1 << bit); + bitValue:1 = (bitbase & mask); + $(CARRY) = $(CARRY) || (bitValue != 0); +} + +:BOR b2_bit, bitbaseAx is b0_0007=0x1; ((b1_0407=0xd & b1_size_0=0; b2_0305=0x4 & b2_bit) & $(BITBASE_AX)) { + mask:3 = (1 << b2_bit); + bitValue:3 = (bitbaseAx & mask); + $(CARRY) = $(CARRY) || (bitValue != 0); +} + +##### BRK ##### + +:BRK is b1_0007=0x0 { + # I don't think it is necessary to model break behavior + Break(); +} + +##### BRK2 ##### + +:BRK2 is b1_0007=0x8 { + # I don't think it is necessary to model break behavior + Break2(); +} + +##### BSET ##### + +:BSET bit, bitbase is (b1_0407=0xd & b1_size_0=0; b2_0305=0x7 & bit) ... & $(BITBASE) { + mask:1 = (1 << bit); + bitbase = bitbase | mask; +} + +:BSET b2_bit, bitbaseAx is (b1_0407=0xd & b1_size_0=0; b2_0305=0x7 & b2_bit) & $(BITBASE_AX) { + mask:3 = (1 << b2_bit); + bitbaseAx = bitbaseAx | mask; +} + +##### BTST ##### + +# (1) BTST bit, bitbase +:BTST bit, bitbase is (b1_0407=0xd & b1_size_0=0; b2_0305=0x0 & bit) ... 
& $(BITBASE) { + mask:1 = (1 << bit); + bitValue:1 = (bitbase & mask); + z:1 = (bitValue == 0); + $(CARRY) = !z; + $(ZERO) = z; +} + +# (1) BTST bit, Ax +:BTST b2_bit, bitbaseAx is (b1_0407=0xd & b1_size_0=0; b2_0305=0x0 & b2_bit) & $(BITBASE_AX) { + mask:3 = (1 << b2_bit); + bitValue:3 = (bitbaseAx & mask); + z:1 = (bitValue == 0); + $(CARRY) = !z; + $(ZERO) = z; +} + +# (2) BTST bit, bitbase +:BTST b, bitbaseAbs16 is b1_0607=0 & b1_0405 & b1_0103=5 & b1_0000; bitbaseAbs16 [ b = (b1_0405 << 1) + b1_0000; ] { + mask:1 = (1 << b); + bitValue:1 = (bitbaseAbs16 & mask); + z:1 = (bitValue == 0); + $(CARRY) = !z; + $(ZERO) = z; +} + +##### BTSTC ##### + +# (1) BTSTC bit, bitbase +:BTSTC bit, bitbase is (b1_0407=0xd & b1_size_0=0; b2_0305=0x4 & bit) ... & $(BITBASE) { + mask:1 = (1 << bit); + val:1 = bitbase; + bitValue:1 = (val & mask); + z:1 = (bitValue == 0); + $(CARRY) = !z; + $(ZERO) = z; + bitbase = val & ~mask; +} + +# (1) BTSTC bit, Ax +:BTSTC b2_bit, bitbaseAx is (b1_0407=0xd & b1_size_0=0; b2_0305=0x4 & b2_bit) & $(BITBASE_AX) { + mask:3 = (1 << b2_bit); + bitValue:3 = (bitbaseAx & mask); + z:1 = (bitValue == 0); + $(CARRY) = !z; + $(ZERO) = z; + bitbaseAx = bitbaseAx & ~mask; +} + +##### BTSTS ##### + +# (1) BTSTS bit, bitbase +:BTSTS bit, bitbase is (b1_0407=0xd & b1_size_0=0; b2_0305=0x5 & bit) ... & $(BITBASE) { + mask:1 = (1 << bit); + val:1 = bitbase; + bitValue:1 = (val & mask); + z:1 = (bitValue == 0); + $(CARRY) = !z; + $(ZERO) = z; + bitbase = val | mask; +} + +# (1) BTSTS bit, Ax +:BTSTS b2_bit, bitbaseAx is (b1_0407=0xd & b1_size_0=0; b2_0305=0x5 & b2_bit) & $(BITBASE_AX) { + mask:3 = (1 << b2_bit); + bitValue:3 = (bitbaseAx & mask); + z:1 = (bitValue == 0); + $(CARRY) = !z; + $(ZERO) = z; + bitbaseAx = bitbaseAx | mask; +} + +##### BXOR ##### + +:BXOR bit, bitbase is b0_0007=0x1; ((b1_0407=0xd & b1_size_0=0; b2_0305=0x5 & bit) ... 
& $(BITBASE)) { + mask:1 = (1 << bit); + bitValue:1 = (bitbase & mask); + $(CARRY) = $(CARRY) ^ (bitValue != 0); +} + +:BXOR b2_bit, bitbaseAx is b0_0007=0x1; ((b1_0407=0xd & b1_size_0=0; b2_0305=0x5 & b2_bit) & $(BITBASE_AX)) { + mask:3 = (1 << b2_bit); + bitValue:3 = (bitbaseAx & mask); + $(CARRY) = $(CARRY) ^ (bitValue != 0); +} + +##### CLIP ##### + +# CLIP.B #simm, #simm, dst5 +:CLIP.B srcSimm8, srcSimm8a, dst5B is b0_0007=0x1; ((b1_0407=0x8 & b1_size_0=0; b2_0005=0x3e) ... & $(DST5B)); srcSimm8; srcSimm8a { + val:1 = dst5B; + cmp1:1 = srcSimm8 s> val; + cmp2:1 = srcSimm8a s< val; + dst5B = (cmp1 * srcSimm8) + (cmp2 * srcSimm8a) + ((!cmp1 * !cmp2) * val); +} + +# CLIP.B #simm, #simm, Ax +:CLIP.B srcSimm8, srcSimm8a, dst5Ax is b0_0007=0x1; ((b1_0407=0x8 & b1_size_0=0; b2_0005=0x3e) & $(DST5AX)); srcSimm8; srcSimm8a { + val:1 = dst5Ax:1; + cmp1:1 = srcSimm8 s> val; + cmp2:1 = srcSimm8a s< val; + dst5Ax = zext((cmp1 * srcSimm8) + (cmp2 * srcSimm8a) + ((!cmp1 * !cmp2) * val)); +} + +# CLIP.W #simm, #simm, dst5 +:CLIP.W srcSimm16, srcSimm16a, dst5W is b0_0007=0x1; ((b1_0407=0x8 & b1_size_0=1; b2_0005=0x3e) ... & $(DST5W)); srcSimm16; srcSimm16a { + val:2 = dst5W; + cmp1:1 = srcSimm16 s> val; + cmp2:1 = srcSimm16a s< val; + dst5W = (zext(cmp1) * srcSimm16) + (zext(cmp2) * srcSimm16a) + (zext(!cmp1 * !cmp2) * val); +} + +# CLIP.W #simm, #simm, Ax +:CLIP.W srcSimm16, srcSimm16a, dst5Ax is b0_0007=0x1; ((b1_0407=0x8 & b1_size_0=1; b2_0005=0x3e) & $(DST5AX)); srcSimm16; srcSimm16a { + val:2 = dst5Ax:2; + cmp1:1 = srcSimm16 s> val; + cmp2:1 = srcSimm16a s< val; + dst5Ax = zext((zext(cmp1) * srcSimm16) + (zext(cmp2) * srcSimm16a) + (zext(!cmp1 * !cmp2) * val)); +} + +##### CMP ##### + +# (1) CMP.B:G #simm, dst5 +:CMP^".B:G" srcSimm8, dst5B is ((b1_0407=0x9 & b1_size_0=0; b2_0005=0x2e) ... 
& $(DST5B)); srcSimm8 { + tmp:1 = dst5B; + setSubtractFlags(tmp, srcSimm8); + tmp = tmp - srcSimm8; + setResultFlags(tmp); +} + +# (1) CMP.B:G #simm, Ax +:CMP^".B:G" srcSimm8, dst5Ax is ((b1_0407=0x9 & b1_size_0=0; b2_0005=0x2e) & $(DST5AX)); srcSimm8 { + tmp:1 = dst5Ax:1; + setSubtractFlags(tmp, srcSimm8); + tmp = tmp - srcSimm8; + setResultFlags(tmp); +} + +# (1) CMP.W:G #simm, dst5 +:CMP^".W:G" srcSimm16, dst5W is ((b1_0407=0x9 & b1_size_0=1; b2_0005=0x2e) ... & $(DST5W)); srcSimm16 { + tmp:2 = dst5W; + setSubtractFlags(tmp, srcSimm16); + tmp = tmp - srcSimm16; + setResultFlags(tmp); +} + +# (1) CMP.W:G #simm, Ax +:CMP^".W:G" srcSimm16, dst5Ax is ((b1_0407=0x9 & b1_size_0=1; b2_0005=0x2e) & $(DST5AX)); srcSimm16 { + tmp:2 = dst5Ax:2; + setSubtractFlags(tmp, srcSimm16); + tmp = tmp - srcSimm16; + setResultFlags(tmp); +} + +# (2) CMP.L:G #simm, dst5 +:CMP^".L:G" srcSimm32, dst5L is ((b1_0407=0xa & b1_size_0=0; b2_0005=0x31) ... & $(DST5L)); srcSimm32 { + tmp:4 = dst5L; + setSubtractFlags(tmp, srcSimm32); + tmp = tmp - srcSimm32; + setResultFlags(tmp); +} + +# (2) CMP.L:G #simm, Ax +:CMP^".L:G" srcSimm32, dst5Ax is ((b1_0407=0xa & b1_size_0=0; b2_0005=0x31) & $(DST5AX)); srcSimm32 { + tmp:4 = zext(dst5Ax); + setSubtractFlags(tmp, srcSimm32); + tmp = tmp - srcSimm32; + setResultFlags(tmp); +} + +# (3) CMP.B:Q #simm4, dst5 +:CMP^".B:Q" srcSimm4, dst5B is (b1_0407=0xe & b1_size_0=0; b2_0405=1 & srcSimm4) ... & $(DST5B) { + tmp:1 = dst5B; + setSubtractFlags(tmp, srcSimm4); + tmp = tmp - srcSimm4; + setResultFlags(tmp); +} + +# (3) CMP.B:Q #simm4, Ax +:CMP^".B:Q" srcSimm4, dst5Ax is (b1_0407=0xe & b1_size_0=0; b2_0405=1 & srcSimm4) & $(DST5AX) { + tmp:1 = dst5Ax:1; + setSubtractFlags(tmp, srcSimm4); + tmp = tmp - srcSimm4; + setResultFlags(tmp); +} + +# (3) CMP.W:Q #simm4, dst5 +:CMP^".W:Q" srcSimm4, dst5W is (b1_0407=0xe & b1_size_0=1; b2_0405=1 & srcSimm4) ... 
& $(DST5W) { + tmp:2 = dst5W; + imm:2 = sext(srcSimm4); + setSubtractFlags(tmp, imm); + tmp = tmp - imm; + setResultFlags(tmp); +} + +# (3) CMP.W:Q #simm4, Ax +:CMP^".W:Q" srcSimm4, dst5Ax is (b1_0407=0xe & b1_size_0=1; b2_0405=1 & srcSimm4) & $(DST5AX) { + tmp:2 = dst5Ax:2; + imm:2 = sext(srcSimm4); + setSubtractFlags(tmp, imm); + tmp = tmp - imm; + setResultFlags(tmp); +} + +# (4) CMP.B:S #simm, dst2 +:CMP^".B:S" srcSimm8, dst2B is ((b1_0607=1 & b1_0103=3 & b1_size_0=0) ... & dst2B); srcSimm8 { + tmp:1 = dst2B; + setSubtractFlags(tmp, srcSimm8); + tmp = tmp - srcSimm8; + setResultFlags(tmp); +} + +# (4) CMP.W:S #simm, dst2 +:CMP^".W:S" srcSimm16, dst2W is ((b1_0607=1 & b1_0103=3 & b1_size_0=1) ... & dst2W); srcSimm16 { + tmp:2 = dst2W; + setSubtractFlags(tmp, srcSimm16); + tmp = tmp - srcSimm16; + setResultFlags(tmp); +} + +# (5) CMP.B:G src5, dst5 +:CMP^".B:G" src5B, dst5B_afterSrc5 is (b1_0707=1 & b1_size_0=0; b2_0003=0x6) ... & $(SRC5B) ... & $(DST5B_AFTER_SRC5) { + tmp:1 = dst5B_afterSrc5; + src:1 = src5B; + setSubtractFlags(tmp, src); + tmp = tmp - src; + setResultFlags(tmp); +} + +# (5) CMP.B:G src5, Ax +:CMP^".B:G" src5B, dst5Ax is (b1_0707=1 & b1_size_0=0; b2_0003=0x6) ... & $(SRC5B) & $(DST5AX) ... { + tmp:1 = dst5Ax:1; + src:1 = src5B; + setSubtractFlags(tmp, src); + tmp = tmp - src; + setResultFlags(tmp); +} + +# (5) CMP.W:G src5, dst5 +:CMP^".W:G" src5W, dst5W_afterSrc5 is (b1_0707=1 & b1_size_0=1; b2_0003=0x6) ... & $(SRC5W) ... & $(DST5W_AFTER_SRC5) { + tmp:2 = dst5W_afterSrc5; + src:2 = src5W; + setSubtractFlags(tmp, src); + tmp = tmp - src; + setResultFlags(tmp); +} + +# (5) CMP.W:G src5, Ax +:CMP^".W:G" src5W, dst5Ax is (b1_0707=1 & b1_size_0=1; b2_0003=0x6) ... & $(SRC5W) & $(DST5AX) ... { + tmp:2 = dst5Ax:2; + src:2 = src5W; + setSubtractFlags(tmp, src); + tmp = tmp - src; + setResultFlags(tmp); +} + +# (6) CMP.L:G src5, dst5 +:CMP^".L:G" src5L, dst5L_afterSrc5 is (b1_0707=1 & b1_size_0=1; b2_0003=1) ... & $(SRC5L) ... & $(DST5L_AFTER_SRC5) ... 
{ + tmp:4 = dst5L_afterSrc5; + src:4 = src5L; + setSubtractFlags(tmp, src); + tmp = tmp - src; + setResultFlags(tmp); +} + +# (6) CMP.L:G src5, Ax +:CMP^".L:G" src5L, dst5Ax is (b1_0707=1 & b1_size_0=1; b2_0003=1) ... & $(SRC5L) & $(DST5AX) ... { + tmp:4 = zext(dst5Ax); + src:4 = src5L; + setSubtractFlags(tmp, src); + tmp = tmp - src; + setResultFlags(tmp); +} + +# (7) CMP.B:S src2, R0L +:CMP^".B:S" dst2B, R0L is (b1_0607=1 & b1_0103=0 & b1_size_0=0 & R0L) ... & dst2B { + tmp:1 = dst2B; + setSubtractFlags(R0L, tmp); + tmp = tmp - R0L; + setResultFlags(tmp); +} + +# (7) CMP.W:S src2, R0 +:CMP^".W:S" dst2W, R0 is (b1_0607=1 & b1_0103=0 & b1_size_0=1 & R0) ... & dst2W { + tmp:2 = dst2W; + setSubtractFlags(R0, tmp); + tmp = tmp - R0; + setResultFlags(tmp); +} + +##### CMPX ##### + +# CMPX #simm, dst5 +:CMPX srcSimm8, dst5L is ((b1_0407=0xa & b1_size_0=0; b2_0005=0x11) ... & $(DST5L)); srcSimm8 { + tmp:4 = dst5L; + imm:4 = sext(srcSimm8); + setSubtractFlags(tmp, imm); + tmp = tmp - imm; + setResultFlags(tmp); +} + +# CMPX #simm, Ax +:CMPX srcSimm8, dst5Ax is ((b1_0407=0xa & b1_size_0=0; b2_0005=0x11) & $(DST5AX)); srcSimm8 { + tmp:4 = zext(dst5Ax); + imm:4 = sext(srcSimm8); + setSubtractFlags(tmp, imm); + tmp = tmp - imm; + setResultFlags(tmp); +} + +##### DADC ##### + +# (1) DADC.B #imm, dst5 +:DADC.B srcImm8, dst5B is b0_0007=0x1; ((b1_0407=0x8 & b1_size_0=0; b2_0005=0x0e) ... & $(DST5B)); srcImm8 { + src:2 = zext(srcImm8); + dst:2 = zext(dst5B); + tmp:2 = DecimalAddWithCarry(src, dst); + dst5B = tmp:1; + $(CARRY) = (tmp > 0x99); + setResultFlags(tmp:1); +} + +# (1) DADC.B #imm, Ax +:DADC.B srcImm8, dst5Ax is b0_0007=0x1; ((b1_0407=0x8 & b1_size_0=0; b2_0005=0x0e) & $(DST5AX)); srcImm8 { + src:2 = zext(srcImm8); + dst:2 = zext(dst5Ax:1); + tmp:2 = DecimalAddWithCarry(src, dst); + dst5Ax = zext(tmp:1); + $(CARRY) = (tmp > 0x99); + setResultFlags(tmp:1); +} + +# (1) DADC.W #imm, dst5 +:DADC.W srcImm16, dst5W is b0_0007=0x1; ((b1_0407=0x8 & b1_size_0=1; b2_0005=0x0e) ... 
& $(DST5W)); srcImm16 { + src:4 = zext(srcImm16); + dst:4 = zext(dst5W); + tmp:4 = DecimalAddWithCarry(src, dst); + dst5W = tmp:2; + $(CARRY) = (tmp > 0x9999); + setResultFlags(tmp:2); +} + +# (1) DADC.W #imm, Ax +:DADC.W srcImm16, dst5Ax is b0_0007=0x1; ((b1_0407=0x8 & b1_size_0=1; b2_0005=0x0e) & $(DST5AX)); srcImm16 { + src:4 = zext(srcImm16); + dst:4 = zext(dst5Ax:2); + tmp:4 = DecimalAddWithCarry(src, dst); + dst5Ax = zext(tmp:2); + $(CARRY) = (tmp > 0x9999); + setResultFlags(tmp:2); +} + +# (2) DADC.B src5, dst5 +:DADC.B src5B, dst5B_afterSrc5 is b0_0007=0x1; ((b1_0707=1 & b1_size_0=0; b2_0003=0x8) ... & $(SRC5B) ... & $(DST5B_AFTER_SRC5) ...) { + src:2 = zext(src5B); + dst:2 = zext(dst5B_afterSrc5); + tmp:2 = DecimalAddWithCarry(src, dst); + dst5B_afterSrc5 = tmp:1; + $(CARRY) = (tmp > 0x99); + setResultFlags(tmp:1); +} + +# (2) DADC.B src5, Ax +:DADC.B src5B, dst5Ax is b0_0007=0x1; ((b1_0707=1 & b1_size_0=0; b2_0003=0x8) ... & $(SRC5B) & $(DST5AX) ...) { + src:2 = zext(src5B); + dst:2 = zext(dst5Ax:1); + tmp:2 = DecimalAddWithCarry(src, dst); + dst5Ax = zext(tmp:1); + $(CARRY) = (tmp > 0x99); + setResultFlags(tmp:1); +} + +# (2) DADC.W src5, dst5 +:DADC.W src5W, dst5W_afterSrc5 is b0_0007=0x1; ((b1_0707=1 & b1_size_0=1; b2_0003=0x8) ... & $(SRC5W) ... & $(DST5W_AFTER_SRC5) ...) { + src:4 = zext(src5W); + dst:4 = zext(dst5W_afterSrc5); + tmp:4 = DecimalAddWithCarry(src, dst); + dst5W_afterSrc5 = tmp:2; + $(CARRY) = (tmp > 0x9999); + setResultFlags(tmp:2); +} + +# (2) DADC.W src5, Ax +:DADC.W src5W, dst5Ax is b0_0007=0x1; ((b1_0707=1 & b1_size_0=1; b2_0003=0x8) ... & $(SRC5W) & $(DST5AX) ...) { + src:4 = zext(src5W); + dst:4 = zext(dst5Ax:2); + tmp:4 = DecimalAddWithCarry(src, dst); + dst5Ax = zext(tmp:2); + $(CARRY) = (tmp > 0x9999); + setResultFlags(tmp:2); +} + +##### DADD ##### + +# (1) DADD.B #imm, dst5 +:DADD.B srcImm8, dst5B is b0_0007=0x1; ((b1_0407=0x8 & b1_size_0=0; b2_0005=0x1e) ... 
& $(DST5B)); srcImm8 { + src:2 = zext(srcImm8); + dst:2 = zext(dst5B); + tmp:2 = DecimalAdd(src, dst); + dst5B = tmp:1; + $(CARRY) = (tmp > 0x99); + setResultFlags(tmp:1); +} + +# (1) DADD.B #imm, Ax +:DADD.B srcImm8, dst5Ax is b0_0007=0x1; ((b1_0407=0x8 & b1_size_0=0; b2_0005=0x1e) & $(DST5AX)); srcImm8 { + src:2 = zext(srcImm8); + dst:2 = zext(dst5Ax:1); + tmp:2 = DecimalAdd(src, dst); + dst5Ax = zext(tmp:1); + $(CARRY) = (tmp > 0x99); + setResultFlags(tmp:1); +} + +# (1) DADD.W #imm, dst5 +:DADD.W srcImm16, dst5W is b0_0007=0x1; ((b1_0407=0x8 & b1_size_0=1; b2_0005=0x1e) ... & $(DST5W)); srcImm16 { + src:4 = zext(srcImm16); + dst:4 = zext(dst5W); + tmp:4 = DecimalAdd(src, dst); + dst5W = tmp:2; + $(CARRY) = (tmp > 0x9999); + setResultFlags(tmp:2); +} + +# (1) DADD.W #imm, Ax +:DADD.W srcImm16, dst5Ax is b0_0007=0x1; ((b1_0407=0x8 & b1_size_0=1; b2_0005=0x1e) & $(DST5AX)); srcImm16 { + src:4 = zext(srcImm16); + dst:4 = zext(dst5Ax:2); + tmp:4 = DecimalAdd(src, dst); + dst5Ax = zext(tmp:2); + $(CARRY) = (tmp > 0x9999); + setResultFlags(tmp:2); +} + +# (2) DADD.B src5, dst5 +:DADD.B src5B, dst5B_afterSrc5 is b0_0007=0x1; ((b1_0707=1 & b1_size_0=0; b2_0003=0x0) ... & $(SRC5B) ... & $(DST5B_AFTER_SRC5) ...) { + src:2 = zext(src5B); + dst:2 = zext(dst5B_afterSrc5); + tmp:2 = DecimalAdd(src, dst); + dst5B_afterSrc5 = tmp:1; + $(CARRY) = (tmp > 0x99); + setResultFlags(tmp:1); +} + +# (2) DADD.B src5, Ax +:DADD.B src5B, dst5Ax is b0_0007=0x1; ((b1_0707=1 & b1_size_0=0; b2_0003=0x0) ... & $(SRC5B) & $(DST5AX) ...) { + src:2 = zext(src5B); + dst:2 = zext(dst5Ax:1); + tmp:2 = DecimalAdd(src, dst); + dst5Ax = zext(tmp:1); + $(CARRY) = (tmp > 0x99); + setResultFlags(tmp:1); +} + +# (2) DADD.W src5, dst5 +:DADD.W src5W, dst5W_afterSrc5 is b0_0007=0x1; ((b1_0707=1 & b1_size_0=1; b2_0003=0x0) ... & $(SRC5W) ... & $(DST5W_AFTER_SRC5) ...) 
{ + src:4 = zext(src5W); + dst:4 = zext(dst5W_afterSrc5); + tmp:4 = DecimalAdd(src, dst); + dst5W_afterSrc5 = tmp:2; + $(CARRY) = (tmp > 0x9999); + setResultFlags(tmp:2); +} + +# (2) DADD.W src5, Ax +:DADD.W src5W, dst5Ax is b0_0007=0x1; ((b1_0707=1 & b1_size_0=1; b2_0003=0x0) ... & $(SRC5W) & $(DST5AX) ...) { + src:4 = zext(src5W); + dst:4 = zext(dst5Ax:2); + tmp:4 = DecimalAdd(src, dst); + dst5Ax = zext(tmp:2); + $(CARRY) = (tmp > 0x9999); + setResultFlags(tmp:2); +} + +##### DEC ##### + +# DEC.B dst5 +:DEC.B dst5B is (b1_0407=0xb & b1_size_0=0; b2_0005=0x0e) ... & $(DST5B) { + tmp:1 = dst5B - 1; + dst5B = tmp; + setResultFlags(tmp); +} + +# DEC.B Ax +:DEC.B dst5Ax is (b1_0407=0xb & b1_size_0=0; b2_0005=0x0e) & $(DST5AX) { + tmp:1 = dst5Ax:1 - 1; + dst5Ax = zext(tmp); + setResultFlags(tmp); +} + +# DEC.W dst5 +:DEC.W dst5W is (b1_0407=0xb & b1_size_0=1; b2_0005=0x0e) ... & $(DST5W) { + tmp:2 = dst5W - 1; + dst5W = tmp; + setResultFlags(tmp); +} + +# DEC.W Ax +:DEC.W dst5Ax is (b1_0407=0xb & b1_size_0=1; b2_0005=0x0e) & $(DST5AX) { + tmp:2 = dst5Ax:2 - 1; + dst5Ax = zext(tmp); + setResultFlags(tmp); +} + +##### DIV ##### + +# (1) DIV.B #imm +:DIV.B srcSimm8 is b1_0007=0xb0; b2_0007=0x43; srcSimm8 { + d:2 = sext(srcSimm8); + q:2 = R0 s/ d; + r:2 = R0 s% d; # remainder has same sign as R0 (dividend) + R0L = q:1; + R0H = r:1; + q = q s>> 8; + $(OVERFLOW) = (d == 0) || ((q != 0) && (q != -1)); +} + +# (1) DIV.W #imm +:DIV.W srcSimm16 is b1_0007=0xb0; b2_0007=0x53; srcSimm16 { + d:4 = sext(srcSimm16); + q:4 = R2R0 s/ d; + r:4 = R2R0 s% d; # remainder has same sign as R0 (dividend) + R0 = q:2; + R2 = r:2; + q = q s>> 16; + $(OVERFLOW) = (d == 0) || ((q != 0) && (q != -1)); +} + +# (2) DIV.B src5 +:DIV.B dst5B is (b1_0407=0x8 & b1_size_0=0; b2_0005=0x1e) ... 
& $(DST5B) { + d:2 = sext(dst5B); + q:2 = R0 s/ d; + r:2 = R0 s% d; # remainder has same sign as R0 (dividend) + R0L = q:1; + R0H = r:1; + q = q s>> 8; + $(OVERFLOW) = (d == 0) || ((q != 0) && (q != -1)); +} + +# (2) DIV.W src5 +:DIV.W dst5W is (b1_0407=0x8 & b1_size_0=1; b2_0005=0x1e) ... & $(DST5W) { + d:4 = sext(dst5W); + q:4 = R2R0 s/ d; + r:4 = R2R0 s% d; # remainder has same sign as R0 (dividend) + R0 = q:2; + R2 = r:2; + q = q s>> 16; + $(OVERFLOW) = (d == 0) || ((q != 0) && (q != -1)); +} + +##### DIVU ##### + +# (1) DIVU.B #imm +:DIVU.B srcImm8 is b1_0007=0xb0; b2_0007=0x03; srcImm8 { + d:2 = zext(srcImm8); + q:2 = R0 / d; + r:2 = R0 % d; + R0L = q:1; + R0H = r:1; + q = q s>> 8; + $(OVERFLOW) = (d == 0) || ((q != 0) && (q != -1)); +} + +# (1) DIVU.W #imm +:DIVU.W srcImm16 is b1_0007=0xb0; b2_0007=0x13; srcImm16 { + d:4 = zext(srcImm16); + q:4 = R2R0 / d; + r:4 = R2R0 % d; + R0 = q:2; + R2 = r:2; + q = q s>> 16; + $(OVERFLOW) = (d == 0) || ((q != 0) && (q != -1)); +} + +# (2) DIVU.B src5 +:DIVU.B dst5B is (b1_0407=0x8 & b1_size_0=0; b2_0005=0x0e) ... & $(DST5B) { + d:2 = zext(dst5B); + q:2 = R0 / d; + r:2 = R0 % d; + R0L = q:1; + R0H = r:1; + q = q s>> 8; + $(OVERFLOW) = (d == 0) || ((q != 0) && (q != -1)); +} + +# (2) DIVU.W src5 +:DIVU.W dst5W is (b1_0407=0x8 & b1_size_0=1; b2_0005=0x0e) ... 
& $(DST5W) { + d:4 = zext(dst5W); + q:4 = R2R0 / d; + r:4 = R2R0 % d; + R0 = q:2; + R2 = r:2; + q = q s>> 16; + $(OVERFLOW) = (d == 0) || ((q != 0) && (q != -1)); +} + +##### DIVX ##### + +# (1) DIVX.B #imm +:DIVX.B srcSimm8 is b1_0007=0xb2; b2_0007=0x43; srcSimm8 { + d:2 = sext(srcSimm8); + q:2 = R0 s/ d; + r:2 = R0 s% d; + + #according to the manual the remainder has the same sign as the quotient + differ:1 = (r s< 0) != (d s< 0); + r = (zext(differ) * (-r)) + (zext(!differ) * r); + R0L = q:1; + R0H = r:1; + q = q s>> 8; + $(OVERFLOW) = (d == 0) || ((q != 0) && (q != -1)); +} + +# (1) DIVX.W #imm +:DIVX.W srcSimm16 is b1_0007=0xb2; b2_0007=0x53; srcSimm16 { + d:4 = sext(srcSimm16); + q:4 = R2R0 s/ d; + r:4 = R2R0 s% d; + + #according to the manual the remainder has the same sign as the quotient + differ:1 = (r s< 0) != (d s< 0); + r = (zext(differ) * (-r)) + (zext(!differ) * r); + R0 = q:2; + R2 = r:2; + q = q s>> 16; + $(OVERFLOW) = (d == 0) || ((q != 0) && (q != -1)); +} + +# (2) DIVX.B src5 +:DIVX.B dst5B is (b1_0407=0x9 & b1_size_0=0; b2_0005=0x1e) ... & $(DST5B) { + d:2 = sext(dst5B); + q:2 = R0 s/ d; + r:2 = R0 s% d; + + #according to the manual the remainder has the same sign as the quotient + differ:1 = (r s< 0) != (d s< 0); + r = (zext(differ) * (-r)) + (zext(!differ) * r); + R0L = q:1; + R0H = r:1; + q = q s>> 8; + $(OVERFLOW) = (d == 0) || ((q != 0) && (q != -1)); +} + +# (2) DIVX.W src5 +:DIVX.W dst5W is (b1_0407=0x9 & b1_size_0=1; b2_0005=0x1e) ... & $(DST5W) { + d:4 = sext(dst5W); + q:4 = R2R0 s/ d; + r:4 = R2R0 s% d; + + #according to the manual the remainder has the same sign as the quotient + R0 = q:2; + R2 = r:2; + q = q s>> 16; + $(OVERFLOW) = (d == 0) || ((q != 0) && (q != -1)); +} + +##### DSBB ##### + +# (1) DSBB.B #imm, dst5 +:DSBB.B srcImm8, dst5B is b0_0007=0x1; ((b1_0407=0x9 & b1_size_0=0; b2_0005=0x0e) ... 
& $(DST5B)); srcImm8 { + src:2 = zext(srcImm8); + dst:2 = zext(dst5B); + c:1 = $(CARRY); + $(CARRY) = (c && (dst > src)) || (!c && (dst >= src)); + tmp:2 = DecimalSubtractWithBorrow(dst, src); + dst5B = tmp:1; + setResultFlags(tmp:1); +} + +# (1) DSBB.B #imm, Ax +:DSBB.B srcImm8, dst5Ax is b0_0007=0x1; ((b1_0407=0x9 & b1_size_0=0; b2_0005=0x0e) & $(DST5AX)); srcImm8 { + src:2 = zext(srcImm8); + dst:2 = zext(dst5Ax:1); + c:1 = $(CARRY); + $(CARRY) = (c && (dst > src)) || (!c && (dst >= src)); + tmp:2 = DecimalSubtractWithBorrow(dst, src); + dst5Ax = zext(tmp:1); + setResultFlags(tmp:1); +} + +# (1) DSBB.W #imm, dst5 +:DSBB.W srcImm16, dst5W is b0_0007=0x1; ((b1_0407=0x9 & b1_size_0=1; b2_0005=0x0e) ... & $(DST5W)); srcImm16 { + src:4 = zext(srcImm16); + dst:4 = zext(dst5W); + c:1 = $(CARRY); + $(CARRY) = (c && (dst > src)) || (!c && (dst >= src)); + tmp:4 = DecimalSubtractWithBorrow(dst, src); + dst5W = tmp:2; + setResultFlags(tmp:2); +} + +# (1) DSBB.W #imm, Ax +:DSBB.W srcImm16, dst5Ax is b0_0007=0x1; ((b1_0407=0x9 & b1_size_0=1; b2_0005=0x0e) & $(DST5AX)); srcImm16 { + src:4 = zext(srcImm16); + dst:4 = zext(dst5Ax:2); + c:1 = $(CARRY); + $(CARRY) = (c && (dst > src)) || (!c && (dst >= src)); + tmp:4 = DecimalSubtractWithBorrow(dst, src); + dst5Ax = zext(tmp:2); + setResultFlags(tmp:2); +} + +# (2) DSBB.B src5, dst5 +:DSBB.B src5B, dst5B_afterSrc5 is b0_0007=0x1; ((b1_0707=1 & b1_size_0=0; b2_0003=0xa) ... & $(SRC5B) ... & $(DST5B_AFTER_SRC5) ...) { + src:2 = zext(src5B); + dst:2 = zext(dst5B_afterSrc5); + c:1 = $(CARRY); + $(CARRY) = (c && (dst > src)) || (!c && (dst >= src)); + tmp:2 = DecimalSubtractWithBorrow(dst, src); + dst5B_afterSrc5 = tmp:1; + setResultFlags(tmp:1); +} + +# (2) DSBB.B src5, Ax +:DSBB.B src5B, dst5Ax is b0_0007=0x1; ((b1_0707=1 & b1_size_0=0; b2_0003=0xa) ... & $(SRC5B) & $(DST5AX) ...) 
{ + src:2 = zext(src5B); + dst:2 = zext(dst5Ax:1); + c:1 = $(CARRY); + $(CARRY) = (c && (dst > src)) || (!c && (dst >= src)); + tmp:2 = DecimalSubtractWithBorrow(dst, src); + dst5Ax = zext(tmp:1); + setResultFlags(tmp:1); +} + +# (2) DSBB.W src5, dst5 +:DSBB.W src5W, dst5W_afterSrc5 is b0_0007=0x1; ((b1_0707=1 & b1_size_0=1; b2_0003=0xa) ... & $(SRC5W) ... & $(DST5W_AFTER_SRC5) ...) { + src:4 = zext(src5W); + dst:4 = zext(dst5W_afterSrc5); + c:1 = $(CARRY); + $(CARRY) = (c && (dst > src)) || (!c && (dst >= src)); + tmp:4 = DecimalSubtractWithBorrow(dst, src); + dst5W_afterSrc5 = tmp:2; + setResultFlags(tmp:2); +} + +# (2) DSBB.W src5, Ax +:DSBB.W src5W, dst5Ax is b0_0007=0x1; ((b1_0707=1 & b1_size_0=1; b2_0003=0xa) ... & $(SRC5W) & $(DST5AX) ...) { + src:4 = zext(src5W); + dst:4 = zext(dst5Ax:2); + c:1 = $(CARRY); + $(CARRY) = (c && (dst > src)) || (!c && (dst >= src)); + tmp:4 = DecimalSubtractWithBorrow(dst, src); + dst5Ax = zext(tmp:2); + setResultFlags(tmp:2); +} + +##### DSUB ##### + +# (1) DSUB.B #imm, dst5 +:DSUB.B srcImm8, dst5B is b0_0007=0x1; ((b1_0407=0x9 & b1_size_0=0; b2_0005=0x1e) ... & $(DST5B)); srcImm8 { + src:2 = zext(srcImm8); + dst:2 = zext(dst5B); + $(CARRY) = (dst >= src); + tmp:2 = DecimalSubtract(dst, src); + dst5B = tmp:1; + setResultFlags(tmp:1); +} + +# (1) DSUB.B #imm, Ax +:DSUB.B srcImm8, dst5Ax is b0_0007=0x1; ((b1_0407=0x9 & b1_size_0=0; b2_0005=0x1e) & $(DST5AX)); srcImm8 { + src:2 = zext(srcImm8); + dst:2 = zext(dst5Ax:1); + $(CARRY) = (dst >= src); + tmp:2 = DecimalSubtract(dst, src); + dst5Ax = zext(tmp:1); + setResultFlags(tmp:1); +} + +# (1) DSUB.W #imm, dst5 +:DSUB.W srcImm16, dst5W is b0_0007=0x1; ((b1_0407=0x9 & b1_size_0=1; b2_0005=0x1e) ... 
& $(DST5W)); srcImm16 { + src:4 = zext(srcImm16); + dst:4 = zext(dst5W); + $(CARRY) = (dst >= src); + tmp:4 = DecimalSubtract(dst, src); + dst5W = tmp:2; + setResultFlags(tmp:2); +} + +# (1) DSUB.W #imm, Ax +:DSUB.W srcImm16, dst5Ax is b0_0007=0x1; ((b1_0407=0x9 & b1_size_0=1; b2_0005=0x1e) & $(DST5AX)); srcImm16 { + src:4 = zext(srcImm16); + dst:4 = zext(dst5Ax:2); + $(CARRY) = (dst >= src); + tmp:4 = DecimalSubtract(dst, src); + dst5Ax = zext(tmp:2); + setResultFlags(tmp:2); +} + +# (2) DSUB.B src5, dst5 +:DSUB.B src5B, dst5B_afterSrc5 is b0_0007=0x1; ((b1_0707=1 & b1_size_0=0; b2_0003=0x2) ... & $(SRC5B) ... & $(DST5B_AFTER_SRC5) ...) { + src:2 = zext(src5B); + dst:2 = zext(dst5B_afterSrc5); + $(CARRY) = (dst >= src); + tmp:2 = DecimalSubtract(dst, src); + dst5B_afterSrc5 = tmp:1; + setResultFlags(tmp:1); +} + +# (2) DSUB.B src5, Ax +:DSUB.B src5B, dst5Ax is b0_0007=0x1; ((b1_0707=1 & b1_size_0=0; b2_0003=0x2) ... & $(SRC5B) & $(DST5AX) ...) { + src:2 = zext(src5B); + dst:2 = zext(dst5Ax:1); + $(CARRY) = (dst >= src); + tmp:2 = DecimalSubtract(dst, src); + dst5Ax = zext(tmp:1); + setResultFlags(tmp:1); +} + +# (2) DSUB.W src5, dst5 +:DSUB.W src5W, dst5W_afterSrc5 is b0_0007=0x1; ((b1_0707=1 & b1_size_0=1; b2_0003=0x2) ... & $(SRC5W) ... & $(DST5W_AFTER_SRC5) ...) { + src:4 = zext(src5W); + dst:4 = zext(dst5W_afterSrc5); + $(CARRY) = (dst >= src); + tmp:4 = DecimalSubtract(dst, src); + dst5W_afterSrc5 = tmp:2; + setResultFlags(tmp:2); +} + +# (2) DSUB.W src5, Ax +:DSUB.W src5W, dst5Ax is b0_0007=0x1; ((b1_0707=1 & b1_size_0=1; b2_0003=0x2) ... & $(SRC5W) & $(DST5AX) ...) 
{ + src:4 = zext(src5W); + dst:4 = zext(dst5Ax:2); + $(CARRY) = (dst >= src); + tmp:4 = DecimalSubtract(dst, src); + dst5Ax = zext(tmp:2); + setResultFlags(tmp:2); +} + +##### ENTER ##### + +:ENTER srcImm8 is b1_0007=0xec; srcImm8 { + push3(FB); + FB = SP; + SP = SP - zext(srcImm8); +} + +##### EXITD ##### + +:EXITD is b1_0007=0xfc { + SP = FB; + pop3(FB); + pc:3 = 0; + pop3(pc); + return [pc]; +} + +##### EXTS ##### + +# (1) EXTS.B dst5 +:EXTS.B dst5B is (b1_0407=0xc & b1_size_0=0; b2_0005=0x1e) ... & $(DST5B) & $(DST5W) { + tmp:2 = sext(dst5B); + dst5W = tmp; + setResultFlags(tmp); +} + +# (1) EXTS.B Ax +:EXTS.B dst5Ax is (b1_0407=0xc & b1_size_0=0; b2_0005=0x1e) & $(DST5AX) { + tmp:2 = sext(dst5Ax:1); + dst5Ax = zext(tmp); + setResultFlags(tmp); +} + +# (1) EXTS.W dst5 +:EXTS.W dst5W is (b1_0407=0xc & b1_size_0=1; b2_0005=0x1e) ... & $(DST5W) & $(DST5L) { + tmp:4 = sext(dst5W); + dst5L = tmp; + setResultFlags(tmp); +} + +# (1) EXTS.W Ax +:EXTS.W dst5Ax is (b1_0407=0xc & b1_size_0=1; b2_0005=0x1e) & $(DST5AX) { + tmp:4 = sext(dst5Ax:2); + dst5Ax = tmp:3; + setResultFlags(tmp); +} + +# (2) EXTS.B src5, dst5 +:EXTS.B src5B, dst5W_afterSrc5 is b0_0007=0x1; ((b1_0707=1 & b1_size_0=0; b2_0003=0x7) ... & $(SRC5B) ... & $(DST5W_AFTER_SRC5) ...) { + tmp:2 = sext(src5B); + dst5W_afterSrc5 = tmp; + setResultFlags(tmp); +} + +# (2) EXTS.B src5, Ax +:EXTS.B src5B, dst5Ax is b0_0007=0x1; ((b1_0707=1 & b1_size_0=0; b2_0003=0x7) ... & $(SRC5B) & $(DST5AX) ...) { + tmp:2 = sext(src5B); + dst5Ax = zext(tmp); + setResultFlags(tmp); +} + +##### EXTZ ##### + +# (1) EXTZ.B src5, dst5 +:EXTZ.B src5B, dst5W_afterSrc5 is b0_0007=0x1; ((b1_0707=1 & b1_size_0=0; b2_0003=0xb) ... & $(SRC5B) ... & $(DST5W_AFTER_SRC5) ...) { + tmp:2 = zext(src5B); + dst5W_afterSrc5 = tmp; + setResultFlags(tmp); +} + +# (1) EXTZ.B src5, Ax +:EXTZ.B src5B, dst5Ax is b0_0007=0x1; ((b1_0707=1 & b1_size_0=0; b2_0003=0xb) ... & $(SRC5B) & $(DST5AX) ...) 
{ + tmp:2 = zext(src5B); + dst5Ax = zext(tmp); + setResultFlags(tmp); +} + +##### FCLR ##### + +:FCLR flagBit is b1_0007=0xd3; b2_0307=0x1d & flagBit { + mask:2 = ~(1 << flagBit); + FLG = FLG & mask; +} + +##### FREIT ##### + +:FREIT is b1_0007=0x9f { + FLG = SVF; + return [SVP]; +} + +##### FSET ##### + +:FSET flagBit is b1_0007=0xd1; b2_0307=0x1d & flagBit { + mask:2 = (1 << flagBit); + FLG = FLG | mask; +} + +##### INC ##### + +# INC.B dst5 +:INC.B dst5B is (b1_0407=0xa & b1_size_0=0; b2_0005=0x0e) ... & $(DST5B) { + tmp:1 = dst5B + 1; + dst5B = tmp; + setResultFlags(tmp); +} + +# INC.B Ax +:INC.B dst5Ax is (b1_0407=0xa & b1_size_0=0; b2_0005=0x0e) & $(DST5AX) { + tmp:1 = dst5Ax:1 + 1; + dst5Ax = zext(tmp); + setResultFlags(tmp); +} + +# INC.W dst5 +:INC.W dst5W is (b1_0407=0xa & b1_size_0=1; b2_0005=0x0e) ... & $(DST5W) { + tmp:2 = dst5W + 1; + dst5W = tmp; + setResultFlags(tmp); +} + +# INC.W Ax +:INC.W dst5Ax is (b1_0407=0xa & b1_size_0=1; b2_0005=0x0e) & $(DST5AX) { + tmp:2 = dst5Ax:2 + 1; + dst5Ax = zext(tmp); + setResultFlags(tmp); +} + +##### INDEXB ##### + +# 1000 1000 0100 0011 INDEXB.B R1H +:INDEXB.B dst5B is (b1_0407=0x8 & b1_size_0=0; b2_0005=0x03) ... & $(DST5B) + [ useByteIndexOffset=3; globalset(inst_next,useByteIndexOffset); useByteIndexOffset=0; ] { + byteIndexOffset = zext(dst5B); +} + +:INDEXB.W dst5W is (b1_0407=0x8 & b1_size_0=0; b2_0005=0x13) ... & $(DST5W) + [ useByteIndexOffset=3; globalset(inst_next,useByteIndexOffset); useByteIndexOffset=0; ] { + byteIndexOffset = zext(dst5W); +} + +##### INDEXBD ##### + +:INDEXBD.B dst5B is (b1_0407=0xa & b1_size_0=0; b2_0005=0x03) ... & $(DST5B) + [ useDstByteIndexOffset=3; globalset(inst_next,useDstByteIndexOffset); useDstByteIndexOffset=0; ] { + byteIndexOffset = zext(dst5B); +} + +:INDEXBD.W dst5W is (b1_0407=0xa & b1_size_0=0; b2_0005=0x13) ... 
& $(DST5W) + [ useDstByteIndexOffset=3; globalset(inst_next,useDstByteIndexOffset); useDstByteIndexOffset=0; ] { + byteIndexOffset = zext(dst5W); +} + +##### INDEXBS ##### + +:INDEXBS.B dst5B is (b1_0407=0xc & b1_size_0=0; b2_0005=0x03) ... & $(DST5B) + [ useSrcByteIndexOffset=3; globalset(inst_next,useSrcByteIndexOffset); useSrcByteIndexOffset=0; ] { + byteIndexOffset = zext(dst5B); +} + +:INDEXBS.W dst5W is (b1_0407=0xc & b1_size_0=0; b2_0005=0x13) ... & $(DST5W) + [ useSrcByteIndexOffset=3; globalset(inst_next,useSrcByteIndexOffset); useSrcByteIndexOffset=0; ] { + byteIndexOffset = zext(dst5W); +} + +##### INDEXL ##### + +:INDEXL.B dst5B is (b1_0407=0x9 & b1_size_0=0; b2_0005=0x23) ... & $(DST5B) + [ useByteIndexOffset=3; globalset(inst_next,useByteIndexOffset); useByteIndexOffset=0; ] { + byteIndexOffset = zext(dst5B) * 4; +} + +:INDEXL.W dst5W is (b1_0407=0x9 & b1_size_0=0; b2_0005=0x33) ... & $(DST5W) + [ useByteIndexOffset=3; globalset(inst_next,useByteIndexOffset); useByteIndexOffset=0; ] { + byteIndexOffset = zext(dst5W) * 4; +} + +##### INDEXLD ##### + +:INDEXLD.B dst5B is (b1_0407=0xb & b1_size_0=0; b2_0005=0x23) ... & $(DST5B) + [ useDstByteIndexOffset=3; globalset(inst_next,useDstByteIndexOffset); useDstByteIndexOffset=0; ] { + byteIndexOffset = zext(dst5B) * 4; +} + +:INDEXLD.W dst5W is (b1_0407=0xb & b1_size_0=0; b2_0005=0x33) ... & $(DST5W) + [ useDstByteIndexOffset=3; globalset(inst_next,useDstByteIndexOffset); useDstByteIndexOffset=0; ] { + byteIndexOffset = zext(dst5W) * 4; +} + +##### INDEXLS ##### + +:INDEXLS.B dst5B is (b1_0407=0x9 & b1_size_0=0; b2_0005=0x03) ... & $(DST5B) + [ useSrcByteIndexOffset=3; globalset(inst_next,useSrcByteIndexOffset); useSrcByteIndexOffset=0; ] { + byteIndexOffset = zext(dst5B) * 4; +} + +:INDEXLS.W dst5W is (b1_0407=0x9 & b1_size_0=0; b2_0005=0x13) ... 
& $(DST5W) + [ useSrcByteIndexOffset=3; globalset(inst_next,useSrcByteIndexOffset); useSrcByteIndexOffset=0; ] { + byteIndexOffset = zext(dst5W) * 4; +} + +##### INDEXW ##### + +:INDEXW.B dst5B is (b1_0407=0x8 & b1_size_0=0; b2_0005=0x23) ... & $(DST5B) + [ useByteIndexOffset=3; globalset(inst_next,useByteIndexOffset); useByteIndexOffset=0; ] { + byteIndexOffset = zext(dst5B) * 2; +} + +:INDEXW.W dst5W is (b1_0407=0x8 & b1_size_0=0; b2_0005=0x33) ... & $(DST5W) + [ useByteIndexOffset=3; globalset(inst_next,useByteIndexOffset); useByteIndexOffset=0; ] { + byteIndexOffset = zext(dst5W) * 2; +} + +##### INDEXWD ##### + +:INDEXWD.B dst5B is (b1_0407=0xa & b1_size_0=0; b2_0005=0x23) ... & $(DST5B) + [ useDstByteIndexOffset=3; globalset(inst_next,useDstByteIndexOffset); useDstByteIndexOffset=0; ] { + byteIndexOffset = zext(dst5B) * 2; +} + +:INDEXWD.W dst5W is (b1_0407=0xa & b1_size_0=0; b2_0005=0x33) ... & $(DST5W) + [ useDstByteIndexOffset=3; globalset(inst_next,useDstByteIndexOffset); useDstByteIndexOffset=0; ] { + byteIndexOffset = zext(dst5W) * 2; +} + +##### INDEXWS ##### + +:INDEXWS.B dst5B is (b1_0407=0xc & b1_size_0=0; b2_0005=0x23) ... & $(DST5B) + [ useSrcByteIndexOffset=3; globalset(inst_next,useSrcByteIndexOffset); useSrcByteIndexOffset=0; ] { + byteIndexOffset = zext(dst5B) * 2; +} + +:INDEXWS.W dst5W is (b1_0407=0xc & b1_size_0=0; b2_0005=0x33) ... 
& $(DST5W) + [ useSrcByteIndexOffset=3; globalset(inst_next,useSrcByteIndexOffset); useSrcByteIndexOffset=0; ] { + byteIndexOffset = zext(dst5W) * 2; +} + +##### INT ##### + +:INT srcIntNum is b1_0007=0xbe; imm8_0001=0 & srcIntNum { + push2(FLG); + next:3 = inst_next; + push3(next); + ptr3:3 = (INTB + (zext(srcIntNum) * 0x4)); + pc:3 = *:3 ptr3; + $(STACK_SEL) = ((srcIntNum > 0x1f) * $(STACK_SEL)); + $(INTERRUPT) = 0x0; + $(DEBUG) = 0x0; + call [pc]; +} + +##### INTO ##### + +:INTO is b1_0007=0xbf { + if ($(OVERFLOW) == 0) goto inst_next; + push2(FLG); + next:3 = inst_next; + push3(next); + $(STACK_SEL) = 0; + $(INTERRUPT) = 0x0; + $(DEBUG) = 0x0; + call 0x0ffffe0; +} + +##### JCnd ##### + +:J^b1cnd rel8offset1 is b1_0707=1 & b1_0103=5 & b1cnd; rel8offset1 { + if (b1cnd) goto rel8offset1; +} + +##### JMP ##### + +:JMP.S rel3offset2 is b1_0607=1 & b1_0103=5 & rel3offset2 { + goto rel3offset2; +} + +:JMP.B rel8offset1 is b1_0007=0xbb; rel8offset1 { + goto rel8offset1; +} + +:JMP.W rel16offset1 is b1_0007=0xce; rel16offset1 { + goto rel16offset1; +} + +:JMP.A abs24offset is b1_0007=0xcc; abs24offset { + goto abs24offset; +} + +##### JMPI ##### +:JMPI.W reloffset_dst5W is (b1_0407=0xc & b1_size_0=1; b2_0005=0x0f) ... & reloffset_dst5W { + goto reloffset_dst5W; +} + +:JMPI.A reloffset_dst5L is (b1_0407=0x8 & b1_size_0=0; b2_0005=0x01) ... 
& reloffset_dst5L { + goto reloffset_dst5L; +} + +:JMPI.A reloffset_dst5Ax is (b1_0407=0x8 & b1_size_0=0; b2_0005=0x01) & reloffset_dst5Ax { + goto reloffset_dst5Ax; +} + +##### JMPS ##### + +:JMPS srcImm8 is b1_0007=0xdc; srcImm8 { + # 18 <= srcImm8 <= 255 (range restriction not enforced by pattern match) + ptr:3 = 0x0fffe - (zext(srcImm8) << 1); + pc:3 = 0xff0000 | zext(*:2 ptr); + goto [pc]; +} + +##### JSR ##### + +:JSR.W rel16offset1 is b1_0007=0xcf; rel16offset1 { + next:3 = inst_next; + push3(next); + call rel16offset1; +} + +:JSR.A abs24offset is b1_0007=0xcd; abs24offset { + next:3 = inst_next; + push3(next); + call abs24offset; +} + +##### JSRI ##### + +:JSRI.W reloffset_dst5W is (b1_0407=0xc & b1_size_0=1; b2_0005=0x1f) ... & reloffset_dst5W { + next:3 = inst_next; + push3(next); + call reloffset_dst5W; +} + +:JSRI.A dst5L is (b1_0407=0x9 & b1_size_0=0; b2_0005=0x01) ... & $(DST5L) { + next:3 = inst_next; + push3(next); + pc:3 = dst5L:3; + call [pc]; +} + +:JSRI.A dst5Ax is (b1_0407=0x9 & b1_size_0=0; b2_0005=0x01) & $(DST5AX) { + next:3 = inst_next; + push3(next); + call [dst5Ax]; +} + +##### JSRS ##### + +:JSRS srcImm8 is b1_0007=0xdd; srcImm8 { + # 18 <= srcImm8 <= 255 (range restriction not enforced by pattern match) + next:3 = inst_next; + push3(next); + ptr:3 = 0x0fffe - (zext(srcImm8) << 1); + pc:3 = 0xff0000 | zext(*:2 ptr); + call [pc]; +} + +##### LDC ##### + +# (1) LDC #imm16, b2_creg16 +:LDC srcImm16, b2_creg16 is b1_0007=0xd5; b2_0307=0x15 & b2_creg16; srcImm16 { + b2_creg16 = srcImm16; +} + +# (2) LDC #imm24, b2_creg24 +:LDC srcImm24, b2_creg24 is b1_0007=0xd5; b2_0307=0x05 & b2_creg24; srcImm24 { + b2_creg24 = srcImm24; +} + +# (3) LDC #imm24, b2_dreg24 +:LDC srcImm24, b2_dreg24 is b1_0007=0xd5; b2_0307=0x0d & b2_dreg24; srcImm24 { + b2_dreg24 = srcImm24; +} + +# (4) LDC dst5, b2_creg16 +:LDC dst5W, b2_creg16 is b0_0007=0x1; ((b1_0407=0xd & b1_size_0=1; b2_0305=1 & b2_creg16) ... 
& $(DST5W)) { + b2_creg16 = dst5W; +} + +# (5) LDC dst5, b2_creg24 +:LDC dst5L, b2_creg24 is (b1_0407=0xd & b1_size_0=1; b2_0305=0 & b2_creg24) ... & $(DST5L) { + b2_creg24 = dst5L:3; +} + +# (6) LDC dst5, b2_dreg24 +:LDC dst5L, b2_dreg24 is b0_0007=0x1; ((b1_0407=0xd & b1_size_0=1; b2_0305=0 & b2_dreg24) ... & $(DST5L)) { + b2_dreg24 = dst5L:3; +} + +##### LDCTX ##### + +:LDCTX abs16offset, abs24offset is b1_0007=0xb6; b2_0007=0xc3; abs16offset; imm24_dat & abs24offset { + + taskNum:1 = abs16offset; # load task number stored at abs16 + ptr:3 = imm24_dat + (zext(taskNum) * 2); # compute table entry address relative to abs24 + regInfo:1 = *:1 ptr; + ptr = ptr + 1; + spCorrect:1 = *:1 ptr; + + ptr = SP; + + if ((regInfo & 1) == 0) goto ; + R0 = *:2 ptr; + ptr = ptr + 2; + + regInfo = regInfo >> 1; + if ((regInfo & 1) == 0) goto ; + R1 = *:2 ptr; + ptr = ptr + 2; + + regInfo = regInfo >> 1; + if ((regInfo & 1) == 0) goto ; + R2 = *:2 ptr; + ptr = ptr + 2; + + regInfo = regInfo >> 1; + if ((regInfo & 1) == 0) goto ; + R3 = *:2 ptr; + ptr = ptr + 2; + + regInfo = regInfo >> 1; + if ((regInfo & 1) == 0) goto ; + tmp:4 = *:4 ptr; + A0 = tmp:3; + ptr = ptr + 4; + + regInfo = regInfo >> 1; + if ((regInfo & 1) == 0) goto ; + tmp = *:4 ptr; + A1 = tmp:3; + ptr = ptr + 4; + + regInfo = regInfo >> 1; + if ((regInfo & 1) == 0) goto ; + tmp = *:4 ptr; + SB = tmp:3; + ptr = ptr + 4; + + regInfo = regInfo >> 1; + if ((regInfo & 1) == 0) goto ; + tmp = *:4 ptr; + FB = tmp:3; + ptr = ptr + 4; + + SP = SP + zext(spCorrect); +} + +##### LDIPL ##### + +:LDIPL srcImm3 is b1_0007=0xd5; b2_0307=0x1d & srcImm3 { + $(IPL) = srcImm3; +} + +##### MAX ##### + +# (1) MAX.B #imm, dst5 +:MAX.B srcSimm8, dst5B is b0_0007=0x1; ((b1_0407=0x8 & b1_size_0=0; b2_0005=0x3f) ... 
& $(DST5B)); srcSimm8 { + if (srcSimm8 s<= dst5B) goto inst_next; + dst5B = srcSimm8; +} + +# (1) MAX.B #imm, Ax +:MAX.B srcSimm8, dst5Ax is b0_0007=0x1; ((b1_0407=0x8 & b1_size_0=0; b2_0005=0x3f) & $(DST5AX)); srcSimm8 { + if (srcSimm8 s<= dst5Ax:1) goto inst_next; + dst5Ax = zext(srcSimm8); +} + +# (1) MAX.W #imm, dst5 +:MAX.W srcSimm16, dst5W is b0_0007=0x1; ((b1_0407=0x8 & b1_size_0=1; b2_0005=0x3f) ... & $(DST5W)); srcSimm16 { + if (srcSimm16 s<= dst5W) goto inst_next; + dst5W = srcSimm16; +} + +# (1) MAX.W #imm, Ax +:MAX.W srcSimm16, dst5Ax is b0_0007=0x1; ((b1_0407=0x8 & b1_size_0=1; b2_0005=0x3f) & $(DST5AX)); srcSimm16 { + if (srcSimm16 s<= dst5Ax:2) goto inst_next; + dst5Ax = zext(srcSimm16); +} + +# (2) MAX.B src5, dst5 +:MAX.B src5B, dst5B_afterSrc5 is b0_0007=0x1; ((b1_0707=1 & b1_size_0=0; b2_0003=0xd) ... & $(SRC5B) ... & $(DST5B_AFTER_SRC5) ...) { + val:1 = src5B; + if (val s<= dst5B_afterSrc5) goto inst_next; + dst5B_afterSrc5 = val; +} + +# (2) MAX.B src5, Ax +:MAX.B src5B, dst5Ax is b0_0007=0x1; ((b1_0707=1 & b1_size_0=0; b2_0003=0xd) ... & $(SRC5B) & $(DST5AX) ...) { + val:1 = src5B; + if (val s<= dst5Ax:1) goto inst_next; + dst5Ax = zext(val); +} + +# (2) MAX.W src5, dst5 +:MAX.W src5W, dst5W_afterSrc5 is b0_0007=0x1; ((b1_0707=1 & b1_size_0=1; b2_0003=0xd) ... & $(SRC5W) ... & $(DST5W_AFTER_SRC5) ...) { + val:2 = src5W; + if (val s<= dst5W_afterSrc5) goto inst_next; + dst5W_afterSrc5 = val; +} + +# (2) MAX.W src5, Ax +:MAX.W src5W, dst5Ax is b0_0007=0x1; ((b1_0707=1 & b1_size_0=1; b2_0003=0xd) ... & $(SRC5W) & $(DST5AX) ...) { + val:2 = src5W; + if (val s<= dst5Ax:2) goto inst_next; + dst5Ax = zext(val); +} + +##### MIN ##### + +# (1) MIN.B #imm, dst5 +:MIN.B srcSimm8, dst5B is b0_0007=0x1; ((b1_0407=0x8 & b1_size_0=0; b2_0005=0x2f) ... 
& $(DST5B)); srcSimm8 { + if (srcSimm8 s>= dst5B) goto inst_next; + dst5B = srcSimm8; +} + +# (1) MIN.B #imm, Ax +:MIN.B srcSimm8, dst5Ax is b0_0007=0x1; ((b1_0407=0x8 & b1_size_0=0; b2_0005=0x2f) & $(DST5AX)); srcSimm8 { + if (srcSimm8 s>= dst5Ax:1) goto inst_next; + dst5Ax = zext(srcSimm8); +} + +# (1) MIN.W #imm, dst5 +:MIN.W srcSimm16, dst5W is b0_0007=0x1; ((b1_0407=0x8 & b1_size_0=1; b2_0005=0x2f) ... & $(DST5W)); srcSimm16 { + if (srcSimm16 s>= dst5W) goto inst_next; + dst5W = srcSimm16; +} + +# (1) MIN.W #imm, Ax +:MIN.W srcSimm16, dst5Ax is b0_0007=0x1; ((b1_0407=0x8 & b1_size_0=1; b2_0005=0x2f) & $(DST5AX)); srcSimm16 { + if (srcSimm16 s>= dst5Ax:2) goto inst_next; + dst5Ax = zext(srcSimm16); +} + +# (2) MIN.B src5, dst5 +:MIN.B src5B, dst5B_afterSrc5 is b0_0007=0x1; ((b1_0707=1 & b1_size_0=0; b2_0003=0xc) ... & $(SRC5B) ... & $(DST5B_AFTER_SRC5) ...) { + val:1 = src5B; + if (val s>= dst5B_afterSrc5) goto inst_next; + dst5B_afterSrc5 = val; +} + +# (2) MIN.B src5, Ax +:MIN.B src5B, dst5Ax is b0_0007=0x1; ((b1_0707=1 & b1_size_0=0; b2_0003=0xc) ... & $(SRC5B) & $(DST5AX) ...) { + val:1 = src5B; + if (val s>= dst5Ax:1) goto inst_next; + dst5Ax = zext(val); +} + +# (2) MIN.W src5, dst5 +:MIN.W src5W, dst5W_afterSrc5 is b0_0007=0x1; ((b1_0707=1 & b1_size_0=1; b2_0003=0xc) ... & $(SRC5W) ... & $(DST5W_AFTER_SRC5) ...) { + val:2 = src5W; + if (val s>= dst5W_afterSrc5) goto inst_next; + dst5W_afterSrc5 = val; +} + +# (2) MIN.W src5, Ax +:MIN.W src5W, dst5Ax is b0_0007=0x1; ((b1_0707=1 & b1_size_0=1; b2_0003=0xc) ... & $(SRC5W) & $(DST5AX) ...) { + val:2 = src5W; + if (val s>= dst5Ax:2) goto inst_next; + dst5Ax = zext(val); +} + +##### MOV ##### + +# (1) MOV.B:G #imm, dst5 +:MOV^".B:G" srcImm8, dst5B is ((b1_0407=0x9 & b1_size_0=0; b2_0005=0x2f) ... 
& $(DST5B)); srcImm8 { + val:1 = srcImm8; + dst5B = val; + setResultFlags(val); +} + +# (1) MOV.B:G #imm, Ax +:MOV^".B:G" srcImm8, dst5Ax is ((b1_0407=0x9 & b1_size_0=0; b2_0005=0x2f) & $(DST5AX)); srcImm8 { + val:1 = srcImm8; + dst5Ax = zext(val); + setResultFlags(val); +} + +# (1) MOV.W:G #imm, dst5 +:MOV^".W:G" srcImm16, dst5W is ((b1_0407=0x9 & b1_size_0=1; b2_0005=0x2f) ... & $(DST5W)); srcImm16 { + val:2 = srcImm16; + dst5W = val; + setResultFlags(val); +} + +# (1) MOV.W:G #imm, Ax +:MOV^".W:G" srcImm16, dst5Ax is ((b1_0407=0x9 & b1_size_0=1; b2_0005=0x2f) & $(DST5AX)); srcImm16 { + val:2 = srcImm16; + dst5Ax = zext(val); + setResultFlags(val); +} + +# (2) MOV.L:G #imm, dst5 +:MOV^".L:G" srcImm32, dst5L is ((b1_0407=0xb & b1_size_0=0; b2_0005=0x31) ... & $(DST5L)); srcImm32 { + val:4 = srcImm32; + dst5L = val; + setResultFlags(val); +} + +# (2) MOV.L:G #imm, Ax +:MOV^".L:G" srcImm32, dst5Ax is ((b1_0407=0xb & b1_size_0=0; b2_0005=0x31) & $(DST5AX)); srcImm32 { + val:4 = srcImm32; + dst5Ax = val:3; + setResultFlags(val); +} + +# (3) MOV.B:Q #imm4, dst5 +:MOV^".B:Q" srcSimm4, dst5B is (b1_0407=0xf & b1_size_0=0; b2_0405=2 & srcSimm4) ... & $(DST5B) { + val:1 = srcSimm4; + dst5B = val; + setResultFlags(val); +} + +# (3) MOV.B:Q #imm4, Ax +:MOV^".B:Q" srcSimm4, dst5Ax is (b1_0407=0xf & b1_size_0=0; b2_0405=2 & srcSimm4) & $(DST5AX) { + val:1 = srcSimm4; + dst5Ax = zext(val); + setResultFlags(val); +} + +# (3) MOV.W:Q #imm4, dst5 +:MOV^".W:Q" srcSimm4, dst5W is (b1_0407=0xf & b1_size_0=1; b2_0405=2 & srcSimm4) ... & $(DST5W) { + val:2 = sext(srcSimm4); + dst5W = val; + setResultFlags(val); +} + +# (3) MOV.W:Q #imm4, Ax +:MOV^".W:Q" srcSimm4, dst5Ax is (b1_0407=0xf & b1_size_0=1; b2_0405=2 & srcSimm4) & $(DST5AX) { + val:2 = sext(srcSimm4); + dst5Ax = zext(val); + setResultFlags(val); +} + +# (4) MOV.B:S #imm, dst2 +:MOV^".B:S" srcImm8, dst2B is ((b1_0607=0 & b1_0103=2 & b1_size_0=0) ... 
& dst2B); srcImm8 { + val:1 = srcImm8; + dst2B = val; + setResultFlags(val); +} + +# (4) MOV.W:S #imm, dst2 +:MOV^".W:S" srcImm16, dst2W is ((b1_0607=0 & b1_0103=2 & b1_size_0=1) ... & dst2W); srcImm16 { + val:2 = srcImm16; + dst2W = val; + setResultFlags(val); +} + +# (5) MOV.W:S #imm16, Ax +:MOV^".W:S" srcImm16, b1_d1_regAx is b1_0607=2 & b1_size_5=0 & b1_0104=0xe & b1_d1_regAx; srcImm16 { + val:2 = srcImm16; + b1_d1_regAx = zext(val); + setResultFlags(val); +} + +# (5) MOV.L:S #imm24, Ax +:MOV^".L:S" srcImm24, b1_d1_regAx is b1_0607=2 & b1_size_5=1 & b1_0104=0xe & b1_d1_regAx; srcImm24 { + val:3 = srcImm24; + b1_d1_regAx = val; + setResultFlags(val); +} + +# (6) MOV.B:Z #0, dst2 +:MOV^".B:Z" srcZero8, dst2B is (b1_0607=0 & b1_0103=1 & b1_size_0=0 & srcZero8) ... & dst2B { + dst2B = 0; + $(SIGN) = 0; + $(ZERO) = 1; +} + +# (6) MOV.W:Z #0, dst2 +:MOV^".W:Z" srcZero16, dst2W is (b1_0607=0 & b1_0103=1 & b1_size_0=1 & srcZero16) ... & dst2W { + dst2W = 0; + $(SIGN) = 0; + $(ZERO) = 1; +} + +# (7) MOV.B:G src5, dst5 +:MOV^".B:G" src5B, dst5B_afterSrc5 is (b1_0707=1 & b1_size_0=0; b2_0003=0xb) ... & $(SRC5B) ... & $(DST5B_AFTER_SRC5) ... { + val:1 = src5B; + dst5B_afterSrc5 = val; + setResultFlags(val); +} + +# (7) MOV.B:G src5, Ax +:MOV^".B:G" src5B, dst5Ax is (b1_0707=1 & b1_size_0=0; b2_0003=0xb) ... & $(SRC5B) & $(DST5AX) ... { + val:1 = src5B; + dst5Ax = zext(val); + setResultFlags(val); +} + +# (7) MOV.W:G src5, dst5 +:MOV^".W:G" src5W, dst5W_afterSrc5 is (b1_0707=1 & b1_size_0=1; b2_0003=0xb) ... & $(SRC5W) ... & $(DST5W_AFTER_SRC5) ... { + val:2 = src5W; + dst5W_afterSrc5 = val; + setResultFlags(val); +} + +# (7) MOV.W:G src5, Ax +:MOV^".W:G" src5W, dst5Ax is (b1_0707=1 & b1_size_0=1; b2_0003=0xb) ... & $(SRC5W) & $(DST5AX) ... { + val:2 = src5W; + dst5Ax = zext(val); + setResultFlags(val); +} + +# (8) MOV.L:G src5, dst5 +:MOV^".L:G" src5L, dst5L_afterSrc5 is (b1_0707=1 & b1_size_0=1; b2_0003=0x3) ... & $(SRC5L) ... & $(DST5L_AFTER_SRC5) ... 
{ + val:4 = src5L; + dst5L_afterSrc5 = val; + setResultFlags(val); +} + +# (8) MOV.L:G src5, Ax +:MOV^".L:G" src5L, dst5Ax is (b1_0707=1 & b1_size_0=1; b2_0003=0x3) ... & $(SRC5L) & $(DST5AX) ... { + val:4 = src5L; + dst5Ax = val:3; + setResultFlags(val); +} + +# (9) MOV.B:S src2, R0L +:MOV^".B:S" dst2B, R0L is (R0L & b1_0607=0 & b1_0103=4 & b1_size_0=0) ... & dst2B { + val:1 = dst2B; + R0L = val; + setResultFlags(val); +} + +# (9) MOV.W:S src2, R0 +:MOV^".W:S" dst2W, R0 is (R0 & b1_0607=0 & b1_0103=4 & b1_size_0=1) ... & dst2W { + val:2 = dst2W; + R0 = val; + setResultFlags(val); +} + +# (10) MOV.B:S src2, R1L +:MOV^".B:S" dst2B, R1L is (R1L & b1_0607=1 & b1_0103=7 & b1_size_0=0) ... & dst2B { + val:1 = dst2B; + R1L = val; + setResultFlags(val); +} + +# (10) MOV.W:S src2, R1 +:MOV^".W:S" dst2W, R1 is (R1 & b1_0607=1 & b1_0103=7 & b1_size_0=1) ... & dst2W { + val:2 = dst2W; + R1 = val; + setResultFlags(val); +} + +# (11) MOV.B:S R0L, dst2 +:MOV^".B:S" R0L, dst2B is (R0L & b1_0607=0 & b1_0103=0 & b1_size_0=0) ... & dst2B { + val:1 = R0L; + dst2B = val; + setResultFlags(val); +} + +# (11) MOV.W:S R0, dst2 +:MOV^".W:S" R0, dst2W is (R0 & b1_0607=0 & b1_0103=0 & b1_size_0=1) ... & dst2W { + val:2 = R0; + dst2W = val; + setResultFlags(val); +} + +# (12) MOV.L:S src2L, Ax +:MOV^".L:S" dst2L, b1_d1_regAx is (b1_0607=1 & b1_0103=4 & b1_d1_regAx) ... & dst2L { + val:4 = dst2L; + b1_d1_regAx = val:3; + setResultFlags(val); +} + +# (13) MOV.B:G dsp:8[SP], dst5 +# 1011 0110 1000 1111 0001 0010 1110 1111 1100 1101 1010 1011 MOV.G:G 0x12(SP),0xabcdef +:MOV^".B:G" dsp8spB, dst5B_afterDsp8 is (b1_0407=0xb & b1_size_0=0; b2_0005=0x0f; dsp8spB) ... & $(DST5B_AFTER_DSP8) { + val:1 = dsp8spB; + dst5B_afterDsp8 = val; + setResultFlags(val); +} + +# (13) MOV.B:G dsp:8[SP], Ax +:MOV^".B:G" dsp8spB, dst5Ax is (b1_0407=0xb & b1_size_0=0; b2_0005=0x0f; dsp8spB) & $(DST5AX) ... 
{ + val:1 = dsp8spB; + dst5Ax = zext(val); + setResultFlags(val); +} + +# (13) MOV.W:G dsp:8[SP], dst5 +:MOV^".W:G" dsp8spW, dst5W_afterDsp8 is (b1_0407=0xb & b1_size_0=1; b2_0005=0x0f; dsp8spW) ... & $(DST5W_AFTER_DSP8) { + val:2 = dsp8spW; + dst5W_afterDsp8 = val; + setResultFlags(val); +} + +# (13) MOV.W:G dsp:8[SP], Ax +:MOV^".W:G" dsp8spW, dst5Ax is (b1_0407=0xb & b1_size_0=1; b2_0005=0x0f; dsp8spW) & $(DST5AX) ... { + val:2 = dsp8spW; + dst5Ax = zext(val); + setResultFlags(val); +} + +# (14) MOV.B:G src5, dsp:8[SP] +:MOV^".B:G" dst5B, dsp8spB is ((b1_0407=0xa & b1_size_0=0; b2_0005=0x0f) ... & $(DST5B)); dsp8spB { + val:1 = dst5B; + dsp8spB = val; + setResultFlags(val); +} + +# (14) MOV.W:G src5, dsp:8[SP] +:MOV^".W:G" dst5W, dsp8spW is ((b1_0407=0xa & b1_size_0=1; b2_0005=0x0f) ... & $(DST5W)); dsp8spW { + val:2 = dst5W; + dsp8spW = val; + setResultFlags(val); +} + +##### MOVA ##### + +# MOVA dst5A, RxRx +:MOVA dst5A, b2_reg32 is (b1_0407=0xd & b1_size_0=1; b2_0105=0xc & b2_reg32) ... & $(DST5A) { + b2_reg32 = zext(dst5A); +} + +# MOVA dst5A, Ax +:MOVA dst5A, b2_regAx is (b1_0407=0xd & b1_size_0=1; b2_0105=0xd & b2_regAx) ... & $(DST5A) { + b2_regAx = dst5A; +} + +##### MOVDir ##### + +# TODO: dst5B=Ax case will parse but is not valid + +# (1) MOVDir R0L, dst +:MOVLL R0L, dst5B is R0L & b0_0007=0x1; ((b1_0407=0xb & b1_size_0=0; b2_0005=0x0e) ... & $(DST5B)) { + dst5B = (R0L & 0x0f) | (dst5B & 0xf0); +} +:MOVHL R0L, dst5B is R0L & b0_0007=0x1; ((b1_0407=0xb & b1_size_0=0; b2_0005=0x1e) ... & $(DST5B)) { + dst5B = ((R0L & 0xf0) >> 4) | (dst5B & 0xf0); +} +:MOVLH R0L, dst5B is R0L & b0_0007=0x1; ((b1_0407=0xb & b1_size_0=0; b2_0005=0x2e) ... & $(DST5B)) { + dst5B = ((R0L & 0x0f) << 4) | (dst5B & 0x0f); +} +:MOVHH R0L, dst5B is R0L & b0_0007=0x1; ((b1_0407=0xb & b1_size_0=0; b2_0005=0x3e) ... & $(DST5B)) { + dst5B = (R0L & 0xf0) | (dst5B & 0x0f); +} + +# (2) MOVDir dst, R0L +:MOVLL dst5B, R0L is R0L & b0_0007=0x1; ((b1_0407=0xa & b1_size_0=0; b2_0005=0x0e) ... 
& $(DST5B)) { + R0L = (dst5B & 0x0f) | (R0L & 0xf0); +} +:MOVHL dst5B, R0L is R0L & b0_0007=0x1; ((b1_0407=0xa & b1_size_0=0; b2_0005=0x1e) ... & $(DST5B)) { + R0L = ((dst5B & 0xf0) >> 4) | (R0L & 0xf0); +} +:MOVLH dst5B, R0L is R0L & b0_0007=0x1; ((b1_0407=0xa & b1_size_0=0; b2_0005=0x2e) ... & $(DST5B)) { + R0L = ((dst5B & 0x0f) << 4) | (R0L & 0x0f); +} +:MOVHH dst5B, R0L is R0L & b0_0007=0x1; ((b1_0407=0xa & b1_size_0=0; b2_0005=0x3e) ... & $(DST5B)) { + R0L = (dst5B & 0xf0) | (R0L & 0x0f); +} + +##### MOVX ##### + +:MOVX srcSimm8, dst5L is ((b1_0407=0xb & b1_size_0=0; b2_0005=0x11) ... & $(DST5L)); srcSimm8 { + val:4 = sext(srcSimm8); + dst5L = val; + setResultFlags(val); +} + +:MOVX srcSimm8, dst5Ax is ((b1_0407=0xb & b1_size_0=0; b2_0005=0x11) & $(DST5AX)); srcSimm8 { + val:3 = sext(srcSimm8); + dst5Ax = val; + setResultFlags(val); +} + +##### MUL ##### + +# TODO: Illegal MUL destination cases will parse but are not valid (e.g., R0H/R2, R1H/R3) + +# (1) MUL.B #imm, dst5 +:MUL.B srcSimm8, dst5B is ((b1_0407=0x8 & b1_size_0=0; b2_0005=0x1f) ... & $(DST5W) & $(DST5B)); srcSimm8 { + dst5W = sext(srcSimm8) * sext(dst5B); +} + +# (1) MUL.B #imm, Ax +:MUL.B srcSimm8, dst5Ax is ((b1_0407=0x8 & b1_size_0=0; b2_0005=0x1f) & $(DST5AX)); srcSimm8 { + val:2 = sext(srcSimm8) * sext(dst5Ax:1); + dst5Ax = zext(val); +} + +# (1) MUL.W #imm, dst5 +:MUL.W srcSimm16, dst5W is ((b1_0407=0x8 & b1_size_0=1; b2_0005=0x1f) ... & $(DST5L) & $(DST5W)); srcSimm16 { + dst5L = sext(srcSimm16) * sext(dst5W); +} + +# (1) MUL.W #imm, Ax +:MUL.W srcSimm16, dst5Ax is ((b1_0407=0x8 & b1_size_0=1; b2_0005=0x1f) & $(DST5AX)); srcSimm16 { + val:4 = sext(srcSimm16) * sext(dst5Ax:2); + dst5Ax = val:3; +} + +# (2) MUL.B src5, dst5 +:MUL.B src5B, dst5B_afterSrc5 is (b1_0707=1 & b1_size_0=0; b2_0003=0xc) ... & $(SRC5B) ... & $(DST5B_AFTER_SRC5) ... & $(DST5W_AFTER_SRC5) ... 
{ + dst5W_afterSrc5 = sext(src5B) * sext(dst5B_afterSrc5); +} + +# (2) MUL.B src5, Ax +:MUL.B src5B, dst5Ax is (b1_0707=1 & b1_size_0=0; b2_0003=0xc) ... & $(SRC5B) & $(DST5AX) ... { + val:2 = sext(src5B) * sext(dst5Ax:1); + dst5Ax = zext(val); +} + +# (2) MUL.W src5, dst5 +:MUL.W src5W, dst5W_afterSrc5 is (b1_0707=1 & b1_size_0=1; b2_0003=0xc) ... & $(SRC5W) ... & $(DST5W_AFTER_SRC5) ... & $(DST5L_AFTER_SRC5) ... { + dst5L_afterSrc5 = sext(src5W) * sext(dst5W_afterSrc5); +} + +# (2) MUL.W src5, Ax +:MUL.W src5W, dst5Ax is (b1_0707=1 & b1_size_0=1; b2_0003=0xc) ... & $(SRC5W) & $(DST5AX) ... { + val:4 = sext(src5W) * sext(dst5Ax:2); + dst5Ax = val:3; +} + +##### MULEX ##### + +:MULEX dst5W is (b1_0407=0xc & b1_size_0=1; b2_0005=0x3e) ... & $(DST5W) { + R1R2R0 = sext(R2R0) * sext(dst5W); +} + +##### MULU ##### + +# TODO: Illegal MULU destination cases will parse but are not valid (e.g., R0H/R2, R1H/R3) + +# (1) MULU.B #imm, dst5 +:MULU.B srcImm8, dst5B is ((b1_0407=0x8 & b1_size_0=0; b2_0005=0x0f) ... & $(DST5B) & $(DST5W)); srcImm8 { + dst5W = zext(srcImm8) * zext(dst5B); +} + +# (1) MULU.B #imm, Ax +:MULU.B srcImm8, dst5Ax is ((b1_0407=0x8 & b1_size_0=0; b2_0005=0x0f) & $(DST5AX)); srcImm8 { + val:2 = zext(srcImm8) * zext(dst5Ax:1); + dst5Ax = zext(val); +} + +# (1) MULU.W #imm, dst5 +:MULU.W srcImm16, dst5W is ((b1_0407=0x8 & b1_size_0=1; b2_0005=0x0f) ... & $(DST5W) & $(DST5L)); srcImm16 { + dst5L = zext(srcImm16) * zext(dst5W); +} + +# (1) MULU.W #imm, Ax +:MULU.W srcImm16, dst5Ax is ((b1_0407=0x8 & b1_size_0=1; b2_0005=0x0f) & $(DST5AX)); srcImm16 { + val:4 = zext(srcImm16) * zext(dst5Ax:2); + dst5Ax = val:3; +} + +# (2) MULU.B src5, dst5 +:MULU.B src5B, dst5B_afterSrc5 is (b1_0707=1 & b1_size_0=0; b2_0003=0x4) ... & $(SRC5B) ... & $(DST5B_AFTER_SRC5) ... & $(DST5W_AFTER_SRC5) ... { + dst5W_afterSrc5 = zext(src5B) * zext(dst5B_afterSrc5); +} + +# (2) MULU.B src5, Ax +:MULU.B src5B, dst5Ax is (b1_0707=1 & b1_size_0=0; b2_0003=0x4) ... & $(SRC5B) & $(DST5AX) ... 
{
+	val:2 = zext(src5B) * zext(dst5Ax:1);
+	dst5Ax = zext(val);
+}
+
+# (2) MULU.W src5, dst5
+:MULU.W src5W, dst5W_afterSrc5 is (b1_0707=1 & b1_size_0=1; b2_0003=0x4) ... & $(SRC5W) ... & $(DST5W_AFTER_SRC5) ... & $(DST5L_AFTER_SRC5) ... {
+	dst5L_afterSrc5 = zext(src5W) * zext(dst5W_afterSrc5);
+}
+
+# (2) MULU.W src5, Ax
+:MULU.W src5W, dst5Ax is (b1_0707=1 & b1_size_0=1; b2_0003=0x4) ... & $(SRC5W) & $(DST5AX) ... {
+	val:4 = zext(src5W) * zext(dst5Ax:2);
+	dst5Ax = val:3;
+}
+
+##### NEG #####
+
+# NEG.B dst5 -- two's-complement negate; borrow/overflow computed against 0 - dst before negation
+:NEG.B dst5B is (b1_0407=0xa & b1_size_0=0; b2_0005=0x2f) ... & $(DST5B) {
+	tmp:1 = dst5B;
+	setSubtractFlags(0:1, tmp);
+	tmp = -tmp;
+	dst5B = tmp;
+	setResultFlags(tmp);
+}
+
+# NEG.B Ax
+:NEG.B dst5Ax is (b1_0407=0xa & b1_size_0=0; b2_0005=0x2f) & $(DST5AX) {
+	tmp:1 = dst5Ax:1;
+	setSubtractFlags(0:1, tmp);
+	tmp = -tmp;
+	dst5Ax = zext(tmp);
+	setResultFlags(tmp);
+}
+
+# NEG.W dst5
+:NEG.W dst5W is (b1_0407=0xa & b1_size_0=1; b2_0005=0x2f) ... & $(DST5W) {
+	tmp:2 = dst5W;
+	setSubtractFlags(0:2, tmp);
+	tmp = -tmp;
+	dst5W = tmp;
+	setResultFlags(tmp);
+}
+
+# NEG.W Ax
+:NEG.W dst5Ax is (b1_0407=0xa & b1_size_0=1; b2_0005=0x2f) & $(DST5AX) {
+	tmp:2 = dst5Ax:2;
+	setSubtractFlags(0:2, tmp);
+	tmp = -tmp;
+	dst5Ax = zext(tmp);
+	setResultFlags(tmp);
+}
+
+##### NOP #####
+
+:NOP is b1_0007=0xde {
+}
+
+##### NOT #####
+
+# NOT.B dst5 -- bitwise complement; only result flags affected
+:NOT.B dst5B is (b1_0407=0xa & b1_size_0=0; b2_0005=0x1e) ... & $(DST5B) {
+	tmp:1 = ~dst5B;
+	dst5B = tmp;
+	setResultFlags(tmp);
+}
+
+# NOT.B Ax
+# NOTE(review): removed dead self-assignment "tmp = tmp;" left over from an earlier edit
+:NOT.B dst5Ax is (b1_0407=0xa & b1_size_0=0; b2_0005=0x1e) & $(DST5AX) {
+	tmp:1 = ~dst5Ax:1;
+	dst5Ax = zext(tmp);
+	setResultFlags(tmp);
+}
+
+# NOT.W dst5
+:NOT.W dst5W is (b1_0407=0xa & b1_size_0=1; b2_0005=0x1e) ...
& $(DST5W) {
+	tmp:2 = ~dst5W;
+	dst5W = tmp;
+	setResultFlags(tmp);
+}
+
+# NOT.W Ax
+:NOT.W dst5Ax is (b1_0407=0xa & b1_size_0=1; b2_0005=0x1e) & $(DST5AX) {
+	tmp:2 = ~dst5Ax:2;
+	dst5Ax = zext(tmp);
+	setResultFlags(tmp);
+}
+
+##### OR #####
+# NOTE(review): OR must use P-code '|'; an earlier revision used '&' (AND) in every
+# variant below, which would have made OR indistinguishable from AND in decompilation.
+
+# (1) OR.B:G #imm, dst
+:OR^".B:G" srcImm8, dst5B is ((b1_0407=0x8 & b1_size_0=0; b2_0005=0x2f) ... & $(DST5B)); srcImm8 {
+	tmp:1 = dst5B | srcImm8;
+	dst5B = tmp;
+	setResultFlags(tmp);
+}
+
+# (1) OR.B:G #imm, Ax
+:OR^".B:G" srcImm8, dst5Ax is ((b1_0407=0x8 & b1_size_0=0; b2_0005=0x2f) & $(DST5AX)); srcImm8 {
+	tmp:1 = dst5Ax:1 | srcImm8;
+	dst5Ax = zext(tmp);
+	setResultFlags(tmp);
+}
+
+# (1) OR.W:G #imm, dst
+:OR^".W:G" srcImm16, dst5W is ((b1_0407=0x8 & b1_size_0=1; b2_0005=0x2f) ... & $(DST5W)); srcImm16 {
+	tmp:2 = dst5W | srcImm16;
+	dst5W = tmp;
+	setResultFlags(tmp);
+}
+
+# (1) OR.W:G #imm, Ax
+:OR^".W:G" srcImm16, dst5Ax is ((b1_0407=0x8 & b1_size_0=1; b2_0005=0x2f) & $(DST5AX)); srcImm16 {
+	tmp:2 = dst5Ax:2 | srcImm16;
+	dst5Ax = zext(tmp);
+	setResultFlags(tmp);
+}
+
+# (2) OR.B:S #imm, dst
+:OR^".B:S" srcImm8, dst2B is ((b1_0607=1 & b1_0103=2 & b1_size_0=0) ... & dst2B); srcImm8 {
+	tmp:1 = dst2B | srcImm8;
+	dst2B = tmp;
+	setResultFlags(tmp);
+}
+
+# (2) OR.W:S #imm, dst
+:OR^".W:S" srcImm16, dst2W is ((b1_0607=1 & b1_0103=2 & b1_size_0=1) ... & dst2W); srcImm16 {
+	tmp:2 = dst2W | srcImm16;
+	dst2W = tmp;
+	setResultFlags(tmp);
+}
+
+# (3) OR.B:G src5, dst5
+:OR^".B:G" src5B, dst5B_afterSrc5 is (b1_0707=1 & b1_size_0=0; b2_0003=0x5) ... & $(SRC5B) ... & $(DST5B_AFTER_SRC5) ... {
+	tmp:1 = dst5B_afterSrc5 | src5B;
+	dst5B_afterSrc5 = tmp;
+	setResultFlags(tmp);
+}
+
+# (3) OR.B:G src5, Ax
+:OR^".B:G" src5B, dst5Ax is (b1_0707=1 & b1_size_0=0; b2_0003=0x5) ... & $(SRC5B) & $(DST5AX) ... {
+	tmp:1 = dst5Ax:1 | src5B;
+	dst5Ax = zext(tmp);
+	setResultFlags(tmp);
+}
+
+# (3) OR.W:G src5, dst5
+:OR^".W:G" src5W, dst5W_afterSrc5 is (b1_0707=1 & b1_size_0=1; b2_0003=0x5) ... & $(SRC5W) ... & $(DST5W_AFTER_SRC5) ... {
+	tmp:2 = dst5W_afterSrc5 | src5W;
+	dst5W_afterSrc5 = tmp;
+	setResultFlags(tmp);
+}
+
+# (3) OR.W:G src5, Ax
+:OR^".W:G" src5W, dst5Ax is (b1_0707=1 & b1_size_0=1; b2_0003=0x5) ... & $(SRC5W) & $(DST5AX) ... {
+	tmp:2 = dst5Ax:2 | src5W;
+	dst5Ax = zext(tmp);
+	setResultFlags(tmp);
+}
+
+##### POP #####
+
+# POP.B dst5
+:POP.B dst5B is (b1_0407=0xb & b1_size_0=0; b2_0005=0x2f) ... & $(DST5B) {
+	pop1(dst5B);
+}
+
+# POP.B Ax
+:POP.B dst5Ax is (b1_0407=0xb & b1_size_0=0; b2_0005=0x2f) & $(DST5AX) {
+	val:1 = 0;
+	pop1(val);
+	dst5Ax = zext(val);
+}
+
+# POP.W dst5
+:POP.W dst5W is (b1_0407=0xb & b1_size_0=1; b2_0005=0x2f) ... & $(DST5W) {
+	pop2(dst5W);
+}
+
+# POP.W Ax
+:POP.W dst5Ax is (b1_0407=0xb & b1_size_0=1; b2_0005=0x2f) & $(DST5AX) {
+	val:2 = 0;
+	pop2(val);
+	dst5Ax = zext(val);
+}
+
+##### POPC #####
+
+# (1) POPC reg16
+:POPC b2_creg16 is b1_0007=0xd3; b2_0307=0x15 & b2_creg16 {
+	pop2(b2_creg16);
+}
+
+# (2) POPC reg24
+:POPC b2_creg24 is b1_0007=0xd3; b2_0307=0x05 & b2_creg24 {
+	pop3(b2_creg24);
+}
+
+##### POPM #####
+popRegFB: FB is regBit7=1 & FB { pop3(FB); }
+popRegFB: is regBit7=0 { }
+
+popRegSB: SB popRegFB is regBit6=1 & popRegFB & SB { pop3(SB); build popRegFB; }
+popRegSB: popRegFB is popRegFB { build popRegFB; }
+
+popRegA1: A1 popRegSB is regBit5=1 & popRegSB & A1 { pop3(A1); build popRegSB; }
+popRegA1: popRegSB is popRegSB { build popRegSB; }
+popRegA0: A0 popRegA1 is regBit4=1 & popRegA1 & A0 { pop3(A0); build popRegA1; }
+popRegA0: popRegA1 is popRegA1 { build popRegA1; }
+
+popRegR3: R3 popRegA0 is regBit3=1 & popRegA0 & R3 { pop2(R3); build popRegA0; }
+popRegR3: popRegA0 is popRegA0 { build popRegA0; }
+popRegR2: R2 popRegR3 is regBit2=1 & popRegR3 & R2 { pop2(R2); build popRegR3; }
+popRegR2: popRegR3 is popRegR3 { build popRegR3; }
+popRegR1: R1 popRegR2 is regBit1=1 & popRegR2 & R1 { pop2(R1); build popRegR2; }
+popRegR1: popRegR2 is popRegR2 { build popRegR2; }
+popRegR0: R0 popRegR1 is regBit0=1
& popRegR1 & R0 { pop2(R0); build popRegR1; }
+popRegR0: popRegR1 is popRegR1 { build popRegR1; }
+
+popRegList: "( "^popRegR0^")" is popRegR0 { build popRegR0; }
+
+:POPM popRegList is b1_0007=0x8e; popRegList {
+	build popRegList;
+}
+
+##### PUSH #####
+
+# (1) PUSH.B #imm
+:PUSH.B srcImm8 is b1_0007=0xae; srcImm8 {
+	push1(srcImm8);
+#tmp:2 = zext(srcImm8); # This differs from what really happens - decompiler tries to resolve source of unknown byte on stack
+#push2(tmp);
+}
+
+# (1) PUSH.W #imm
+# NOTE(review): mnemonic corrected from PUSH.B to PUSH.W -- opcode 0xAF pushes a
+# 16-bit immediate (push2), and the .B form above already covers opcode 0xAE.
+:PUSH.W srcImm16 is b1_0007=0xaf; srcImm16 {
+	push2(srcImm16);
+}
+
+# (2) PUSH.B src5
+:PUSH.B dst5B is (b1_0407=0xc & b1_size_0=0; b2_0005=0x0e) ... & $(DST5B) {
+	push1(dst5B);
+#tmp:2 = zext(dst5B); # This differs from what really happens - decompiler tries to resolve source of unknown byte on stack
+#push2(tmp);
+}
+
+# (2) PUSH.W src5
+:PUSH.W dst5W is (b1_0407=0xc & b1_size_0=1; b2_0005=0x0e) ... & $(DST5W) {
+	push2(dst5W);
+}
+
+# (3) PUSH.L #imm
+:PUSH.L srcImm32 is b1_0007=0xb6; b2_0007=0x53; srcImm32 {
+	push4(srcImm32);
+}
+
+# (4) PUSH.L src5
+:PUSH.L dst5L is (b1_0407=0xa & b1_size_0=0; b2_0005=0x01) ... & $(DST5L) {
+	push4(dst5L);
+}
+
+##### PUSHA #####
+
+:PUSHA dst5A is (b1_0407=0xb & b1_size_0=0; b2_0005=0x01) ...
& $(DST5A) { + push3(dst5A); +#tmp:4 = zext(dst5A); # This differs from what really happens - decompiler tries to resolve source of unknown byte on stack +#push4(tmp); +} + +##### PUSHC ##### + +# (1) PUSHC reg16 +:PUSHC b2_creg16 is b1_0007=0xd1; b2_0307=0x15 & b2_creg16 { + push2(b2_creg16); +} + +# (2) PUSHC reg24 +:PUSHC b2_creg24 is b1_0007=0xd1; b2_0307=0x05 & b2_creg24 { + push3(b2_creg24); +#tmp:4 = zext(b2_creg24); # This differs from what really happens - decompiler tries to resolve source of unknown byte on stack +#push4(tmp); +} + +##### PUSHM ##### +pushRegR0: R0 is regBit7=1 & R0 { push2(R0); } +pushRegR0: is regBit7=0 { } +pushRegR1: pushRegR0 R1 is regBit6=1 & pushRegR0 & R1 { push2(R1); build pushRegR0; } +pushRegR1: pushRegR0 is pushRegR0 { build pushRegR0; } +pushRegR2: pushRegR1 R2 is regBit5=1 & pushRegR1 & R2 { push2(R2); build pushRegR1; } +pushRegR2: pushRegR1 is pushRegR1 { build pushRegR1; } +pushRegR3: pushRegR2 R3 is regBit4=1 & pushRegR2 & R3 { push2(R3); build pushRegR2; } +pushRegR3: pushRegR2 is pushRegR2 { build pushRegR2; } + +pushRegA0: pushRegR3 A0 is regBit3=1 & pushRegR3 & A0 { push3(A0); build pushRegR3; } +pushRegA0: pushRegR3 is pushRegR3 { build pushRegR3; } +pushRegA1: pushRegA0 A1 is regBit2=1 & pushRegA0 & A1 { push3(A1); build pushRegA0; } +pushRegA1: pushRegA0 is pushRegA0 { build pushRegA0; } + +pushRegSB: pushRegA1 SB is regBit1=1 & pushRegA1 & SB { push3(SB); build pushRegA1; } +pushRegSB: pushRegA1 is pushRegA1 { build pushRegA1; } + +pushRegFB: pushRegSB FB is regBit0=1 & pushRegSB & FB { push3(FB); build pushRegSB; } +pushRegFB: pushRegSB is pushRegSB { build pushRegSB; } + +pushRegList: "("^pushRegFB^" )" is pushRegFB { build pushRegFB; } + +:PUSHM pushRegList is b1_0007=0x8f; pushRegList { + build pushRegList; +} + +##### REIT ##### + +:REIT is b1_0007=0x9e { + pc:3 = 0; + pop3(pc); + pop2(FLG); + return [pc]; +} + +##### RMPA ##### + +:RMPA.B is b1_0007=0xb8; b2_0007=0x43 { + if (R3 == 0) goto inst_next; + a:1 
= *:1 A0; + b:1 = *:1 A1; + A0 = A0 + 1; + A1 = A1 + 1; + prod:6 = sext(a) * sext(b); + o:1 = scarry(R1R2R0, prod); + $(OVERFLOW) = o | $(OVERFLOW); + R1R2R0 = R1R2R0 + prod; + R3 = R3 - 1; + goto inst_start; +} + +:RMPA.W is b1_0007=0xb8; b2_0007=0x53 { + if (R3 == 0) goto inst_next; + a:2 = *:2 A0; + b:2 = *:2 A1; + A0 = A0 + 2; + A1 = A1 + 2; + prod:6 = sext(a) * sext(b); + o:1 = scarry(R1R2R0, prod); + $(OVERFLOW) = o | $(OVERFLOW); + R1R2R0 = R1R2R0 + prod; + R3 = R3 - 1; + goto inst_start; +} + +##### ROLC ##### + +:ROLC.B dst5B is (b1_0407=0xb & b1_size_0=0; b2_0005=0x2e) ... & $(DST5B) { + c:1 = $(CARRY); + tmp:1 = dst5B; + $(CARRY) = tmp s< 0; + tmp = (tmp << 1) | c; + dst5B = tmp; + setResultFlags(tmp); +} + +:ROLC.B dst5Ax is (b1_0407=0xb & b1_size_0=0; b2_0005=0x2e) & $(DST5AX) { + c:1 = $(CARRY); + tmp:1 = dst5Ax:1; + $(CARRY) = tmp s< 0; + tmp = (tmp << 1) | c; + dst5Ax = zext(tmp); + setResultFlags(tmp); +} + +:ROLC.W dst5W is (b1_0407=0xb & b1_size_0=1; b2_0005=0x2e) ... & $(DST5W) { + c:2 = zext($(CARRY)); + tmp:2 = dst5W; + $(CARRY) = tmp s< 0; + tmp = (tmp << 1) | c; + dst5W = tmp; + setResultFlags(tmp); +} + +:ROLC.W dst5Ax is (b1_0407=0xb & b1_size_0=1; b2_0005=0x2e) & $(DST5AX) { + c:2 = zext($(CARRY)); + tmp:2 = dst5Ax:2; + $(CARRY) = tmp s< 0; + tmp = (tmp << 1) | c; + dst5Ax = zext(tmp); + setResultFlags(tmp); +} + +##### RORC ##### + +:RORC.B dst5B is (b1_0407=0xa & b1_size_0=0; b2_0005=0x2e) ... & $(DST5B) { + c:1 = $(CARRY); + tmp:1 = dst5B; + $(CARRY) = (tmp & 1) == 1; + tmp = (tmp >> 1) | (c << 7); + dst5B = tmp; + setResultFlags(tmp); +} + +:RORC.B dst5Ax is (b1_0407=0xa & b1_size_0=0; b2_0005=0x2e) & $(DST5AX) { + c:1 = $(CARRY); + tmp:1 = dst5Ax:1; + $(CARRY) = (tmp & 1) == 1; + tmp = (tmp >> 1) | (c << 7); + dst5Ax = zext(tmp); + setResultFlags(tmp); +} + +:RORC.W dst5W is (b1_0407=0xa & b1_size_0=1; b2_0005=0x2e) ... 
& $(DST5W) { + c:2 = zext($(CARRY)); + tmp:2 = dst5W; + $(CARRY) = (tmp & 1) == 1; + tmp = (tmp >> 1) | (c << 15); + dst5W = tmp; + setResultFlags(tmp); +} + +:RORC.W dst5Ax is (b1_0407=0xa & b1_size_0=1; b2_0005=0x2e) & $(DST5AX) { + c:2 = zext($(CARRY)); + tmp:2 = dst5Ax:2; + $(CARRY) = (tmp & 1) == 1; + tmp = (tmp >> 1) | (c << 15); + dst5Ax = zext(tmp); + setResultFlags(tmp); +} + +##### ROT ##### + +# (1) ROT.B #imm4, dst5 (right) +:ROT.B srcSimm4Shift, dst5B is (b1_0407=0xe & b1_size_0=0; b2_0405=2 & b2_shiftSign=1 & srcSimm4Shift) ... & $(DST5B) { + rightShift:1 = -srcSimm4Shift; + tmp:1 = dst5B; + $(CARRY) = (tmp >> (rightShift - 1)) & 1; + tmp = (tmp >> rightShift) | (tmp << (8 - rightShift)); + dst5B = tmp; + setResultFlags(tmp); +} + +# (1) ROT.B #imm4, Ax (right) +:ROT.B srcSimm4Shift, dst5Ax is (b1_0407=0xe & b1_size_0=0; b2_0405=2 & b2_shiftSign=1 & srcSimm4Shift) & $(DST5AX) { + rightShift:1 = -srcSimm4Shift; + tmp:1 = dst5Ax:1; + $(CARRY) = (tmp >> (rightShift - 1)) & 1; + tmp = (tmp >> rightShift) | (tmp << (8 - rightShift)); + dst5Ax = zext(tmp); + setResultFlags(tmp); +} + +# (1) ROT.W #imm4, dst5 (right) +:ROT.W srcSimm4Shift, dst5W is (b1_0407=0xe & b1_size_0=1; b2_0405=2 & b2_shiftSign=1 & srcSimm4Shift) ... & $(DST5W) { + rightShift:1 = -srcSimm4Shift; + tmp:2 = dst5W; + c:2 = (tmp >> (rightShift - 1)); + $(CARRY) = c:1 & 1; + tmp = (tmp >> rightShift) | (tmp << (16 - rightShift)); + dst5W = tmp; + setResultFlags(tmp); +} + +# (1) ROT.W #imm4, Ax (right) +:ROT.W srcSimm4Shift, dst5Ax is (b1_0407=0xe & b1_size_0=1; b2_0405=2 & b2_shiftSign=1 & srcSimm4Shift) & $(DST5AX) { + rightShift:1 = -srcSimm4Shift; + tmp:2 = dst5Ax:2; + c:2 = (tmp >> (rightShift - 1)); + $(CARRY) = c:1 & 1; + tmp = (tmp >> rightShift) | (tmp << (16 - rightShift)); + dst5Ax = zext(tmp); + setResultFlags(tmp); +} + +# (1) ROT.B #imm4, dst5 (left) +:ROT.B srcSimm4Shift, dst5B is (b1_0407=0xe & b1_size_0=0; b2_0405=2 & b2_shiftSign=0 & srcSimm4Shift) ... 
& $(DST5B) { + leftShift:1 = srcSimm4Shift; + tmp:1 = dst5B; + $(CARRY) = (tmp >> (8 - leftShift)) & 1; + tmp = (tmp << leftShift) | (tmp >> (8 - leftShift)); + dst5B = tmp; + setResultFlags(tmp); +} + +# (1) ROT.B #imm4, Ax (left) +:ROT.B srcSimm4Shift, dst5Ax is (b1_0407=0xe & b1_size_0=0; b2_0405=2 & b2_shiftSign=0 & srcSimm4Shift) & $(DST5AX) { + leftShift:1 = srcSimm4Shift; + tmp:1 = dst5Ax:1; + $(CARRY) = (tmp >> (8 - leftShift)) & 1; + tmp = (tmp << leftShift) | (tmp >> (8 - leftShift)); + dst5Ax = zext(tmp); + setResultFlags(tmp); +} + +# (1) ROT.W #imm4, dst5 (left) +:ROT.W srcSimm4Shift, dst5W is (b1_0407=0xe & b1_size_0=1; b2_0405=2 & b2_shiftSign=0 & srcSimm4Shift) ... & $(DST5W) { + leftShift:1 = srcSimm4Shift; + tmp:2 = dst5W; + c:2 = (tmp >> (16 - leftShift)); + $(CARRY) = c:1 & 1; + tmp = (tmp << leftShift) | (tmp >> (16 - leftShift)); + dst5W = tmp; + setResultFlags(tmp); +} + +# (1) ROT.W #imm4, Ax (left) +:ROT.W srcSimm4Shift, dst5Ax is (b1_0407=0xe & b1_size_0=1; b2_0405=2 & b2_shiftSign=0 & srcSimm4Shift) & $(DST5AX) { + leftShift:1 = srcSimm4Shift; + tmp:2 = dst5Ax:2; + c:2 = (tmp >> (16 - leftShift)); + $(CARRY) = c:1 & 1; + tmp = (tmp << leftShift) | (tmp >> (16 - leftShift)); + dst5Ax = zext(tmp); + setResultFlags(tmp); +} + +# (2) ROT.B R1H, dst5 +:ROT.B R1H, dst5B is (R1H & b1_0407=0xa & b1_size_0=0; b2_0005=0x3f) ... 
& $(DST5B) { + if (R1H == 0) goto inst_next; + shift:1 = R1H s% 8; + tmp:1 = dst5B; + if (shift s>= 0) goto ; + shift = -shift; + $(CARRY) = (tmp >> (shift - 1)) & 1; + tmp = (tmp >> shift) | (tmp << (8 - shift)); + goto ; + + $(CARRY) = (tmp >> (8 - shift)) & 1; + tmp = (tmp << shift) | (tmp >> (8 - shift)); + + dst5B = tmp; + setResultFlags(tmp); +} + +# (2) ROT.B R1H, Ax +:ROT.B R1H, dst5Ax is (R1H & b1_0407=0xa & b1_size_0=0; b2_0005=0x3f) & $(DST5AX) { + if (R1H == 0) goto inst_next; + shift:1 = R1H s% 8; + tmp:1 = dst5Ax:1; + if (shift s>= 0) goto ; + shift = -shift; + $(CARRY) = (tmp >> (shift - 1)) & 1; + tmp = (tmp >> shift) | (tmp << (8 - shift)); + goto ; + + $(CARRY) = (tmp >> (8 - shift)) & 1; + tmp = (tmp << shift) | (tmp >> (8 - shift)); + + dst5Ax = zext(tmp); + setResultFlags(tmp); +} + +# (2) ROT.W R1H, dst5 +:ROT.W R1H, dst5W is (R1H & b1_0407=0xa & b1_size_0=1; b2_0005=0x3f) ... & $(DST5W) { + if (R1H == 0) goto inst_next; + shift:1 = R1H s% 16; + tmp:2 = dst5W; + if (shift s>= 0) goto ; + shift = -shift; + c:2 = (tmp >> (shift - 1)); + tmp = (tmp >> shift) | (tmp << (16 - shift)); + goto ; + + c = (tmp >> (16 - shift)); + tmp = (tmp << shift) | (tmp >> (16 - shift)); + + $(CARRY) = c:1 & 1; + dst5W = tmp; + setResultFlags(tmp); +} + +# (2) ROT.W R1H, Ax +:ROT.W R1H, dst5Ax is (R1H & b1_0407=0xa & b1_size_0=1; b2_0005=0x3f) & $(DST5AX) { + if (R1H == 0) goto inst_next; + shift:1 = R1H s% 16; + tmp:2 = dst5Ax:2; + if (shift s>= 0) goto ; + shift = -shift; + c:2 = (tmp >> (shift - 1)); + tmp = (tmp >> shift) | (tmp << (16 - shift)); + goto ; + + c = (tmp >> (16 - shift)); + tmp = (tmp << shift) | (tmp >> (16 - shift)); + + $(CARRY) = c:1 & 1; + dst5Ax = zext(tmp); + setResultFlags(tmp); +} + +##### RTS ##### + +:RTS is b1_0007=0xdf { + pc:3 = 0; + pop3(pc); + return [pc]; +} + +##### SBB ##### + +# (1) SBB.B #simm, dst +:SBB.B srcSimm8, dst5B is b0_0007=0x1; ((b1_0407=0x9 & b1_size_0=0; b2_0005=0x2e) ... 
& $(DST5B)); srcSimm8 { + tmp:1 = dst5B; + c:1 = $(CARRY); + setSubtract3Flags(tmp, srcSimm8, c); + tmp = tmp - srcSimm8 - c; + dst5B = tmp; + setResultFlags(tmp); +} + +# (1) SBB.B #simm, Ax +:SBB.B srcSimm8, dst5Ax is b0_0007=0x1; ((b1_0407=0x9 & b1_size_0=0; b2_0005=0x2e) & $(DST5AX)); srcSimm8 { + tmp:1 = dst5Ax:1; + c:1 = $(CARRY); + setSubtract3Flags(tmp, srcSimm8, c); + tmp = tmp - srcSimm8 - c; + dst5Ax = zext(tmp); + setResultFlags(tmp); +} + +# (1) SBB.W #simm, dst +:SBB.W srcSimm16, dst5W is b0_0007=0x1; ((b1_0407=0x9 & b1_size_0=1; b2_0005=0x2e) ... & $(DST5W)); srcSimm16 { + tmp:2 = dst5W; + c:2 = zext($(CARRY)); + setSubtract3Flags(tmp, srcSimm16, c); + tmp = tmp - srcSimm16 - c; + dst5W = tmp; + setResultFlags(tmp); +} + +# (1) SBB.B #simm, Ax +:SBB.W srcSimm16, dst5Ax is b0_0007=0x1; ((b1_0407=0x9 & b1_size_0=1; b2_0005=0x2e) & $(DST5AX)); srcSimm16 { + tmp:2 = dst5Ax:2; + c:2 = zext($(CARRY)); + setSubtract3Flags(tmp, srcSimm16, c); + tmp = tmp - srcSimm16 - c; + dst5Ax = zext(tmp); + setResultFlags(tmp); +} + +# (2) SBB.B src5, dst5 +:SBB.B src5B, dst5B_afterSrc5 is b0_0007=0x1; ((b1_0707=1 & b1_size_0=0; b2_0003=0x6) ... & $(SRC5B) ... & $(DST5B_AFTER_SRC5) ...) { + tmp:1 = dst5B_afterSrc5; + s:1 = src5B; + c:1 = $(CARRY); + setSubtract3Flags(tmp, s, c); + tmp = tmp - s - c; + dst5B_afterSrc5 = tmp; + setResultFlags(tmp); +} + +# (2) SBB.B src5, Ax +:SBB.B src5B, dst5Ax is b0_0007=0x1; ((b1_0707=1 & b1_size_0=0; b2_0003=0x6) ... & $(SRC5B) & $(DST5AX) ...) { + tmp:1 = dst5Ax:1; + s:1 = src5B; + c:1 = $(CARRY); + setSubtract3Flags(tmp, s, c); + tmp = tmp - s - c; + dst5Ax = zext(tmp); + setResultFlags(tmp); +} + +# (2) SBB.W src5, dst5 +:SBB.W src5W, dst5W_afterSrc5 is b0_0007=0x1; ((b1_0707=1 & b1_size_0=1; b2_0003=0x6) ... & $(SRC5W) ... & $(DST5W_AFTER_SRC5) ...) 
{ + tmp:2 = dst5W_afterSrc5; + s:2 = src5W; + c:2 = zext($(CARRY)); + setSubtract3Flags(tmp, s, c); + tmp = tmp - s - c; + dst5W_afterSrc5 = tmp; + setResultFlags(tmp); +} + +# (2) SBB.W src5, Ax +:SBB.W src5W, dst5Ax is b0_0007=0x1; ((b1_0707=1 & b1_size_0=1; b2_0003=0x6) ... & $(SRC5W) & $(DST5AX) ...) { + tmp:2 = dst5Ax:2; + s:2 = src5W; + c:2 = zext($(CARRY)); + setSubtract3Flags(tmp, s, c); + tmp = tmp - s - c; + dst5Ax = zext(tmp); + setResultFlags(tmp); +} + +##### SBJNZ - PSUEDO-OP! SAME AS ADJNZ ##### + +##### SCCnd ##### + +:SC^b2cnd dst5W is (b1_0407=0xd & b1_size_0=1; b2_0405=3 & b2cnd) ... & $(DST5W) { + dst5W = zext(b2cnd); +} + +:SC^b2cnd dst5Ax is (b1_0407=0xd & b1_size_0=1; b2_0405=3 & b2cnd) & $(DST5AX) { + dst5Ax = zext(b2cnd); +} + +##### SCMPU ##### + +:SCMPU.B is b1_0007=0xb8; b2_0007=0xc3 { + tmp0:1 = *:1 A0; + tmp2:1 = *:1 A1; + setSubtractFlags(tmp0, tmp2); + tmp:1 = tmp0 - tmp2; + setResultFlags(tmp); + A0 = A0 + 1; + A1 = A1 + 1; + if ((tmp0 != 0) && (tmp0 == tmp2)) goto inst_start; +} + +:SCMPU.W is b1_0007=0xb8; b2_0007=0xd3 { + # TODO: The symantic description looks suspicious - manual may be incorrect ?? 
+ tmp0:1 = *:1 A0; + tmp2:1 = *:1 A1; + setSubtractFlags(tmp0, tmp2); + setResultFlags(tmp0 - tmp2); + A0 = A0 + 1; + A1 = A1 + 1; + tmp1:1 = *:1 A0; + tmp3:1 = *:1 A1; + A0 = A0 + 1; + A1 = A1 + 1; + if (tmp0 == 0 || tmp0 != tmp2) goto ; + setSubtractFlags(tmp1, tmp3); + setResultFlags(tmp1 - tmp3); + + if ((tmp0 != 0) && (tmp1 != 0) && (tmp0 == tmp2) && (tmp1 == tmp3)) goto inst_start; +} + +##### SHA ##### +macro SHAsetShiftRightFlags(val,shift,result) { + local c = (val >> (shift - 1)) & 1; + $(CARRY) = c:1; + local mask = ~(-(1 << shift)); + allOnes:1 = (mask & val) == mask; + allZeros:1 = (mask & val) == 0; + $(OVERFLOW) = (result s< 0 && allOnes) || (result s>= 0 && allZeros); + setResultFlags(result); +} + +macro SHAsetShiftLeftFlags(val,shift,result,sze) { + local c = (val >> (sze - shift)) & 1; + $(CARRY) = c:1; + local mask = -(1 << shift); + allOnes:1 = (mask & val) == mask; + allZeros:1 = (mask & val) == 0; + $(OVERFLOW) = (result s< 0 && allOnes) || (result s>= 0 && allZeros); + setResultFlags(result); +} + +# (1) SHA.B #imm4, dst5 (right) +:SHA.B srcSimm4Shift, dst5B is (b1_0407=0xf & b1_size_0=0; b2_0405=0 & b2_shiftSign=1 & srcSimm4Shift) ... & $(DST5B) { + val:1 = dst5B; + shift:1 = -srcSimm4Shift; + tmp:1 = val s>> shift; + dst5B = tmp; + SHAsetShiftRightFlags(val, shift, tmp); +} + +# (1) SHA.B #imm4, Ax (right) +:SHA.B srcSimm4Shift, dst5Ax is (b1_0407=0xf & b1_size_0=0; b2_0405=0 & b2_shiftSign=1 & srcSimm4Shift) & $(DST5AX) { + val:1 = dst5Ax:1; + shift:1 = -srcSimm4Shift; + tmp:1 = val s>> shift; + dst5Ax = zext(tmp); + SHAsetShiftRightFlags(val, shift, tmp); +} + +# (1) SHA.W #imm4, dst5 (right) +:SHA.W srcSimm4Shift, dst5W is (b1_0407=0xf & b1_size_0=1; b2_0405=0 & b2_shiftSign=1 & srcSimm4Shift) ... 
& $(DST5W) { + val:2 = dst5W; + shift:1 = -srcSimm4Shift; + tmp:2 = val s>> shift; + dst5W = tmp; + SHAsetShiftRightFlags(val, shift, tmp); +} + +# (1) SHA.W #imm4, Ax (right) +:SHA.W srcSimm4Shift, dst5Ax is (b1_0407=0xf & b1_size_0=1; b2_0405=0 & b2_shiftSign=1 & srcSimm4Shift) & $(DST5AX) { + val:2 = dst5Ax:2; + shift:1 = -srcSimm4Shift; + tmp:2 = val s>> shift; + dst5Ax = zext(tmp); + SHAsetShiftRightFlags(val, shift, tmp); +} + +# (1) SHA.B #imm4, dst5 (left) +:SHA.B srcSimm4Shift, dst5B is (b1_0407=0xf & b1_size_0=0; b2_0405=0 & b2_shiftSign=0 & srcSimm4Shift) ... & $(DST5B) { + val:1 = dst5B; + shift:1 = srcSimm4Shift; + tmp:1 = val << shift; + dst5B = tmp; + SHAsetShiftLeftFlags(val, shift, tmp, 8); +} + +# (1) SHA.B #imm4, Ax (left) +:SHA.B srcSimm4Shift, dst5Ax is (b1_0407=0xf & b1_size_0=0; b2_0405=0 & b2_shiftSign=0 & srcSimm4Shift) & $(DST5AX) { + val:1 = dst5Ax:1; + shift:1 = srcSimm4Shift; + tmp:1 = val << shift; + dst5Ax = zext(tmp); + SHAsetShiftLeftFlags(val, shift, tmp, 8); +} + +# (1) SHA.W #imm4, dst5 (left) +:SHA.W srcSimm4Shift, dst5W is (b1_0407=0xf & b1_size_0=1; b2_0405=0 & b2_shiftSign=0 & srcSimm4Shift) ... & $(DST5W) { + val:2 = dst5W; + shift:1 = srcSimm4Shift; + tmp:2 = val << shift; + dst5W = tmp; + SHAsetShiftLeftFlags(val, shift, tmp, 16); +} + +# (1) SHA.W #imm4, Ax (left) +:SHA.W srcSimm4Shift, dst5Ax is (b1_0407=0xf & b1_size_0=1; b2_0405=0 & b2_shiftSign=0 & srcSimm4Shift) & $(DST5AX) { + val:2 = dst5Ax:2; + shift:1 = srcSimm4Shift; + tmp:2 = val << shift; + dst5Ax = zext(tmp); + SHAsetShiftLeftFlags(val, shift, tmp, 16); +} + +# (2) SHA.L #imm, dst5 +:SHA.L srcSimm8, dst5L is ((b1_0407=0xa & b1_size_0=0; b2_0005=0x21) ... 
& $(DST5L)); srcSimm8 { + # Unable to pattern match on sign bit due to interior ellipses + shift:1 = srcSimm8; + val:4 = dst5L; + if (shift s> 0) goto ; + shift = -shift; + tmp:4 = val s>> shift; + dst5L = tmp; + SHAsetShiftRightFlags(val, shift, tmp); + goto inst_next; + + tmp = val << shift; + dst5L = tmp; + SHAsetShiftLeftFlags(val, shift, tmp, 32); +} + +# (2) SHA.L #imm, Ax +:SHA.L srcSimm8, dst5Ax is ((b1_0407=0xa & b1_size_0=0; b2_0005=0x21) & $(DST5AX)); srcSimm8 { + # Unable to pattern match on sign bit due to interior ellipses + shift:1 = srcSimm8; + val:4 = zext(dst5Ax); + if (shift s> 0) goto ; + shift = -shift; + tmp:4 = val s>> shift; + dst5Ax = tmp:3; + goto inst_next; + + tmp = val << shift; + dst5Ax = tmp:3; +# No flags set +} + +# (3) SHA.B R1H, dst5 +:SHA.B R1H, dst5B is (R1H & b1_0407=0xb & b1_size_0=0; b2_0005=0x3e) ... & $(DST5B) { + if (R1H == 0) goto inst_next; + shift:1 = R1H; + val:1 = dst5B; + if (shift s> 0) goto ; + shift = -shift; + tmp:1 = val s>> shift; + dst5B = tmp; + SHAsetShiftRightFlags(val, shift, tmp); + goto inst_next; + + tmp = val << shift; + dst5B = tmp; + SHAsetShiftLeftFlags(val, shift, tmp, 8); +} + +# (3) SHA.B R1H, Ax +:SHA.B R1H, dst5Ax is (R1H & b1_0407=0xb & b1_size_0=0; b2_0005=0x3e) & $(DST5AX) { + if (R1H == 0) goto inst_next; + shift:1 = R1H; + val:1 = dst5Ax:1; + if (shift s> 0) goto ; + shift = -shift; + tmp:1 = val s>> shift; + dst5Ax = zext(tmp); + SHAsetShiftRightFlags(val, shift, tmp); + goto inst_next; + + tmp = val << shift; + dst5Ax = zext(tmp); + SHAsetShiftLeftFlags(val, shift, tmp, 8); +} + +# (3) SHA.W R1H, dst5 +:SHA.W R1H, dst5W is (R1H & b1_0407=0xb & b1_size_0=1; b2_0005=0x3e) ... 
& $(DST5W) { + if (R1H == 0) goto inst_next; + shift:1 = R1H; + val:2 = dst5W; + if (shift s> 0) goto ; + shift = -shift; + tmp:2 = val s>> shift; + dst5W = tmp; + SHAsetShiftRightFlags(val, shift, tmp); + goto inst_next; + + tmp = val << shift; + dst5W = tmp; + SHAsetShiftLeftFlags(val, shift, tmp, 16); +} + +# (3) SHA.W R1H, Ax +:SHA.W R1H, dst5Ax is (R1H & b1_0407=0xb & b1_size_0=1; b2_0005=0x3e) & $(DST5AX) { + if (R1H == 0) goto inst_next; + shift:1 = R1H; + val:2 = dst5Ax:2; + if (shift s> 0) goto ; + shift = -shift; + tmp:2 = val s>> shift; + dst5Ax = zext(tmp); + SHAsetShiftRightFlags(val, shift, tmp); + goto inst_next; + + tmp = val << shift; + dst5Ax = zext(tmp); + SHAsetShiftLeftFlags(val, shift, tmp, 16); +} + +# (4) SHA.L R1H, dst5 +:SHA.L R1H, dst5L is (R1H & b1_0407=0xc & b1_size_0=0; b2_0005=0x11) ... & $(DST5L) { + if (R1H == 0) goto inst_next; + shift:1 = R1H; + val:4 = dst5L; + if (shift s> 0) goto ; + shift = -shift; + tmp:4 = val s>> shift; + dst5L = tmp; + SHAsetShiftRightFlags(val, shift, tmp); + goto inst_next; + + tmp = val << shift; + dst5L = tmp; + SHAsetShiftLeftFlags(val, shift, tmp, 32); +} + +# (4) SHA.L R1H, Ax +:SHA.L R1H, dst5Ax is (R1H & b1_0407=0xc & b1_size_0=0; b2_0005=0x11) & $(DST5AX) { + if (R1H == 0) goto inst_next; + shift:1 = R1H; + val:4 = zext(dst5Ax); + if (shift s> 0) goto ; + shift = -shift; + tmp:4 = val s>> shift; + dst5Ax = tmp:3; + goto inst_next; + + tmp = val << shift; + dst5Ax = tmp:3; +# No flags set +} + +##### SHL ##### +macro SHLsetShiftRightFlags(val,shift,result) { + local c = (val >> (shift - 1)) & 1; + $(CARRY) = c:1; + setResultFlags(result); +} + +macro SHLsetShiftLeftFlags(val,shift,result,sze) { + local c = (val >> (sze - shift)) & 1; + $(CARRY) = c:1; + setResultFlags(result); +} + +# (1) SHL.B #imm4, dst5 (right) +:SHL.B srcSimm4Shift, dst5B is (b1_0407=0xe & b1_size_0=0; b2_0405=0 & b2_shiftSign=1 & srcSimm4Shift) ... 
& $(DST5B) { + val:1 = dst5B; + shift:1 = -srcSimm4Shift; + tmp:1 = val >> shift; + dst5B = tmp; + SHLsetShiftRightFlags(val, shift, tmp); +} + +# (1) SHL.B #imm4, Ax (right) +:SHL.B srcSimm4Shift, dst5Ax is (b1_0407=0xe & b1_size_0=0; b2_0405=0 & b2_shiftSign=1 & srcSimm4Shift) & $(DST5AX) { + val:1 = dst5Ax:1; + shift:1 = -srcSimm4Shift; + tmp:1 = val >> shift; + dst5Ax = zext(tmp); + SHLsetShiftRightFlags(val, shift, tmp); +} + +# (1) SHL.W #imm4, dst5 (right) +:SHL.W srcSimm4Shift, dst5W is (b1_0407=0xe & b1_size_0=1; b2_0405=0 & b2_shiftSign=1 & srcSimm4Shift) ... & $(DST5W) { + val:2 = dst5W; + shift:1 = -srcSimm4Shift; + tmp:2 = val >> shift; + dst5W = tmp; + SHLsetShiftRightFlags(val, shift, tmp); +} + +# (1) SHL.W #imm4, Ax (right) +:SHL.W srcSimm4Shift, dst5Ax is (b1_0407=0xe & b1_size_0=1; b2_0405=0 & b2_shiftSign=1 & srcSimm4Shift) & $(DST5AX) { + val:2 = dst5Ax:2; + shift:1 = -srcSimm4Shift; + tmp:2 = val >> shift; + dst5Ax = zext(tmp); + SHLsetShiftRightFlags(val, shift, tmp); +} + +# (1) SHL.B #imm4, dst5 (left) +:SHL.B srcSimm4Shift, dst5B is (b1_0407=0xe & b1_size_0=0; b2_0405=0 & b2_shiftSign=0 & srcSimm4Shift) ... & $(DST5B) { + val:1 = dst5B; + shift:1 = srcSimm4Shift; + tmp:1 = val << shift; + dst5B = tmp; + SHLsetShiftLeftFlags(val, shift, tmp, 8); +} + +# (1) SHL.B #imm4, Ax (left) +:SHL.B srcSimm4Shift, dst5Ax is (b1_0407=0xe & b1_size_0=0; b2_0405=0 & b2_shiftSign=0 & srcSimm4Shift) & $(DST5AX) { + val:1 = dst5Ax:1; + shift:1 = srcSimm4Shift; + tmp:1 = val << shift; + dst5Ax = zext(tmp); + SHLsetShiftLeftFlags(val, shift, tmp, 8); +} + +# (1) SHL.W #imm4, dst5 (left) +:SHL.W srcSimm4Shift, dst5W is (b1_0407=0xe & b1_size_0=1; b2_0405=0 & b2_shiftSign=0 & srcSimm4Shift) ... 
& $(DST5W) { + val:2 = dst5W; + shift:1 = srcSimm4Shift; + tmp:2 = val << shift; + dst5W = tmp; + SHLsetShiftLeftFlags(val, shift, tmp, 16); +} + +# (1) SHL.W #imm4, Ax (left) +:SHL.W srcSimm4Shift, dst5Ax is (b1_0407=0xe & b1_size_0=1; b2_0405=0 & b2_shiftSign=0 & srcSimm4Shift) & $(DST5AX) { + val:2 = dst5Ax:2; + shift:1 = srcSimm4Shift; + tmp:2 = val << shift; + dst5Ax = zext(tmp); + SHLsetShiftLeftFlags(val, shift, tmp, 16); +} + +# (2) SHL.L #imm, dst5 +:SHL.L srcSimm8, dst5L is ((b1_0407=0x9 & b1_size_0=0; b2_0005=0x21) ... & $(DST5L)); srcSimm8 { + # Unable to pattern match on sign bit due to interior ellipses + shift:1 = srcSimm8; + val:4 = dst5L; + if (shift s> 0) goto ; + shift = -shift; + tmp:4 = val >> shift; + dst5L = tmp; + SHLsetShiftRightFlags(val, shift, tmp); + goto inst_next; + + tmp = val << shift; + dst5L = tmp; + SHLsetShiftLeftFlags(val, shift, tmp, 32); +} + +# (2) SHL.L #imm, Ax +:SHL.L srcSimm8, dst5Ax is ((b1_0407=0x9 & b1_size_0=0; b2_0005=0x21) & $(DST5AX)); srcSimm8 { + # Unable to pattern match on sign bit due to interior ellipses + shift:1 = srcSimm8; + val:4 = zext(dst5Ax); + if (shift s> 0) goto ; + shift = -shift; + tmp:4 = val >> shift; + dst5Ax = tmp:3; + goto inst_next; + + tmp = val << shift; + dst5Ax = tmp:3; +# No flags set +} + +# (3) SHL.B R1H, dst5 +:SHL.B R1H, dst5B is (R1H & b1_0407=0xa & b1_size_0=0; b2_0005=0x3e) ... 
& $(DST5B) { + if (R1H == 0) goto inst_next; + shift:1 = R1H; + val:1 = dst5B; + if (shift s> 0) goto ; + shift = -shift; + tmp:1 = val >> shift; + dst5B = tmp; + SHLsetShiftRightFlags(val, shift, tmp); + goto inst_next; + + tmp = val << shift; + dst5B = tmp; + SHLsetShiftLeftFlags(val, shift, tmp, 8); +} + +# (3) SHL.B R1H, Ax +:SHL.B R1H, dst5Ax is (R1H & b1_0407=0xa & b1_size_0=0; b2_0005=0x3e) & $(DST5AX) { + if (R1H == 0) goto inst_next; + shift:1 = R1H; + val:1 = dst5Ax:1; + if (shift s> 0) goto ; + shift = -shift; + tmp:1 = val >> shift; + dst5Ax = zext(tmp); + SHLsetShiftRightFlags(val, shift, tmp); + goto inst_next; + + tmp = val << shift; + dst5Ax = zext(tmp); + SHLsetShiftLeftFlags(val, shift, tmp, 8); +} + +# (3) SHL.W R1H, dst5 +:SHL.W R1H, dst5W is (R1H & b1_0407=0xa & b1_size_0=1; b2_0005=0x3e) ... & $(DST5W) { + if (R1H == 0) goto inst_next; + shift:1 = R1H; + val:2 = dst5W; + if (shift s> 0) goto ; + shift = -shift; + tmp:2 = val >> shift; + dst5W = tmp; + SHLsetShiftRightFlags(val, shift, tmp); + goto inst_next; + + tmp = val << shift; + dst5W = tmp; + SHLsetShiftLeftFlags(val, shift, tmp, 16); +} + +# (3) SHL.W R1H, Ax +:SHL.W R1H, dst5Ax is (R1H & b1_0407=0xa & b1_size_0=1; b2_0005=0x3e) & $(DST5AX) { + if (R1H == 0) goto inst_next; + shift:1 = R1H; + val:2 = dst5Ax:2; + if (shift s> 0) goto ; + shift = -shift; + tmp:2 = val >> shift; + dst5Ax = zext(tmp); + SHLsetShiftRightFlags(val, shift, tmp); + goto inst_next; + + tmp = val << shift; + dst5Ax = zext(tmp); + SHLsetShiftLeftFlags(val, shift, tmp, 16); +} + +# (4) SHL.L R1H, dst5 +:SHL.L R1H, dst5L is (R1H & b1_0407=0xc & b1_size_0=0; b2_0005=0x01) ... 
& $(DST5L) { + if (R1H == 0) goto inst_next; + shift:1 = R1H; + val:4 = dst5L; + if (shift s> 0) goto ; + shift = -shift; + tmp:4 = val >> shift; + dst5L = tmp; + SHLsetShiftRightFlags(val, shift, tmp); + goto inst_next; + + tmp = val << shift; + dst5L = tmp; + SHLsetShiftLeftFlags(val, shift, tmp, 32); +} + +# (4) SHL.L R1H, Ax +:SHL.L R1H, dst5Ax is (R1H & b1_0407=0xc & b1_size_0=0; b2_0005=0x01) & $(DST5AX) { + if (R1H == 0) goto inst_next; + shift:1 = R1H; + val:4 = zext(dst5Ax); + if (shift s> 0) goto ; + shift = -shift; + tmp:4 = val >> shift; + dst5Ax = tmp:3; + goto inst_next; + + tmp = val << shift; + dst5Ax = tmp:3; +# No flags set +} + +##### SIN ##### + +:SIN.B is b1_0007=0xb2; b2_0007=0x83 { + if (R3 == 0) goto inst_next; + *:1 A1 = *:1 A0; + A1 = A1 + 1; + R3 = R3 - 1; + goto inst_start; +} + +:SIN.W is b1_0007=0xb2; b2_0007=0x93 { + if (R3 == 0) goto inst_next; + *:2 A1 = *:2 A0; + A1 = A1 + 2; + R3 = R3 - 1; + goto inst_start; +} + +##### SMOVB ##### + +:SMOVB.B is b1_0007=0xb6; b2_0007=0x83 { + if (R3 == 0) goto inst_next; + *:1 A1 = *:1 A0; + A1 = A1 - 1; + A0 = A0 - 1; + R3 = R3 - 1; + goto inst_start; +} + +:SMOVB.W is b1_0007=0xb6; b2_0007=0x93 { + if (R3 == 0) goto inst_next; + *:2 A1 = *:2 A0; + A1 = A1 - 2; + A0 = A0 - 2; + R3 = R3 - 1; + goto inst_start; +} + +##### SMOVF ##### + +:SMOVF.B is b1_0007=0xb0; b2_0007=0x83 { + if (R3 == 0) goto inst_next; + *:1 A1 = *:1 A0; + A1 = A1 + 1; + A0 = A0 + 1; + R3 = R3 - 1; + goto inst_start; +} + +:SMOVF.W is b1_0007=0xb0; b2_0007=0x93 { + if (R3 == 0) goto inst_next; + *:2 A1 = *:2 A0; + A1 = A1 + 2; + A0 = A0 + 2; + R3 = R3 - 1; + goto inst_start; +} + +##### SMOVU ##### + +:SMOVU.B is b1_0007=0xb8; b2_0007=0x83 { + local tmp:1 = *:1 A0; + *:1 A1 = tmp; + A0 = A0 + 1; + A1 = A1 + 1; + if (tmp != 0) goto inst_start; +} + +:SMOVU.W is b1_0007=0xb8; b2_0007=0x93 { + local tmp:2 = *:2 A0; + *:2 A1 = tmp; + A0 = A0 + 2; + A1 = A1 + 2; + local tmp0:2 = tmp & 0xff; + local tmp1:2 = tmp & 0xff00; + if 
((tmp0 != 0) && (tmp1 != 0)) goto inst_start; +} + +##### SOUT ##### + +:SOUT.B is b1_0007=0xb4; b2_0007=0x83 { + if (R3 == 0) goto inst_next; + *:1 A1 = *:1 A0; + A0 = A0 + 1; + R3 = R3 - 1; + goto inst_start; +} + +:SOUT.W is b1_0007=0xb4; b2_0007=0x93 { + if (R3 == 0) goto inst_next; + *:2 A1 = *:2 A0; + A0 = A0 + 2; + R3 = R3 - 1; + goto inst_start; +} + +##### SSTR ##### + +:SSTR.B is b1_0007=0xb8; b2_0007=0x03 { + if (R3 == 0) goto inst_next; + *:1 A1 = R0L; + A1 = A1 + 1; + R3 = R3 - 1; + goto inst_start; +} + +:SSTR.W is b1_0007=0xb8; b2_0007=0x13 { + if (R3 == 0) goto inst_next; + *:2 A1 = R0; + A1 = A1 + 2; + R3 = R3 - 1; + goto inst_start; +} + +##### STC ##### + +# (1) STC dreg24, dst5 +:STC b2_dreg24, dst5L is b0_0007=0x1; ((b1_0407=0xd & b1_size_0=1; b2_0305=0x2 & b2_dreg24) ... & $(DST5L)) { + dst5L = zext(b2_dreg24); +} + +# (1) STC dreg24, Ax +:STC b2_dreg24, dst5Ax is b0_0007=0x1; ((b1_0407=0xd & b1_size_0=1; b2_0305=0x2 & b2_dreg24) & $(DST5AX)) { + dst5Ax = b2_dreg24; +} + +# (2) STC reg16, dst5 +:STC b2_creg16, dst5W is b0_0007=0x1; ((b1_0407=0xd & b1_size_0=1; b2_0305=0x3 & b2_creg16) ... & $(DST5W)) { + dst5W = b2_creg16; +} + +# (2) STC reg16, Ax +:STC b2_creg16, dst5Ax is b0_0007=0x1; ((b1_0407=0xd & b1_size_0=1; b2_0305=0x3 & b2_creg16) & $(DST5AX)) { + dst5Ax = zext(b2_creg16); +} + +# (3) STC reg24, dst5L +:STC b2_creg24, dst5L is (b1_0407=0xd & b1_size_0=1; b2_0305=0x2 & b2_creg24) ... 
& $(DST5L) { + dst5L = zext(b2_creg24); +} + +# (3) STC reg24, Ax +:STC b2_creg24, dst5Ax is (b1_0407=0xd & b1_size_0=1; b2_0305=0x2 & b2_creg24) & $(DST5AX) { + dst5Ax = b2_creg24; +} + +##### STCTX ##### + +:STCTX abs16offset, abs24offset is b1_0007=0xb6; b2_0007=0xd3; abs16offset; imm24_dat & abs24offset { + + taskNum:1 = abs16offset; # load task number stored at abs16 + ptr:3 = imm24_dat + (zext(taskNum) * 2); # compute table entry address relative to abs24 + regInfo:1 = *:1 ptr; + ptr = ptr + 1; + spCorrect:1 = *:1 ptr; + + ptr = SP; + + if ((regInfo & 0x80) == 0) goto ; + ptr = ptr - 4; + *:4 ptr = FB; + + regInfo = regInfo << 1; + if ((regInfo & 0x80) == 0) goto ; + ptr = ptr - 4; + *:4 ptr = SB; + + regInfo = regInfo << 1; + if ((regInfo & 0x80) == 0) goto ; + ptr = ptr - 4; + *:4 ptr = A1; + + regInfo = regInfo << 1; + if ((regInfo & 0x80) == 0) goto ; + ptr = ptr - 4; + *:4 ptr = A0; + + regInfo = regInfo << 1; + if ((regInfo & 0x80) == 0) goto ; + ptr = ptr - 2; + *:2 ptr = R3; + + regInfo = regInfo << 1; + if ((regInfo & 0x80) == 0) goto ; + ptr = ptr - 2; + *:2 ptr = R2; + + regInfo = regInfo << 1; + if ((regInfo & 0x80) == 0) goto ; + ptr = ptr - 2; + *:2 ptr = R1; + + regInfo = regInfo << 1; + if ((regInfo & 0x80) == 0) goto ; + ptr = ptr - 2; + *:2 ptr = R0; + + SP = SP - zext(spCorrect); +} + +##### STNZ ##### + +# (1) STNZ.B #imm, dst5 +:STNZ.B srcImm8, dst5B is ((b1_0407=0x9 & b1_size_0=0; b2_0005=0x1f) ... & $(DST5B)); srcImm8 { + if ($(ZERO) != 0) goto inst_next; + dst5B = srcImm8; +} + +# (1) STNZ.B #imm, Ax +:STNZ.B srcImm8, dst5Ax is ((b1_0407=0x9 & b1_size_0=0; b2_0005=0x1f) & $(DST5AX)); srcImm8 { + if ($(ZERO) != 0) goto inst_next; + dst5Ax = zext(srcImm8); +} + +# (1) STNZ.W #imm, dst5 +:STNZ.W srcImm16, dst5W is ((b1_0407=0x9 & b1_size_0=1; b2_0005=0x1f) ... 
& $(DST5W)); srcImm16 { + if ($(ZERO) != 0) goto inst_next; + dst5W = srcImm16; +} + +# (1) STNZ.W #imm, Ax +:STNZ.W srcImm16, dst5Ax is ((b1_0407=0x9 & b1_size_0=1; b2_0005=0x1f) & $(DST5AX)); srcImm16 { + if ($(ZERO) != 0) goto inst_next; + dst5Ax = zext(srcImm16); +} + +##### STZ ##### + +# (1) STZ.B #imm, dst5 +:STZ.B srcImm8, dst5B is ((b1_0407=0x9 & b1_size_0=0; b2_0005=0x0f) ... & $(DST5B)); srcImm8 { + if ($(ZERO) == 0) goto inst_next; + dst5B = srcImm8; +} + +# (1) STZ.B #imm, Ax +:STZ.B srcImm8, dst5Ax is ((b1_0407=0x9 & b1_size_0=0; b2_0005=0x0f) & $(DST5AX)); srcImm8 { + if ($(ZERO) == 0) goto inst_next; + dst5Ax = zext(srcImm8); +} + +# (1) STZ.W #imm, dst5 +:STZ.W srcImm16, dst5W is ((b1_0407=0x9 & b1_size_0=1; b2_0005=0x0f) ... & $(DST5W)); srcImm16 { + if ($(ZERO) == 0) goto inst_next; + dst5W = srcImm16; +} + +# (1) STZ.W #imm, Ax +:STZ.W srcImm16, dst5Ax is ((b1_0407=0x9 & b1_size_0=1; b2_0005=0x0f) & $(DST5AX)); srcImm16 { + if ($(ZERO) == 0) goto inst_next; + dst5Ax = zext(srcImm16); +} + +##### STZX ##### + +# STZX.B #imm, #imm, dst5 +:STZX.B srcImm8, srcImm8a, dst5B is ((b1_0407=0x9 & b1_size_0=0; b2_0005=0x3f) ... & $(DST5B)); srcImm8; srcImm8a { + z:1 = $(ZERO); + dst5B = (z * srcImm8) + (!z * srcImm8a); +} + +# STZX.B #imm, #imm, Ax +:STZX.B srcImm8, srcImm8a, dst5Ax is ((b1_0407=0x9 & b1_size_0=0; b2_0005=0x3f) & $(DST5AX)); srcImm8; srcImm8a { + z:1 = $(ZERO); + dst5Ax = zext((z * srcImm8) + (!z * srcImm8a)); +} + +# STZX.W #imm, #imm, dst5 +:STZX.W srcImm16, srcImm16a, dst5W is ((b1_0407=0x9 & b1_size_0=1; b2_0005=0x3f) ... 
& $(DST5W)); srcImm16; srcImm16a { + z:1 = $(ZERO); + dst5W = (zext(z) * srcImm16) + (zext(!z) * srcImm16a); +} + +# STZX.W #imm, #imm, Ax +:STZX.W srcImm16, srcImm16a, dst5Ax is ((b1_0407=0x9 & b1_size_0=1; b2_0005=0x3f) & $(DST5AX)); srcImm16; srcImm16a { + z:1 = $(ZERO); + dst5Ax = zext((zext(z) * srcImm16) + (zext(!z) * srcImm16a)); +} + +##### SUB ##### + +# (1) SUB.B:G #simm, dst +:SUB^".B:G" srcSimm8, dst5B is ((b1_0407=0x8 & b1_size_0=0; b2_0005=0x3e) ... & $(DST5B)); srcSimm8 { + tmp:1 = dst5B; + setSubtractFlags(tmp, srcSimm8); + tmp = tmp - srcSimm8; + dst5B = tmp; + setResultFlags(tmp); +} + +# (1) SUB.B:G #simm, Ax +:SUB^".B:G" srcSimm8, dst5Ax is ((b1_0407=0x8 & b1_size_0=0; b2_0005=0x3e) & $(DST5AX)); srcSimm8 { + tmp:1 = dst5Ax:1; + setSubtractFlags(tmp, srcSimm8); + tmp = tmp - srcSimm8; + dst5Ax = zext(tmp); + setResultFlags(tmp); +} + +# (1) SUB.W:G #simm, dst +:SUB^".W:G" srcSimm16, dst5W is ((b1_0407=0x8 & b1_size_0=1; b2_0005=0x3e) ... & $(DST5W)); srcSimm16 { + tmp:2 = dst5W; + setSubtractFlags(tmp, srcSimm16); + tmp = tmp - srcSimm16; + dst5W = tmp; + setResultFlags(tmp); +} + +# (1) SUB.W:G #simm, Ax +:SUB^".W:G" srcSimm16, dst5Ax is ((b1_0407=0x8 & b1_size_0=1; b2_0005=0x3e) & $(DST5AX)); srcSimm16 { + tmp:2 = dst5Ax:2; + setSubtractFlags(tmp, srcSimm16); + tmp = tmp - srcSimm16; + dst5Ax = zext(tmp); + setResultFlags(tmp); +} + +# (2) SUB.L:G #simm, dst +:SUB^".L:G" srcSimm32, dst5L is ((b1_0407=0x9 & b1_size_0=0; b2_0005=0x31) ... & $(DST5L)); srcSimm32 { + tmp:4 = dst5L; + setSubtractFlags(tmp, srcSimm32); + tmp = tmp - srcSimm32; + dst5L = tmp; + setResultFlags(tmp); +} + +# (2) SUB.L:G #simm, Ax +:SUB^".L:G" srcSimm32, dst5Ax is ((b1_0407=0x9 & b1_size_0=0; b2_0005=0x31) & $(DST5AX)); srcSimm32 { + tmp:4 = zext(dst5Ax); + setSubtractFlags(tmp, srcSimm32); + tmp = tmp - srcSimm32; + dst5Ax = tmp:3; + setResultFlags(tmp); +} + +# (3) SUB.B:S #simm, dst +:SUB^".B:S" srcSimm8, dst2B is ((b1_0607=0 & b1_0103=7 & b1_size_0=0) ... 
& dst2B); srcSimm8 { + tmp:1 = dst2B; + setSubtractFlags(tmp, srcSimm8); + tmp = tmp - srcSimm8; + dst2B = tmp; + setResultFlags(tmp); +} + +# (3) SUB.W:S #simm, dst +:SUB^".W:S" srcSimm16, dst2W is ((b1_0607=0 & b1_0103=7 & b1_size_0=1) ... & dst2W); srcSimm16 { + tmp:2 = dst2W; + setSubtractFlags(tmp, srcSimm16); + tmp = tmp - srcSimm16; + dst2W = tmp; + setResultFlags(tmp); +} + +# (4) SUB.B:G src, dst +:SUB^".B:G" src5B, dst5B_afterSrc5 is (b1_0707=1 & b1_size_0=0; b2_0003=0xa) ... & $(SRC5B) ... & $(DST5B_AFTER_SRC5) ... { + tmp:1 = dst5B_afterSrc5; + src:1 = src5B; + setSubtractFlags(tmp, src); + tmp = tmp - src; + dst5B_afterSrc5 = tmp; + setResultFlags(tmp); +} + +# (4) SUB.B:G src, Ax - Ax destination case +:SUB^".B:G" src5B, dst5Ax is (b1_0707=1 & b1_size_0=0; b2_0003=0xa) ... & $(SRC5B) & $(DST5AX) ... { + tmp:1 = dst5Ax:1; + src:1 = src5B; + setSubtractFlags(tmp, src); + tmp = tmp - src; + dst5Ax = zext(tmp); + setResultFlags(tmp); +} + +# (4) SUB.W:G src, dst +:SUB^".W:G" src5W, dst5W_afterSrc5 is (b1_0707=1 & b1_size_0=1; b2_0003=0xa) ... & $(SRC5W) ... & $(DST5W_AFTER_SRC5) ... { + tmp:2 = dst5W_afterSrc5; + src:2 = src5W; + setSubtractFlags(tmp, src); + tmp = tmp - src; + dst5W_afterSrc5 = tmp; + setResultFlags(tmp); +} + +# (4) SUB.W:G src, Ax - Ax destination case +:SUB^".W:G" src5W, dst5Ax is (b1_0707=1 & b1_size_0=1; b2_0003=0xa) ... & $(SRC5W) & $(DST5AX) ... { + tmp:2 = dst5Ax:2; + src:2 = src5W; + setSubtractFlags(tmp, src); + tmp = tmp - src; + dst5Ax = zext(tmp); + setResultFlags(tmp); +} + +# (5) SUB.L:G src, dst +:SUB^".L:G" src5L, dst5L_afterSrc5 is (b1_0707=1 & b1_size_0=1; b2_0003=0x0) ... & $(SRC5L) ... & $(DST5L_AFTER_SRC5) ... { + tmp:4 = dst5L_afterSrc5; + src:4 = src5L; + setSubtractFlags(tmp, src); + tmp = tmp - src; + dst5L_afterSrc5 = tmp; + setResultFlags(tmp); +} + +# (5) SUB.L:G src, Ax - Ax destination case +:SUB^".L:G" src5L, dst5Ax is (b1_0707=1 & b1_size_0=1; b2_0003=0x0) ... & $(SRC5L) & $(DST5AX) ... 
{ + tmp:4 = zext(dst5Ax); + src:4 = src5L; + setSubtractFlags(tmp, src); + tmp = tmp - src; + dst5Ax = tmp:3; + setResultFlags(tmp); +} + +##### SUBX ##### + +# (1) SUBX #simm, dst5 +:SUBX srcSimm8, dst5L is ((b1_0407=0x9 & b1_size_0=0; b2_0005=0x11) ... & $(DST5L)); srcSimm8 { + tmp:4 = dst5L; + src:4 = sext(srcSimm8); + setSubtractFlags(tmp, src); + tmp = tmp - src; + dst5L = tmp; + setResultFlags(tmp); +} + +# (1) SUBX #simm, Ax +:SUBX srcSimm8, dst5Ax is ((b1_0407=0x9 & b1_size_0=0; b2_0005=0x11) & $(DST5AX)); srcSimm8 { + tmp:4 = zext(dst5Ax); + src:4 = sext(srcSimm8); + setSubtractFlags(tmp, src); + tmp = tmp - src; + dst5Ax = tmp:3; + setResultFlags(tmp); +} + +# (2) SUBX src5, dst5 +:SUBX src5B, dst5L_afterSrc5 is (b1_0707=1 & b1_size_0=0; b2_0003=0x0) ... & $(SRC5B) ... & $(DST5L_AFTER_SRC5) ... { + tmp:4 = dst5L_afterSrc5; + src:4 = sext(src5B); + setSubtractFlags(tmp, src); + tmp = tmp - src; + dst5L_afterSrc5 = tmp; + setResultFlags(tmp); +} + +# (2) SUBX src5, Ax +:SUBX src5B, dst5Ax is (b1_0707=1 & b1_size_0=0; b2_0003=0x0) ... & $(SRC5B) & $(DST5AX) ... { + tmp:4 = zext(dst5Ax); + src:4 = sext(src5B); + setSubtractFlags(tmp, src); + tmp = tmp - src; + dst5Ax = tmp:3; + setResultFlags(tmp); +} + +##### TST ##### + +# (1) TST.B:G #imm, dst +:TST^".B:G" srcImm8, dst5B is ((b1_0407=0x9 & b1_size_0=0; b2_0005=0x3e) ... & $(DST5B)); srcImm8 { + tmp:1 = dst5B & srcImm8; + setResultFlags(tmp); +} + +# (1) TST.W:G #imm, dst +:TST^".W:G" srcImm16, dst5W is ((b1_0407=0x9 & b1_size_0=1; b2_0005=0x3e) ... & $(DST5W)); srcImm16 { + tmp:2 = dst5W & srcImm16; + setResultFlags(tmp); +} + +# (2) TST.B:S #imm, dst +:TST^".B:S" srcImm8, dst2B is ((b1_0607=0 & b1_0103=6 & b1_size_0=0) ... & dst2B); srcImm8 { + tmp:1 = dst2B & srcImm8; + setResultFlags(tmp); +} + +# (2) TST.W:S #imm, dst +:TST^".W:S" srcImm16, dst2W is ((b1_0607=0 & b1_0103=6 & b1_size_0=1) ... 
& dst2W); srcImm16 { + tmp:2 = dst2W & srcImm16; + setResultFlags(tmp); +} + +# (3) TST.B:G src5, dst5 +:TST^".B:G" src5B, dst5B_afterSrc5 is b0_0007=0x1; ((b1_0707=1 & b1_size_0=0; b2_0003=0x9) ... & $(SRC5B) ... & $(DST5B_AFTER_SRC5) ...) { + tmp:1 = dst5B_afterSrc5 & src5B; + setResultFlags(tmp); +} + +# (3) TST.W:G src5, dst5 +:TST^".W:G" src5W, dst5W_afterSrc5 is b0_0007=0x1; ((b1_0707=1 & b1_size_0=1; b2_0003=0x9) ... & $(SRC5W) ... & $(DST5W_AFTER_SRC5) ...) { + tmp:2 = dst5W_afterSrc5 & src5W; + setResultFlags(tmp); +} + +##### UND ##### +# Don't implement this "Undefined" instruction +# :UND is b1_0007=0xff + +##### WAIT ##### + +:WAIT is b1_0007=0xb2; b2_0007=0x03 { + Wait(); +} + +##### XCHG ##### + +# XCHG.B reg8, dst5 +:XCHG.B b2_reg8, dst5B is (b1_0407=0xd & b1_size_0=0; b2_0305=1 & b2_0101=0 & b2_reg8) ... & $(DST5B) { + tmp:1 = dst5B; + dst5B = b2_reg8; + b2_reg8 = tmp; +} + +# XCHG.B Ax, dst5 +:XCHG.B b2_regAx, dst5B is (b1_0407=0xd & b1_size_0=0; b2_0305=1 & b2_0102=1 & b2_regAx) ... & $(DST5B) { + tmp:1 = dst5B; + dst5B = b2_regAx:1; + b2_regAx = zext(tmp); +} + +# XCHG.B reg8, Ax +:XCHG.B b2_reg8, dst5Ax is (b1_0407=0xd & b1_size_0=0; b2_0305=1 & b2_0101=0 & b2_reg8) & $(DST5AX) { + tmp:1 = dst5Ax:1; + dst5Ax = zext(b2_reg8); + b2_reg8 = tmp; +} + +# XCHG.B Ax, Ax +:XCHG.B b2_regAx, dst5Ax is (b1_0407=0xd & b1_size_0=0; b2_0305=1 & b2_0102=1 & b2_regAx) & $(DST5AX) { + tmp:1 = dst5Ax:1; + dst5Ax = zext(b2_regAx:1); + b2_regAx = zext(tmp); +} + +# XCHG.W reg16, dst5 +:XCHG.W b2_reg16, dst5W is (b1_0407=0xd & b1_size_0=1; b2_0305=1 & b2_0101=0 & b2_reg16) ... & $(DST5W) { + tmp:2 = dst5W; + dst5W = b2_reg16; + b2_reg16 = tmp; +} + +# XCHG.W Ax, dst5 +:XCHG.W b2_regAx, dst5W is (b1_0407=0xd & b1_size_0=1; b2_0305=1 & b2_0102=1 & b2_regAx) ... 
& $(DST5W) { + tmp:2 = dst5W; + dst5W = b2_regAx:2; + b2_regAx = zext(tmp); +} + +# XCHG.W reg16, Ax +:XCHG.W b2_reg16, dst5Ax is (b1_0407=0xd & b1_size_0=1; b2_0305=1 & b2_0101=0 & b2_reg16) & $(DST5AX) { + tmp:2 = dst5Ax:2; + dst5Ax = zext(b2_reg16); + b2_reg16 = tmp; +} + +# XCHG.W Ax, Ax +:XCHG.W b2_regAx, dst5Ax is (b1_0407=0xd & b1_size_0=1; b2_0305=1 & b2_0102=1 & b2_regAx) & $(DST5AX) { + tmp:3 = dst5Ax; + dst5Ax = zext(b2_regAx:2); # dest Ax receives low 16-bits of src Ax zero extended + b2_regAx = tmp; # src Ax receives all 24-bits of dest Ax +} + +##### XOR ##### + +# (1) XOR.B #imm, dst +:XOR^".B:G" srcImm8, dst5B is ((b1_0407=0x9 & b1_size_0=0; b2_0005=0x0e) ... & $(DST5B)); srcImm8 { + tmp:1 = dst5B ^ srcImm8; + dst5B = tmp; + setResultFlags(tmp); +} + +# (1) XOR.B #imm, Ax +:XOR^".B:G" srcImm8, dst5Ax is ((b1_0407=0x9 & b1_size_0=0; b2_0005=0x0e) & $(DST5AX)); srcImm8 { + tmp:1 = dst5Ax:1 ^ srcImm8; + dst5Ax = zext(tmp); + setResultFlags(tmp); +} + +# (1) XOR.W #imm, dst +:XOR^".W:G" srcImm16, dst5W is ((b1_0407=0x9 & b1_size_0=1; b2_0005=0x0e) ... & $(DST5W)); srcImm16 { + tmp:2 = dst5W ^ srcImm16; + dst5W = tmp; + setResultFlags(tmp); +} + +# (1) XOR.W #imm, Ax +:XOR^".W:G" srcImm16, dst5Ax is ((b1_0407=0x9 & b1_size_0=1; b2_0005=0x0e) & $(DST5AX)); srcImm16 { + tmp:2 = dst5Ax:2 ^ srcImm16; + dst5Ax = zext(tmp); + setResultFlags(tmp); +} + +# (2) XOR.B src5, dst5 +:XOR^".B:G" src5B, dst5B_afterSrc5 is (b1_0707=1 & b1_size_0=0; b2_0003=0x9) ... & $(SRC5B) ... & $(DST5B_AFTER_SRC5) ... { + tmp:1 = dst5B_afterSrc5 ^ src5B; + dst5B_afterSrc5 = tmp; + setResultFlags(tmp); +} + +# (2) XOR.B src5, Ax +:XOR^".B:G" src5B, dst5Ax is (b1_0707=1 & b1_size_0=0; b2_0003=0x9) ... & $(SRC5B) & $(DST5AX) ... { + tmp:1 = dst5Ax:1 ^ src5B; + dst5Ax = zext(tmp); + setResultFlags(tmp); +} + +# (2) XOR.W src5, dst5 +:XOR^".W:G" src5W, dst5W_afterSrc5 is (b1_0707=1 & b1_size_0=1; b2_0003=0x9) ... & $(SRC5W) ... & $(DST5W_AFTER_SRC5) ... 
{ + tmp:2 = dst5W_afterSrc5 ^ src5W; + dst5W_afterSrc5 = tmp; + setResultFlags(tmp); +} + +# (2) XOR.W src5, Ax +:XOR^".W:G" src5W, dst5Ax is (b1_0707=1 & b1_size_0=1; b2_0003=0x9) ... & $(SRC5W) & $(DST5AX) ... { + tmp:2 = dst5Ax:2 ^ src5W; + dst5Ax = zext(tmp); + setResultFlags(tmp); +} + +} # end phase=1 + diff --git a/pypcode/processors/M16C/data/manuals/M16C_60.idx b/pypcode/processors/M16C/data/manuals/M16C_60.idx new file mode 100644 index 00000000..a57d6692 --- /dev/null +++ b/pypcode/processors/M16C/data/manuals/M16C_60.idx @@ -0,0 +1,95 @@ +@m16csm.pdf +ABS, 55 +ADC, 56 +ADCF, 57 +ADD, 58 +ADJNZ, 60 +AND, 61 +BAND, 63 +BCLR, 64 +BM, 65 +BNAND, 66 +BNOR, 67 +BNOT, 68 +BNTST, 69 +BNXOR, 70 +BOR, 71 +BRK, 72 +BSET, 73 +BTST, 74 +BTSTC, 75 +BTSTS, 76 +BXOR, 77 +CMP, 78 +DADC, 80 +DADD, 81 +DEC, 82 +DIV, 83 +DIVU, 84 +DIVX, 85 +DSBB, 86 +DSUB, 87 +ENTER, 88 +EXITD, 89 +EXTS, 90 +FCLR, 91 +FSET, 92 +INC, 93 +INT, 94 +INTO, 95 +J, 96 +JMP, 97 +JMPI, 98 +JMPS, 99 +JSR, 100 +JSRI, 101 +JSRS, 102 +LDC, 103 +LDCTX, 104 +LDE, 105 +LDINTB, 106 +LDIPL, 107 +MOV, 108 +MOVA, 110 +MOVHH, 111 +MOVHL, 111 +MOVLH, 111 +MOVLL, 111 +MUL, 112 +MULU, 113 +NEG, 114 +NOP, 115 +NOT, 116 +OR, 117 +POP, 119 +POPC, 120 +POPM, 121 +PUSH, 122 +PUSHA, 123 +PUSHC, 124 +PUSHM, 125 +REIT, 126 +RMPA, 127 +ROLC, 128 +RORC, 129 +ROT, 130 +RTS, 131 +SBB, 132 +SBJNZ, 133 +SHA, 134 +SHL, 135 +SMOVB, 136 +SMOVF, 137 +SSTR, 138 +STC, 139 +STCTX, 140 +STE, 141 +STNZ, 142 +STZ, 143 +STZX, 144 +SUB, 145 +TST, 147 +UND, 148 +WAIT, 149 +XCHG, 150 +XOR, 151 diff --git a/pypcode/processors/M16C/data/manuals/M16C_80.idx b/pypcode/processors/M16C/data/manuals/M16C_80.idx new file mode 100644 index 00000000..cd2c20f4 --- /dev/null +++ b/pypcode/processors/M16C/data/manuals/M16C_80.idx @@ -0,0 +1,110 @@ +@m16c80.pdf +ABS, 60 +ADC, 61 +ADCF, 62 +ADD, 63 +ADDX, 65 +ADJNZ, 66 +AND, 67 +BAND, 69 +BCLR, 70 +BITINDEX, 71 +BM, 72 +BNAND, 73 +BNOR, 74 +BNOT, 75 +BNTST, 76 +BNXOR, 77 +BOR, 78 +BRK, 79 +BRK2, 80 +BSET, 
81 +BTST, 82 +BTSTC, 83 +BTSTS, 84 +BXOR, 85 +CLIP, 86 +CMP, 87 +CMPX, 89 +DADC, 90 +DADD, 91 +DEC, 92 +DIV, 93 +DIVU, 94 +DIVX, 95 +DSBB, 96 +DSUB, 97 +ENTER, 98 +EXITD, 99 +EXTS, 100 +EXTZ, 101 +FLCR, 102 +FREIT, 103 +FSET, 104 +INC, 105 +INDEX, 175 +INT, 107 +INTO, 108 +J, 109 +JMP, 110 +JMPI, 111 +JMPS, 112 +JSR, 113 +JSRI, 114 +JSRS, 115 +LDC, 116 +LDCTX, 117 +LDIPL, 118 +MAX, 119 +MIN, 120 +MOV, 121 +MOVA, 123 +MOVHH, 124 +MOVHL, 124 +MOVLH, 124 +MOVLL, 124 +MOVX, 125 +MUL, 126 +MULEX, 127 +MULU, 128 +NEG, 129 +NOP, 130 +NOT, 131 +OR, 132 +POP, 134 +POPC, 135 +POPM, 136 +PUSH, 137 +PUSHA, 138 +PUSHC, 139 +PUSHM, 140 +REIT, 141 +RMPA, 142 +ROLC, 143 +RORC, 144 +ROT, 145 +RTS, 146 +SBB, 147 +SBJNZ, 148 +SC, 149 +SCMPU, 150 +SHA, 151 +SHL, 153 +SIN, 155 +SMOVB, 156 +SMOVF, 157 +SMOVU, 158 +SOUT, 159 +SSTR, 160 +STC, 161 +STCTX, 162 +STNZ, 163 +STZ, 164 +STZX, 165 +SUB, 166 +SUBX, 168 +TST, 169 +UND, 171 +WAIT, 172 +XCHG, 173 +XOR, 174 diff --git a/pypcode/processors/MIPS/data/languages/MIPS.opinion b/pypcode/processors/MIPS/data/languages/MIPS.opinion index 00a1625f..15037567 100644 --- a/pypcode/processors/MIPS/data/languages/MIPS.opinion +++ b/pypcode/processors/MIPS/data/languages/MIPS.opinion @@ -86,32 +86,40 @@ Elf e_flags are used for the secondary attribute, the following are pulled from --> - + + secondary= "0b 00.. ..00 .... .... 00.1 0.0. 
0000 ...."/> - - - + - - + - - + - + + + + + + + + diff --git a/pypcode/processors/PIC/data/languages/pic16.ldefs b/pypcode/processors/PIC/data/languages/pic16.ldefs index af82b20e..94d988f2 100644 --- a/pypcode/processors/PIC/data/languages/pic16.ldefs +++ b/pypcode/processors/PIC/data/languages/pic16.ldefs @@ -14,7 +14,6 @@ - - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + @@ -40,6 +136,14 @@ + + + + + + + + diff --git a/pypcode/processors/PIC/data/languages/pic16.sinc b/pypcode/processors/PIC/data/languages/pic16.sinc index d7144dae..68207af1 100644 --- a/pypcode/processors/PIC/data/languages/pic16.sinc +++ b/pypcode/processors/PIC/data/languages/pic16.sinc @@ -2,28 +2,6 @@ # PIC-16 Main Section # includes constants, memory space and common register space definitions # - -# STATUS bit definitions -@define STATUS_IRP_BIT 7 -@define STATUS_RP0_BIT 5 -@define STATUS_Z_BIT 2 -@define STATUS_DC_BIT 1 -@define STATUS_C_BIT 0 - -# STATUS bit masks used for setting -@define STATUS_IRP_MASK 0x80 -@define STATUS_RP_MASK 0x60 -@define STATUS_Z_MASK 0x04 -@define STATUS_DC_MASK 0x02 -@define STATUS_C_MASK 0x01 - -# STATUS bit masks used for clearing -@define STATUS_IRP_CLEARMASK 0x7F -@define STATUS_RP_CLEARMASK 0x9F -@define STATUS_Z_CLEARMASK 0xFB -@define STATUS_DC_CLEARMASK 0xFD -@define STATUS_C_CLEARMASK 0xFE - define endian=little; define alignment=2; @@ -50,18 +28,37 @@ define register offset=0x0002 size=1 [ STKPTR ]; define register offset=0x0003 size=1 [ W SkipNext ]; # Status bit registers (these do not really exist and must get reflected into the STATUS byte register) -@if PROCESSOR == "PIC_16" define register offset=0x0007 size=1 [ IRP RP ]; -@elif PROCESSOR == "PIC_16F" -define register offset=0x0007 size=1 [ IRP RP ]; -@endif @define C "STATUS[0,1]" @define DC "STATUS[1,1]" @define Z "STATUS[2,1]" @define PD 
"STATUS[3,1]" @define TO "STATUS[4,1]" -@define PA0 "STATUS[5,1]" +@define RP "STATUS[5,2]" +@define IRP "STATUS[7,1]" + +# STATUS bit definitions +@define STATUS_IRP_BIT 7 +@define STATUS_RP_BIT 5 +@define STATUS_Z_BIT 2 +@define STATUS_DC_BIT 1 +@define STATUS_C_BIT 0 + +# STATUS bit masks used for setting +@define STATUS_IRP_MASK 0x80 +@define STATUS_RP_MASK 0x60 +@define STATUS_Z_MASK 0x04 +@define STATUS_DC_MASK 0x02 +@define STATUS_C_MASK 0x01 + +# STATUS bit masks used for clearing +@define STATUS_IRP_CLEARMASK 0x7F +@define STATUS_RP_CLEARMASK 0x9F +@define STATUS_Z_CLEARMASK 0xFB +@define STATUS_DC_CLEARMASK 0xFD +@define STATUS_C_CLEARMASK 0xFE + # # WARNING! - Reflection of these DATA-based registers with the corresponding register @@ -80,16 +77,13 @@ define register offset=0x0007 size=1 [ IRP RP ]; # @if PROCESSOR == "PIC_16" define DATA offset=0x0000 size=1 [ - INDF TMR0 PCL STATUS FSR PORTA PORTB PORTC PORTD PORTE PCLATH INTCON PIR1 PIR2 TMR1L TMR1H + INDF _ PCL STATUS FSR _ _ _ _ _ PCLATH INTCON _ _ _ _ ]; - @elif PROCESSOR == "PIC_16F" define DATA offset=0x0000 size=1 [ - INDF0 INDF1 PCL STATUS FSR0L FSR0H FSR1L FSR1H BSR WREG PCLATH INTCON _ _ _ _ + INDF0 INDF1 PCL STATUS FSR0L FSR0H FSR1L FSR1H BSR WREG PCLATH INTCON _ _ _ _ ]; - define DATA offset=0x0004 size=2 [ FSR0 FSR1 ]; - @endif # Additional Data Bank data registers are defined in the .PSPEC file. 
diff --git a/pypcode/processors/PIC/data/languages/pic16_instructions.sinc b/pypcode/processors/PIC/data/languages/pic16_instructions.sinc index 4bec48f4..4d31346e 100644 --- a/pypcode/processors/PIC/data/languages/pic16_instructions.sinc +++ b/pypcode/processors/PIC/data/languages/pic16_instructions.sinc @@ -25,7 +25,6 @@ define token instr16(16) uf7 = (4,6) fsr = (2,2) fsrk = (6,6) - fregCore = (0,3) k5 = (0,4) k6 = (0,5) k7 = (0,6) @@ -44,15 +43,7 @@ define context contextreg ; -@if PROCESSOR == "PIC_16" -attach variables [ fregCore ] [ - INDF TMR0 PCL STATUS FSR PORTA PORTB PORTC PORTD PORTE PCLATH INTCON _ _ _ _ -]; - -@elif PROCESSOR == "PIC_16F" -attach variables [ fregCore ] [ - INDF0 INDF1 PCL STATUS FSR0L FSR0H FSR1L FSR1H BSR W PCLATH INTCON _ _ _ _ -]; +@if PROCESSOR == "PIC_16F" attach names [IntConBits] [ IOCIF INTF TMR0IF IOCIE INTE TMR0IE PEIE GIE ]; @@ -178,31 +169,45 @@ srcREG: fv is uf7=0x7 & lf7 [fv = 0x70 + lf7; ] { export *[DATA]:1 addr; } +# The registers listed here are explicitly defined as registers in sleigh. +# There are other registers but they are named in the .pspec file. +# The reason this is done is to have cross references created to certain registers, and to have +# only the registers that must be accessed directly in sleigh (e.g. PCL, FSR) defined in sleigh. +# Register explicitly defined in sleigh will not have xref's created to them. +# Registers named only in the .pspec file will have xref's to them in most cases. +# +# Also, these registers ignore RP, or BSR which allow more registers to be in a different register bank. 
+# +# PIC16 : INDF _ PCL STATUS FSR _ _ _ _ _ PCLATH INTCON _ _ _ _ +# PIC16F: INDF0 INDF1 PCL STATUS FSR0L FSR0H FSR1L FSR1H BSR W PCLATH INTCON _ _ _ _ + # File register index (f7=0): INDF use implies indirect data access using FSR value and IRP bit in STATUS reg @if PROCESSOR == "PIC_16" -srcREG: fregCore is f7=0 & fregCore { +srcREG: INDF is f7=0 & INDF { addr:2 = (zext(IRP) << 8) + zext(FSR); export *[DATA]:1 addr; } -srcREG: fregCore is f7=1 & fregCore { - rpval:2 = zext(RP == 1) + zext(RP == 2); - addr:2 = (zext(rpval) << 7) + 1; - export *[DATA]:1 addr; +srcREG: lf7 is f7=1 & lf7 { + rpval:2 = zext(RP == 1) + zext(RP == 3); + addr:2 = (zext(rpval) << 7) + 1; + export *[DATA]:1 addr; } + @elif PROCESSOR == "PIC_16F" -srcREG: fregCore is f7=0 & fregCore { + +srcREG: INDF0 is f7=0 & INDF0 { addr:2 = FSR0; export *[DATA]:1 addr; } -srcREG: fregCore is f7=1 & fregCore { +srcREG: INDF1 is f7=1 & INDF1 { addr:2 = FSR1; export *[DATA]:1 addr; } @endif # Special File Registers always mapped to Bank-0 -srcREG: fregCore is f7=0x02 & fregCore { +srcREG: PCL is f7=0x02 & PCL { # PCL and PCLATH must be latched addr:2 = inst_start >> 1; # Compensate for CODE wordsize PCL = addr:1; @@ -210,17 +215,19 @@ srcREG: fregCore is f7=0x02 & fregCore { export PCL; } -srcREG: fregCore is f7=0x03 & fregCore { export fregCore; } -srcREG: fregCore is f7=0x04 & fregCore { export fregCore; } -@if PROCESSOR == "PIC_16F" -srcREG: fregCore is f7=0x05 & fregCore { export fregCore; } -srcREG: fregCore is f7=0x06 & fregCore { export fregCore; } -srcREG: fregCore is f7=0x07 & fregCore { export fregCore; } -srcREG: fregCore is f7=0x08 & fregCore { export fregCore; } -srcREG: fregCore is f7=0x09 & fregCore { export fregCore; } +srcREG: STATUS is f7=0x03 & STATUS { export STATUS; } +@if PROCESSOR == "PIC_16" +srcREG: FSR is f7=0x04 & FSR { export FSR; } +@elif PROCESSOR == "PIC_16F" +srcREG: FSR0L is f7=0x04 & FSR0L { export FSR0L; } +srcREG: FSR0H is f7=0x05 & FSR0H { export FSR0H; } +srcREG: 
FSR1L is f7=0x06 & FSR1L { export FSR1L; } +srcREG: FSR1H is f7=0x07 & FSR1H { export FSR1H; } +srcREG: BSR is f7=0x08 & BSR { export BSR; } +srcREG: W is f7=0x09 & W { export W; } @endif -srcREG: fregCore is f7=0x0a & fregCore { export fregCore; } -srcREG: fregCore is f7=0x0b & fregCore { export fregCore; } +srcREG: PCLATH is f7=0x0a & PCLATH { export PCLATH; } +srcREG: INTCON is f7=0x0b & INTCON { export INTCON; } # Destination register (either srcREG or W) @@ -230,7 +237,7 @@ destREG: "0" is d=0 { export W; } destREG: "1" is d=1 & f7 & srcREG { export srcREG; } # Destination register: Special File Registers always mapped to Bank-0 -destREG: "1" is d=1 & f7=0x02 & fregCore { export fregCore; } # PCL (special behavior reqd) +destREG: "1" is d=1 & f7=0x02 { export PCL; } # PCL (special behavior reqd) # Destination operand representation (w: W register is destination; f: specified srcREG is destination) D: "w" is d=0 { } @@ -238,13 +245,13 @@ D: "f" is d=1 { } # Absolute address generated from k11 and PCLATH<4:3> absAddr11: k11 is k11 { - addr:2 = ((zext(PCLATH) & 0x78) << 8) + k11; + addr:2 = ((zext(PCLATH) & 0x18) << 8) | k11; export addr; } @if PROCESSOR == "PIC_16F" -# Absolute address generated from k11 and PCLATH<4:3> +# Relative address relAddr9: addr is sk9 [ addr = inst_next + sk9; ] { export *[CODE]:2 addr; } @@ -327,7 +334,7 @@ trisREG: "7" is l5=7 { local trl:2 = 0x10E; export *[DATA]:1 trl; } # tmp:1 = addr:1; setAddFlags(tmp, W); tmp = tmp + W; - addr = (zext(PCLATH) << 8) + zext(tmp); + addr = ((zext(PCLATH) & 0x1F) << 8) | zext(tmp); PCL = tmp; setResultFlags(tmp); goto [addr]; @@ -365,7 +372,7 @@ trisREG: "7" is l5=7 { local trl:2 = 0x10E; export *[DATA]:1 trl; } # $(C) = $(C) | tc; val = val + tmpC; - addr = (zext(PCLATH) << 8) + zext(val); + addr = ((zext(PCLATH) & 0x1F) << 8) | zext(val); PCL = val; setResultFlags(val); goto [addr]; @@ -799,7 +806,7 @@ srcFSRk: sk6"["fsrk"]" is fsrk & sk6 { } :MOVLP imm7 is op7=0x63 & imm7 { - PCLATH = imm7; + 
PCLATH = imm7 & 0x1F; } @endif @@ -831,7 +838,7 @@ srcFSRk: sk6"["fsrk"]" is fsrk & sk6 { # --00 0000 1fff ffff # 0000 0000 1000 0010 -> MOVWF PCL PCL = W; - addr:2 = (zext(PCLATH) << 8) + zext(PCL); + addr:2 = ((zext(PCLATH) & 0x1F) << 8) | zext(PCL); goto [addr]; } diff --git a/pypcode/processors/PIC/data/languages/pic16f.pspec b/pypcode/processors/PIC/data/languages/pic16f.pspec index 44a15b64..a821ddbe 100644 --- a/pypcode/processors/PIC/data/languages/pic16f.pspec +++ b/pypcode/processors/PIC/data/languages/pic16f.pspec @@ -10,9 +10,102 @@ - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/pypcode/processors/PowerPC/data/languages/SPE_APU.sinc b/pypcode/processors/PowerPC/data/languages/SPE_APU.sinc index 56536a05..cc07249e 100644 --- a/pypcode/processors/PowerPC/data/languages/SPE_APU.sinc +++ b/pypcode/processors/PowerPC/data/languages/SPE_APU.sinc @@ -2654,8 +2654,8 @@ define pcodeop ROTL64; define pcodeop VectorSplatFractionalImmediate; :evsplatfi D,BU_SIMM is OP=4 & D & BU_SIMM & XOP_0_10=0x22B { # TODO definition -# RT0:31 􀁉 SI || 270 -# RT32:63 􀁉 SI || 270 +# RT0:31 = SI || 270 +# RT32:63 = SI || 270 # The value specified by SI is padded with trailing zeros # and placed in both elements of RT. The SI ends up in # bit positions RT0:4 and RT32:36. @@ -2687,9 +2687,9 @@ define pcodeop VectorSplatImmediate; define pcodeop VectorShiftRightWordImmediateSigned; :evsrwis D,A,EVUIMM is OP=4 & A & D & EVUIMM & XOP_0_10=0x223 { # TODO definition -# n 􀁉 UI -# RT0:31 􀁉 EXTS((RA)0:31-n) -# RT32:63 􀁉 EXTS((RA)32:63-n) +# n = UI +# RT0:31 = EXTS((RA)0:31-n) +# RT32:63 = EXTS((RA)32:63-n) # Both high and low elements of RA are shifted right by # the 5-bit UI value. Bits in the most significant positions # vacated by the shift are filled with a copy of the sign bit. 
@@ -2703,9 +2703,9 @@ define pcodeop VectorShiftRightWordImmediateSigned; define pcodeop VectorShiftRightWordImmediateUnsigned; :evsrwiu D,A,EVUIMM is OP=4 & A & D & EVUIMM & XOP_0_10=0x222 { # TODO definition -# n 􀁉 UI -# RT0:31 􀁉 EXTZ((RA)0:31-n) -# RT32:63 􀁉 EXTZ((RA)32:63-n) +# n = UI +# RT0:31 = EXTZ((RA)0:31-n) +# RT32:63 = EXTZ((RA)32:63-n) # Both high and low elements of RA are shifted right by # the 5-bit UI value; zeros are shifted into the most significant # position. diff --git a/pypcode/processors/PowerPC/data/languages/evx.sinc b/pypcode/processors/PowerPC/data/languages/evx.sinc index b47a5f18..190b5251 100644 --- a/pypcode/processors/PowerPC/data/languages/evx.sinc +++ b/pypcode/processors/PowerPC/data/languages/evx.sinc @@ -18,15 +18,19 @@ define pcodeop vectorShiftRightWordUnsigned; vrD_64_0 = vrA_64_0 ^ vrB_64_0; } +@if REGISTER_SIZE=="8" :evmergehi S,A,B is OP=4 & S & A & B & XOP_0_10=556 { - vectorMergeHigh(S,A,B); + S[32,32] = A[32,32]; + S[ 0,32] = B[ 0,32]; } :evmergelo S,A,B is OP=4 & S & A & B & XOP_0_10=557 { - vectorMergeLow(S,A,B); + S[32,32] = A[0,32]; + S[ 0,32] = B[0,32]; } +@endif :evldd RT,dUI16PlusRAOrZeroAddress is OP=4 & RT & dUI16PlusRAOrZeroAddress & XOP_0_10=769 @@ -41,19 +45,31 @@ define pcodeop vectorShiftRightWordUnsigned; RT = *:8 ($(EATRUNC)); } +@if REGISTER_SIZE=="8" @ifndef IS_ISA :evsrws S,A,B is OP=4 & S & A & B & XOP_0_10=545 { - vectorShiftRightWordSigned(S,A,B); + local low:4 = A[0,32]; + local high:4 = A[32,32]; + local low_shift:1 = B[0,5]; + local high_shift:1 = B[32,5]; + S[0,32] = low s>> zext(low_shift); + S[32,32] = high s>> zext(high_shift); } @endif @ifndef IS_ISA :evsrwu S,A,B is OP=4 & S & A & B & XOP_0_10=544 { - vectorShiftRightWordUnsigned(S,A,B); + local low:4 = A[0,32]; + local high:4 = A[32,32]; + local low_shift:1 = B[0,5]; + local high_shift:1 = B[32,5]; + S[0,32] = low >> zext(low_shift); + S[32,32] = high >> zext(high_shift); } @endif +@endif :evstdd RS,dUI16PlusRAOrZeroAddress is OP=4 & RS & 
dUI16PlusRAOrZeroAddress & XOP_0_10=801 { @@ -73,32 +89,20 @@ define pcodeop vectorShiftRightWordUnsigned; RT = RA; } +@if REGISTER_SIZE=="8" # evmergehilo rD,rA,rB 010 0010 1110 -define pcodeop VectorMergeHighLow; -:evmergehilo D,A,B is OP=4 & A & B & D & XOP_0_10=558 { - local lo = (A & 0x00000000FFFFFFFF); - local hi = ((A & 0xFFFFFFFF00000000) >> 32); - #local b_lo:$(REGISTER_SIZE) = (B & 0x00000000FFFFFFFF); - local b_hi:$(REGISTER_SIZE) = ((B & 0xFFFFFFFF00000000) >> 32); - - lo = lo; - hi = b_hi; +:evmergehilo S,A,B is OP=4 & S & A & B & XOP_0_10=558 { + S[32,32] = A[32,32]; + S[ 0,32] = B[ 0,32]; - D = ((hi << 32) | lo); } # evmergelohi rD,rA,rB 010 0010 1111 -:evmergelohi D,A,B is OP=4 & D & A & B & XOP_0_10=559 { - local lo = (A & 0x00000000FFFFFFFF); - local hi = ((A & 0xFFFFFFFF00000000) >> 32); - local b_lo:$(REGISTER_SIZE) = (B & 0x00000000FFFFFFFF); - #local b_hi:$(REGISTER_SIZE) = ((B & 0xFFFFFFFF00000000) >> 32); - - lo = lo; - hi = b_lo; - - D = ((hi << 32) | lo); +:evmergelohi S,A,B is OP=4 & S & A & B & XOP_0_10=559 { + S[32,32] = A[ 0,32]; + S[ 0,32] = B[32,32]; } +@endif # evstwwe rS,rA,UIMM 011 0011 1001 :evstwwe RS,dUI16PlusRAOrZeroAddress is OP=4 & RS & dUI16PlusRAOrZeroAddress & XOP_0_10=0x339 diff --git a/pypcode/processors/PowerPC/data/languages/ppc.ldefs b/pypcode/processors/PowerPC/data/languages/ppc.ldefs index 28467d11..b9bb0a04 100644 --- a/pypcode/processors/PowerPC/data/languages/ppc.ldefs +++ b/pypcode/processors/PowerPC/data/languages/ppc.ldefs @@ -235,7 +235,7 @@ size="32" variant="PowerQUICC-III-e500mc" version="1.6" - slafile="ppc_32_be.sla" + slafile="ppc_32_e500mc_be.sla" processorspec="ppc_32.pspec" manualindexfile="../manuals/PowerPC.idx" id="PowerPC:BE:32:e500mc"> @@ -251,7 +251,7 @@ size="32" variant="PowerQUICC-III-e500mc" version="1.6" - slafile="ppc_32_le.sla" + slafile="ppc_32_e500mc_le.sla" processorspec="ppc_32.pspec" manualindexfile="../manuals/PowerPC.idx" id="PowerPC:LE:32:e500mc"> diff --git 
a/pypcode/processors/PowerPC/data/languages/ppc_32.pspec b/pypcode/processors/PowerPC/data/languages/ppc_32.pspec index 8c9194eb..260c1622 100644 --- a/pypcode/processors/PowerPC/data/languages/ppc_32.pspec +++ b/pypcode/processors/PowerPC/data/languages/ppc_32.pspec @@ -304,8 +304,8 @@ - - + + @@ -321,8 +321,8 @@ - - + + diff --git a/pypcode/processors/PowerPC/data/languages/ppc_32_mpc8270.pspec b/pypcode/processors/PowerPC/data/languages/ppc_32_mpc8270.pspec index 95fdbf33..6f223ab6 100644 --- a/pypcode/processors/PowerPC/data/languages/ppc_32_mpc8270.pspec +++ b/pypcode/processors/PowerPC/data/languages/ppc_32_mpc8270.pspec @@ -302,8 +302,8 @@ don't know about the DCRs though - - + + @@ -319,8 +319,8 @@ don't know about the DCRs though - - + + diff --git a/pypcode/processors/PowerPC/data/languages/ppc_64.pspec b/pypcode/processors/PowerPC/data/languages/ppc_64.pspec index 028581a0..d61b9f0b 100644 --- a/pypcode/processors/PowerPC/data/languages/ppc_64.pspec +++ b/pypcode/processors/PowerPC/data/languages/ppc_64.pspec @@ -301,8 +301,8 @@ - - + + @@ -318,8 +318,8 @@ - - + + diff --git a/pypcode/processors/Sparc/data/languages/SparcV9.ldefs b/pypcode/processors/Sparc/data/languages/SparcV9.ldefs index 95b4e5dd..97599ce7 100644 --- a/pypcode/processors/Sparc/data/languages/SparcV9.ldefs +++ b/pypcode/processors/Sparc/data/languages/SparcV9.ldefs @@ -5,7 +5,7 @@ endian="big" size="32" variant="default" - version="1.4" + version="1.5" slafile="SparcV9_32.sla" processorspec="SparcV9.pspec" manualindexfile="../manuals/Sparc.idx" @@ -20,7 +20,7 @@ endian="big" size="64" variant="default" - version="1.4" + version="1.5" slafile="SparcV9_64.sla" processorspec="SparcV9.pspec" manualindexfile="../manuals/Sparc.idx" diff --git a/pypcode/processors/Sparc/data/languages/SparcV9.sinc b/pypcode/processors/Sparc/data/languages/SparcV9.sinc index b78bff55..9aeac3e7 100644 --- a/pypcode/processors/Sparc/data/languages/SparcV9.sinc +++ 
b/pypcode/processors/Sparc/data/languages/SparcV9.sinc @@ -351,7 +351,13 @@ RS2: rs2 is rs2 { export rs2; } #For the destination operand RD, we export a temporary varnode with value 0. #This is because writes to g0 are allowed, but they have no visible effect (see the Sparc manual). #This way the value of g0 won't appear to change when using the pcode emulator. +# RD: rd is rd & rd_zero=0 { local tmp:$(SIZE) = 0; export tmp; } +# didrestore is picked up by call instruction only +# this will cause any instruction that assigns to the o7 return address register +# in the delay slot of a call instruction to turn the call into a call/return +# +RD: rd is rd & rd_d=15 { didrestore = 1; export rd; } RD: rd is rd { export rd; } regorimm: RS2 is i=0 & RS2 { export RS2; } @@ -486,8 +492,9 @@ macro unpackflags(ccr) { :addcc RS1,regorimm,RD is op=2 & RD & op3=0x10 & RS1 & regorimm { addflags(RS1,regorimm); - RD = RS1 + regorimm; - zeroflags(RD); + local res:$(SIZE) = RS1 + regorimm; + zeroflags(res); + RD = res; } :addc RS1,regorimm,RD is op=2 & RD & op3=0x8 & RS1 & regorimm {RD = RS1 + regorimm + zext(i_cf);} @@ -496,8 +503,9 @@ macro unpackflags(ccr) { { local original_i_cf:$(SIZE) = zext(i_cf); addCarryFlags(RS1,regorimm); - RD = RS1 + regorimm + original_i_cf; - zeroflags(RD); + local res:$(SIZE) = RS1 + regorimm + original_i_cf; + zeroflags(res); + RD = res; } #----------------------- :and RS1,regorimm,RD is op=2 & RD & op3=0x1 & RS1 & regorimm {RD = RS1 & regorimm;} @@ -505,16 +513,18 @@ macro unpackflags(ccr) { :andcc RS1,regorimm,RD is op=2 & RD & op3=0x11 & RS1 & regorimm { logicflags(); - RD = RS1 & regorimm; - zeroflags(RD); + local res:$(SIZE) = RS1 & regorimm; + zeroflags(res); + RD = res; } :andn RS1,regorimm,RD is op=2 & RD & op3=0x5 & RS1 & regorimm {RD = RS1 & ~regorimm;} :andncc RS1,regorimm,RD is op=2 & RD & op3=0x15 & RS1 & regorimm { logicflags(); - RD = RS1 & ~regorimm; - zeroflags(RD); + local res:$(SIZE) = RS1 & ~regorimm; + zeroflags(res); + RD = res; 
} :or RS1,regorimm,RD is op=2 & RD & op3=0x2 & RS1 & regorimm {RD = RS1 | regorimm;} @@ -522,24 +532,27 @@ macro unpackflags(ccr) { :orcc RS1,regorimm,RD is op=2 & RD & op3=0x12 & RS1 & regorimm { logicflags(); - RD = RS1 | regorimm; - zeroflags(RD); + local res:$(SIZE) = RS1 | regorimm; + zeroflags(res); + RD = res; } :orn RS1,regorimm,RD is op=2 & RD & op3=0x6 & RS1 & regorimm {RD = RS1 | ~regorimm;} :orncc RS1,regorimm,RD is op=2 & RD & op3=0x16 & RS1 & regorimm { logicflags(); - RD = RS1 | ~regorimm; - zeroflags(RD); + local res:$(SIZE) = RS1 | ~regorimm; + zeroflags(res); + RD = res; } :xor RS1,regorimm,RD is op=2 & RD & op3=0x3 & RS1 & regorimm {RD = RS1 ^ regorimm;} :xorcc RS1,regorimm,RD is op=2 & RD & op3=0x13 & RS1 & regorimm { logicflags(); - RD = RS1 ^ regorimm; - zeroflags(RD); + local res:$(SIZE) = RS1 ^ regorimm; + zeroflags(res); + RD = res; } :xnor RS1,regorimm,RD is op=2 & RD & op3=0x7 & RS1 & regorimm {RD = RS1 ^ ~regorimm;} @@ -547,8 +560,9 @@ macro unpackflags(ccr) { :xnorcc RS1,regorimm,RD is op=2 & RD & op3=0x17 & RS1 & regorimm { logicflags(); - RD = RS1 ^ ~regorimm; - zeroflags(RD); + local res:$(SIZE) = RS1 ^ ~regorimm; + zeroflags(res); + RD = res; } # --------------- @@ -609,8 +623,9 @@ macro unpackflags(ccr) { :subcc RS1,regorimm,RD is op=2 & RD & op3=0x14 & RS1 & regorimm { subflags(RS1,regorimm); - RD = RS1 - regorimm; - zeroflags(RD); + local res:$(SIZE) = RS1 - regorimm; + zeroflags(res); + RD = res; } :subc RS1,regorimm,RD is op=2 & RD & op3=0xc & RS1 & regorimm @@ -622,8 +637,9 @@ macro unpackflags(ccr) { { local original_cf:$(SIZE) = zext(i_cf); subCarryFlags(RS1,regorimm); - RD = RS1 - regorimm - original_cf; - zeroflags(RD); + local res:$(SIZE) = RS1 - regorimm - original_cf; + zeroflags(res); + RD = res; } # --------------- @@ -635,8 +651,8 @@ macro unpackflags(ccr) { :cmp RS1,regorimm is op=0x2 & rd=0x0 & op3=0x14 & RS1 & regorimm { subflags(RS1,regorimm); - local tmp = RS1 - regorimm; - zeroflags(tmp); + local res:$(SIZE) = 
RS1 - regorimm; + zeroflags(res); } @@ -818,52 +834,57 @@ callreloff: reloc is disp30 [reloc=inst_start+4*disp30;] { export *:$(SIZE) rel :udivx RS1,regorimm,RD is op=2 & RD & op3=0x0d & RS1 & regorimm {RD = RS1 / regorimm;} #----------------MULTIPLY 32 bit -@if SIZE=="8" -:umul RS1,regorimm,RD is op=2 & RD & op3=0x0a & RS1 & regorimm {RD = zext(RS1:4) * zext(regorimm:4); Y=RD>>32;} -:smul RS1,regorimm,RD is op=2 & RD & op3=0x0b & RS1 & regorimm {RD = sext(RS1:4) * sext(regorimm:4); Y=RD>>32;} -:umulcc RS1,regorimm,RD is op=2 & RD & op3=0x1a & RS1 & regorimm {RD = zext(RS1:4) * zext(regorimm:4); Y=RD>>32; zeroflags(RD); logicflags();} -:smulcc RS1,regorimm,RD is op=2 & RD & op3=0x1b & RS1 & regorimm {RD = sext(RS1:4) * sext(regorimm:4); Y=RD>>32; zeroflags(RD); logicflags();} -@else -# size = 4 -:umul RS1,regorimm,RD is op=2 & RD & op3=0x0a & RS1 & regorimm +:umul RS1,regorimm,RD is op=2 & RD & op3=0x0a & RS1 & regorimm { - tmp_RS1:8 = zext(RS1); - tmp_regorimm:8 = zext(regorimm); - tmp:8 = tmp_RS1 * tmp_regorimm; - RD = tmp:4; tmp2:8 = tmp >> 32; - Y = tmp2:4; + local res:8 = zext(RS1:4) * zext(regorimm:4); + Y = zext(res[32,32]); +@if SIZE=="4" + RD = res:4; # 32 bit only gets lower 4 bytes +@else + RD = res; # 64 bit gets full product +@endif } -:smul RS1,regorimm,RD is op=2 & RD & op3=0x0b & RS1 & regorimm +:smul RS1,regorimm,RD is op=2 & RD & op3=0x0b & RS1 & regorimm { - tmp_RS1:8 = sext(RS1); - tmp_regorimm:8 = sext(regorimm); - tmp:8 = tmp_RS1 * tmp_regorimm; - RD = tmp:4; tmp2:8 = tmp >> 32; - Y = tmp2:4; + local res:8 = sext(RS1:4) * sext(regorimm:4); + Y = zext(res[32,32]); +@if SIZE=="4" + RD = res:4; # 32 bit only gets lower 4 bytes +@else + RD = res; # 64 bit gets full product +@endif } -:umulcc RS1,regorimm,RD is op=2 & RD & op3=0x1a & RS1 & regorimm +:umulcc RS1,regorimm,RD is op=2 & RD & op3=0x1a & RS1 & regorimm { - RD = zext(RS1:4) * zext(regorimm:4); - Y=RD>>32; - zeroflags(RD); + local res:8 = zext(RS1:4) * zext(regorimm:4); + Y = 
zext(res[32,32]); + zeroflags(res:4); +@if SIZE=="4" + RD = res:4; # 32 bit only gets lower 4 bytes +@else + RD = res; # 64 bit gets full product +@endif logicflags(); } -:smulcc RS1,regorimm,RD is op=2 & RD & op3=0x1b & RS1 & regorimm +:smulcc RS1,regorimm,RD is op=2 & RD & op3=0x1b & RS1 & regorimm { - RD = zext(RS1:4) * zext(regorimm:4); - Y=RD>>32; - zeroflags(RD); + local res:8 = sext(RS1:4) * sext(regorimm:4); + Y = zext(res[32,32]); + zeroflags(res:4); +@if SIZE=="4" + RD = res:4; # 32 bit only gets lower 4 bytes +@else + RD = res; # 64 bit gets full product +@endif logicflags(); } -@endif - #----------------MULTIPLY Step -:mulscc RS1,regorimm,RD is op=2 & RD & op3=0x24 & RS1 & regorimm +:mulscc RS1,regorimm,RD is op=2 & RD & op3=0x24 & RS1 & regorimm { local ccr:4 = zext(i_nf ^^ i_vf); ccr = ccr << 31; @@ -877,48 +898,53 @@ callreloff: reloc is disp30 [reloc=inst_start+4*disp30;] { export *:$(SIZE) rel addflags32(addend,shifted); #upper 32 bits of RD are undefined according to the manual local tbit:4 = (RS1:4 & 0x1:4) << 31; - RD = zext(sum); - zeroflags(RD); + local res:$(SIZE) = zext(sum); + zeroflags(res); + RD = res; #Y is 64 bits in Sparc 9 but the high 32 are fixed to 0 Y = zext((Y:4 >> 1:4) | tbit); } - + #----------------DIVIDE (64-bit / 32-bit) # NB- Beware, the plus + operator has higher precedence than shift << # (These are Java rules. 
C rules have shift and + at the same level, so left to right) -:udiv RS1,regorimm,RD is op=2 & RD & op3=0x0e & RS1 & regorimm +:udiv RS1,regorimm,RD is op=2 & RD & op3=0x0e & RS1 & regorimm { - numerator:$(SIZE)= (Y << 32) + (RS1 & 0xffffffff); - denom:$(SIZE) = regorimm & 0xffffffff; - RD = numerator / denom; + numerator:8 = (zext(Y) << 32) + zext(RS1:4); + denom:8 = zext(regorimm:4); + local res:8 = numerator / denom; + RD = zext(res:4); } -:sdiv RS1,regorimm,RD is op=2 & RD & op3=0x0f & RS1 & regorimm +:sdiv RS1,regorimm,RD is op=2 & RD & op3=0x0f & RS1 & regorimm { - numerator:$(SIZE)= (Y << 32) + (RS1 & 0xffffffff); - denom:$(SIZE) = regorimm & 0xffffffff; - RD = numerator s/ denom; -} + numerator:8 = (sext(Y) << 32) + zext(RS1:4); + denom:8 = sext(regorimm:4); + local res:8 = numerator s/ denom; + RD = sext(res:4); +} -:udivcc RS1,regorimm,RD is op=2 & RD & op3=0x1e & RS1 & regorimm +:udivcc RS1,regorimm,RD is op=2 & RD & op3=0x1e & RS1 & regorimm { - numerator:$(SIZE)= ( Y << 32) + (RS1 & 0xffffffff); - denom:$(SIZE) = regorimm & 0xffffffff; - RD = numerator / denom; - zeroflags(RD); - i_vf = RD > 0xffffffff; + numerator:8 = (zext(Y) << 32) + zext(RS1:4); + denom:8 = zext(regorimm:4); + local res:8 = numerator / denom; + zeroflags(res:4); + RD = zext(res:4); + i_vf = res > 0xffffffff; i_cf = 0; x_vf = 0; x_cf = 0; } -:sdivcc RS1,regorimm,RD is op=2 & RD & op3=0x1f & RS1 & regorimm +:sdivcc RS1,regorimm,RD is op=2 & RD & op3=0x1f & RS1 & regorimm { - numerator:$(SIZE)= (Y << 32) + (RS1 & 0xffffffff); - denom:$(SIZE) = regorimm & 0xffffffff; - RD = numerator s/ denom; - zeroflags(RD); - i_vf = (RD s>= 0x80000000) || (RD s<= -0x7ffffffff); + numerator:8 = (sext(Y) << 32) + (zext(RS1) & 0xffffffff); + denom:8 = sext(regorimm:4); + local res:8 = numerator s/ denom; + zeroflags(res:4); + RD = sext(res:4); + i_vf = (res s>= 0x80000000) || (res s<= -0x7ffffffff); i_cf = 0; x_vf = 0; x_cf = 0; @@ -984,69 +1010,96 @@ sethidisp: "%hi("^hi^")" is udisp22 
[hi=udisp22<<10;] { export *[const]:$(SIZE) :restore RS1,regorimm,RD is op=0x2 & RD & op3=0x3d & RS1 & regorimm { local tmp = RS1 + regorimm; restore(); didrestore=1; RD = tmp; } :restore is op=0x2 & rd=0 & op3=0x3d { restore(); didrestore=1; } +# FIXME 'jmpl' can have 'return' in the delayslot to return from a user trap handler +# @see PR #6285 + :return retea is op=0x2 & op3=0x39 & retea { build retea; restore(); delayslot(1); didrestore=1; return [retea]; } :jmpl retea,RD is op=0x2 & RD & op3=0x38 & retea { build retea; RD = inst_start; delayslot(1); goto [retea]; } # special case where link register is loaded with return address; functions as indirect call :jmpl retea,RD is op=0x2 & RD & prd=15 & op3=0x38 & retea { build retea; RD = inst_start; delayslot(1); call [retea]; } +:jmpl retea is op=0x2 & rd=0 & op3=0x38 & retea { build retea; delayslot(1); goto [retea]; } + +# special case: when returning a structure, some software inserts unimpl instruction after every caller +# jumps to linkRegister(o7)+12, instead of normal linkregister(o7)+8 +:jmpl retea is op=0x2 & rd=0 & rs1=31 & op3=0x38 & i=1 & simm13=12 & retea { build retea; delayslot(1); return [retea]; } +:jmpl retea is op=0x2 & rd=0 & rs1=15 & op3=0x38 & i=1 & simm13=12 & retea { build retea; delayslot(1); return [retea]; } -:jmpl retea is op=0x2 & rd=0 & op3=0x38 & retea { build retea; delayslot(1); goto [retea]; } +# really jmpl instruction using linkRegister(o7)+8 :ret is op=0x2 & rd=0 & rs1=31 & op3=0x38 & i=1 & simm13=8 & retea { build retea; delayslot(1); return [retea]; } :retl is op=0x2 & rd=0 & rs1=15 & op3=0x38 & i=1 & simm13=8 & retea { build retea; delayslot(1); return [retea]; } casa_ea: [RS1]imm_asi is i=0 & RS1 & imm_asi { local tmp1:1 = imm_asi; local tmp = RS1+segment(tmp1); export tmp; } casa_ea: [RS1]%ASI is i=1 & RS1 & ASI { local tmp = RS1+segment(ASI); export tmp; } -:casa casa_ea,RS2,RD is op=0x3 & RD & op3=0x3c & casa_ea & RS2 -{ - local tmp:4=RD:4; - RD=zext(*:4 casa_ea); - if 
((RS2 & 0xFFFFFFFF)!=RD) goto ; - *:4 casa_ea=tmp; +:casa casa_ea,RS2,RD is op=0x3 & RD & op3=0x3c & casa_ea & RS2 +{ + local tmp:4=RD:4; + local tmp2:$(SIZE) = RS2; + local tmp_ea:$(SIZE) = casa_ea; + RD=zext(*:4 tmp_ea); + if ((tmp2 & 0xFFFFFFFF)!=RD) goto ; + *:4 tmp_ea=tmp; } -:casxa casa_ea,RS2,RD is op=0x3 & RD & op3=0x3e & casa_ea & RS2 -{ - local tmp=RD; - RD=*:$(SIZE) casa_ea; - if (RS2!=RD) goto ; - *:$(SIZE) casa_ea=tmp; +:casxa casa_ea,RS2,RD is op=0x3 & RD & op3=0x3e & casa_ea & RS2 +{ + local tmp=RD; + local tmp2:$(SIZE) = RS2; + local tmp_ea:$(SIZE) = casa_ea; + RD=*:$(SIZE) tmp_ea; + if (tmp2!=RD) goto ; + *:$(SIZE) tmp_ea=tmp; } :impdef1 is op=0x2 & op3=0x36 unimpl :impdef2 is op=0x2 & op3=0x37 unimpl -:ldstub ea,RD is op=0x3 & RD & op3=0xd & ea { RD = zext(*:1 ea); *:1 ea = 0xFF; } -:ldstuba ea_alt,RD is op=0x3 & RD & op3=0x1d & ea_alt { RD = zext(*:1 ea_alt); *:1 ea_alt = 0xFF; } +:ldstub ea,RD is op=0x3 & RD & op3=0xd & ea +{ + local tmp_ea:$(SIZE) = ea; + RD = zext(*:1 tmp_ea); + *:1 tmp_ea = 0xFF; +} +:ldstuba ea_alt,RD is op=0x3 & RD & op3=0x1d & ea_alt +{ + local tmp_ea:$(SIZE) = ea_alt; + RD = zext(*:1 tmp_ea); + *:1 tmp_ea = 0xFF; +} -:swap ea,RD is op=0x3 & RD & op3=0xF & ea { tmp:4=RD:4; RD = zext(*:4 ea); *:4 ea = tmp; } -:swapa ea_alt,RD is op=0x3 & RD & op3=0x1F & ea_alt { tmp:4=RD:4; RD = zext(*:4 ea_alt); *:4 ea_alt = tmp; } +:swap ea,RD is op=0x3 & RD & op3=0xF & ea { local tmp_ea:$(SIZE) = ea; tmp:4=RD:4; RD = zext(*:4 tmp_ea); *:4 tmp_ea = tmp; } +:swapa ea_alt,RD is op=0x3 & RD & op3=0x1F & ea_alt { local tmp_ea:$(SIZE) = ea_alt; tmp:4=RD:4; RD = zext(*:4 tmp_ea); *:4 tmp_ea = tmp; } -:taddcc RS1,regorimm,RD is op=2 & RD & op3=0x20 & RS1 & regorimm +:taddcc RS1,regorimm,RD is op=2 & RD & op3=0x20 & RS1 & regorimm { taddflags(RS1,regorimm); - RD = RS1 + regorimm; - zeroflags(RD); + local res:$(SIZE) = RS1 + regorimm; + zeroflags(res); + RD = res; } -:taddcctv RS1,regorimm,RD is op=2 & RD & op3=0x22 & RS1 & regorimm +:taddcctv 
RS1,regorimm,RD is op=2 & RD & op3=0x22 & RS1 & regorimm { taddflags(RS1,regorimm); - RD = RS1 + regorimm; - zeroflags(RD); + local res:$(SIZE) = RS1 + regorimm; + zeroflags(res); + RD = res; } -:tsubcc RS1,regorimm,RD is op=2 & RD & op3=0x21 & RS1 & regorimm +:tsubcc RS1,regorimm,RD is op=2 & RD & op3=0x21 & RS1 & regorimm { tsubflags(RS1,regorimm); - RD = RS1 - regorimm; - zeroflags(RD); + local res:$(SIZE) = RS1 - regorimm; + zeroflags(res); + RD = res; } -:tsubcctv RS1,regorimm,RD is op=2 & RD & op3=0x23 & RS1 & regorimm +:tsubcctv RS1,regorimm,RD is op=2 & RD & op3=0x23 & RS1 & regorimm { tsubflags(RS1,regorimm); - RD = RS1 - regorimm; - zeroflags(RD); + local res:$(SIZE) = RS1 - regorimm; + zeroflags(res); + RD = res; } tcc: icc is cc1_4=0 & cc0_4=0 & icc { export icc; } @@ -1058,7 +1111,13 @@ TICC: "%xcc" is cc1_4=1 &cc0_4=0 { } trap: RS1+RS2 is i=0 & RS1 & RS2 { local tmp = ((RS1 + RS2) & 0x7F); export tmp; } trap: RS1+swtrap is i=1 & RS1 & swtrap { local tmp = ((RS1 + swtrap) & 0x7F); export tmp; } -:t^tcc TICC, trap is op=0x2 & op3=0x3a & tcc & TICC & trap { sw_trap(trap); } +:t^tcc TICC, trap is op=0x2 & op3=0x3a & tcc & TICC & trap +{ + if (!tcc) goto inst_next; + local dest:$(SIZE) = sw_trap(trap); + # trap should fall thru by default, can be over-ridden to a branch/call-return + call [dest]; +} membar_mask: is cmask & mmask { tmp:1 = (cmask << 4) | mmask; export tmp; } @@ -1134,9 +1193,9 @@ resv30: "%resv30" is fcn { local reloc = zext(TL == 1)*&RESV30_1 + zext(TL == define pcodeop IllegalInstructionTrap; :illtrap const22 is op = 0 & op2 = 0 & const22 { - IllegalInstructionTrap(const22:4); - tmp:$(SIZE) = 0; # trap - don't fall-thru - return [ tmp ]; + local dest:$(SIZE) = IllegalInstructionTrap(const22:4); + # trap should not fall thru by default, can be over-ridden to a call + goto [dest]; } :prefetch ea,fcn is op=3 & fcn & op3 = 0x2d & ea {} diff --git a/pypcode/processors/Sparc/data/languages/SparcV9_32.cspec 
b/pypcode/processors/Sparc/data/languages/SparcV9_32.cspec index 28f5f300..78fedadc 100644 --- a/pypcode/processors/Sparc/data/languages/SparcV9_32.cspec +++ b/pypcode/processors/Sparc/data/languages/SparcV9_32.cspec @@ -30,41 +30,72 @@ - + + + + - + - + - + - + - + + + + + + + + + + + + + - + - - + + - - + + - + + + + + + + + + + + + + + + + + diff --git a/pypcode/processors/Sparc/data/languages/SparcV9_64.cspec b/pypcode/processors/Sparc/data/languages/SparcV9_64.cspec index 67ccac70..a176a8a4 100644 --- a/pypcode/processors/Sparc/data/languages/SparcV9_64.cspec +++ b/pypcode/processors/Sparc/data/languages/SparcV9_64.cspec @@ -14,7 +14,7 @@ - + @@ -30,6 +30,9 @@ + + + @@ -42,53 +45,72 @@ - - - - - - - - + + - - + + - + - + - + - + - + - + + + + + + + + + + + + + + + + + + + + + - - - - + - - + + - + + + + + + + + + @@ -127,22 +149,22 @@ - + - + - + - + - + - + @@ -150,7 +172,16 @@ - + + + + + + + + + + @@ -187,4 +218,32 @@ + + + + + + + + + + + + + + + + + + + + + + diff --git a/pypcode/processors/Sparc/data/patterns/SPARC_patterns.xml b/pypcode/processors/Sparc/data/patterns/SPARC_patterns.xml index 3d1976d2..9a3de958 100644 --- a/pypcode/processors/Sparc/data/patterns/SPARC_patterns.xml +++ b/pypcode/processors/Sparc/data/patterns/SPARC_patterns.xml @@ -2,34 +2,40 @@ 0x81f00000 - 0x81c7e008 0x........ - 0x81c7e008 0x........ 0x00000000 - 0x81c7e008 0x........ 0x00000000 0x00000000 - 0x81c7e008 0x........ 0x00000000 0x00000000 0x00000000 - 0x81c7e008 0x........ 0x00000000 0x00000000 0x00000000 0x00000000 - 0x81c7e008 0x........ 0x00000000 0x00000000 0x00000000 0x00000000 0x00000000 - 0x81c7e008 0x........ 0x00000000 0x00000000 0x00000000 0x00000000 0x00000000 0x00000000 - 0x81c7e008 0x........ 0x00000000 0x00000000 0x00000000 0x00000000 0x00000000 0x00000000 0x00000000 - 0x81c3e008 0x........ - 0x81c3e008 0x........ 0x00000000 - 0x81c3e008 0x........ 0x00000000 0x00000000 - 0x81c3e008 0x........ 0x00000000 0x00000000 0x00000000 - 0x81c3e008 0x........ 
0x00000000 0x00000000 0x00000000 0x00000000 - 0x81c3e008 0x........ 0x00000000 0x00000000 0x00000000 0x00000000 0x00000000 - 0x81c3e008 0x........ 0x00000000 0x00000000 0x00000000 0x00000000 0x00000000 0x00000000 - 0x81c3e008 0x........ 0x00000000 0x00000000 0x00000000 0x00000000 0x00000000 0x00000000 0x00000000 - 0x81cfe008 0x........ - 0x81cfe008 0x........ 0x00000000 - 0x81cfe008 0x........ 0x00000000 0x00000000 - 0x81cfe008 0x........ 0x00000000 0x00000000 0x00000000 - 0x81cfe008 0x........ 0x00000000 0x00000000 0x00000000 0x00000000 - 0x81cfe008 0x........ 0x00000000 0x00000000 0x00000000 0x00000000 0x00000000 - 0x81cfe008 0x........ 0x00000000 0x00000000 0x00000000 0x00000000 0x00000000 0x00000000 - 0x81cfe008 0x........ 0x00000000 0x00000000 0x00000000 0x00000000 0x00000000 0x00000000 0x00000000 + 0x81c7e008 0x........ + 0x81c7e008 0x........ 0000000. 0x000000 + 0x81c3e008 0x........ + 0x81c3e008 0x........ 0000000. 0x000000 + 0x81cfe008 0x........ + 0x81cfe008 0x........ 0000000. 0x000000 + 0x10 101..... 0x.... 0x........ + 0x10 101..... 0x.... 0x........ 0000000. 0x000000 + 0x30 101..... 0x.... + 0x30 101..... 0x.... 0000000. 0x000000 + 01...... 0x...... 10.....1 11101... 0x.... + 01...... 0x...... 10.....1 11101... 0x.... 0000000. 0x000000 + 01...... 0x...... 0x9E 00010... 0x.... + 01...... 0x...... 0x9E 00010... 0x.... 0000000. 0x000000 + 0000000. 0x000000 0000000. 0x000000 10011101 11100011 10111... ........ 
+ + + 0x81 0xc3 0xe0 0x08 0xae 0x03 0xc0 0x17 + + + + + 0x81 0xc3 0xe0 0x08 0x82 0x03 0xc0 0x01 + + + + + 0x81 0xc3 0xe0 0x08 0x90 0x02 0x00 0x0f + + diff --git a/pypcode/processors/x86/data/languages/avx.sinc b/pypcode/processors/x86/data/languages/avx.sinc index 9b34ec7a..08badb2c 100644 --- a/pypcode/processors/x86/data/languages/avx.sinc +++ b/pypcode/processors/x86/data/languages/avx.sinc @@ -3,18 +3,23 @@ # INFO Command line arguments: ['--sinc', '--skip-sinc', '../../../../../../../ghidra/Ghidra/Processors/x86/data/languages/avx_manual.sinc', '../../../../../../../ghidra/Ghidra/Processors/x86/data/languages/ia.sinc'] # ADDPD 3-33 PAGE 603 LINE 33405 -define pcodeop vaddpd_avx ; :VADDPD XmmReg1, vexVVVV_XmmReg, XmmReg2_m128 is $(VEX_NDS) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_XmmReg; byte=0x58; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128 { - local tmp:16 = vaddpd_avx( vexVVVV_XmmReg, XmmReg2_m128 ); - ZmmReg1 = zext(tmp); + local m:16 = XmmReg2_m128; + XmmReg1[0,64] = vexVVVV_XmmReg[0,64] f+ m[0,64]; + XmmReg1[64,64] = vexVVVV_XmmReg[64,64] f+ m[64,64]; + ZmmReg1 = zext(XmmReg1); } # ADDPD 3-33 PAGE 603 LINE 33408 :VADDPD YmmReg1, vexVVVV_YmmReg, YmmReg2_m256 is $(VEX_NDS) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_YmmReg; byte=0x58; (YmmReg1 & ZmmReg1) ... 
& YmmReg2_m256 { - local tmp:32 = vaddpd_avx( vexVVVV_YmmReg, YmmReg2_m256 ); - ZmmReg1 = zext(tmp); + local m:32 = YmmReg2_m256; + YmmReg1[0,64] = vexVVVV_YmmReg[0,64] f+ m[0,64]; + YmmReg1[64,64] = vexVVVV_YmmReg[64,64] f+ m[64,64]; + YmmReg1[128,64] = vexVVVV_YmmReg[128,64] f+ m[128,64]; + YmmReg1[192,64] = vexVVVV_YmmReg[192,64] f+ m[192,64]; + ZmmReg1 = zext(YmmReg1); } # ADDPS 3-36 PAGE 606 LINE 33558 @@ -686,88 +691,103 @@ define pcodeop vcvtps2pd_avx ; @endif # CVTSS2SD 3-261 PAGE 831 LINE 44744 -define pcodeop vcvtss2sd_avx ; -:VCVTSS2SD XmmReg1, vexVVVV_XmmReg, XmmReg2_m32 is $(VEX_NDS) & $(VEX_L128) & $(VEX_PRE_F3) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_XmmReg; byte=0x5A; (XmmReg1 & ZmmReg1) ... & XmmReg2_m32 +:VCVTSS2SD XmmReg1, vexVVVV_XmmReg, XmmReg2_m32 is $(VEX_NDS) & $(VEX_L128) & $(VEX_PRE_F3) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_XmmReg; byte=0x5A; (XmmReg1 & ZmmReg1) ... & XmmReg2_m32 { - local tmp:16 = vcvtss2sd_avx( vexVVVV_XmmReg, XmmReg2_m32 ); - ZmmReg1 = zext(tmp); + local tmp:8 = float2float( XmmReg2_m32[0,32] ); + XmmReg1[0,64] = tmp; + XmmReg1[64,64] = vexVVVV_XmmReg[64,64]; + ZmmReg1 = zext(XmmReg1); } # CVTSS2SI 3-263 PAGE 833 LINE 44835 -define pcodeop vcvtss2si_avx ; -:VCVTSS2SI Reg32, XmmReg2_m32 is $(VEX_NONE) & $(VEX_L128) & $(VEX_PRE_F3) & $(VEX_0F) & $(VEX_W0); byte=0x2D; Reg32 ... & XmmReg2_m32 +:VCVTSS2SI Reg32, XmmReg2_m32 is $(VEX_NONE) & $(VEX_L128) & $(VEX_PRE_F3) & $(VEX_0F) & $(VEX_W0); byte=0x2D; Reg32 ... & XmmReg2_m32 { - Reg32 = vcvtss2si_avx( XmmReg2_m32 ); - # TODO Reg64 = zext(Reg32) + Reg32 = trunc(round(XmmReg2_m32[0,32])); } # CVTSS2SI 3-263 PAGE 833 LINE 44837 @ifdef IA64 -:VCVTSS2SI Reg64, XmmReg2_m32 is $(VEX_NONE) & $(VEX_L128) & $(VEX_PRE_F3) & $(VEX_0F) & $(VEX_W1); byte=0x2D; Reg64 ... & XmmReg2_m32 +:VCVTSS2SI Reg64, XmmReg2_m32 is $(VEX_NONE) & $(VEX_L128) & $(VEX_PRE_F3) & $(VEX_0F) & $(VEX_W1); byte=0x2D; Reg64 ... 
& XmmReg2_m32 { - Reg64 = vcvtss2si_avx( XmmReg2_m32 ); + Reg64 = trunc(round(XmmReg2_m32[0,32])); } + @endif # CVTTPD2DQ 3-265 PAGE 835 LINE 44930 -define pcodeop vcvttpd2dq_avx ; -:VCVTTPD2DQ XmmReg1, XmmReg2_m128 is $(VEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG); byte=0xE6; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128 +:VCVTTPD2DQ XmmReg1, XmmReg2_m128 is $(VEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG); byte=0xE6; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128 { - local tmp:16 = vcvttpd2dq_avx( XmmReg2_m128 ); - ZmmReg1 = zext(tmp); + local tmp:16 = XmmReg2_m128; + XmmReg1[0,32] = trunc(tmp[0,64]); + XmmReg1[32,32] = trunc(tmp[64,64]); + XmmReg1[64,32] = 0; + XmmReg1[96,32] = 0; + ZmmReg1 = zext(XmmReg1); } # CVTTPD2DQ 3-265 PAGE 835 LINE 44933 -:VCVTTPD2DQ XmmReg1, YmmReg2_m256 is $(VEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG); byte=0xE6; (XmmReg1 & ZmmReg1) ... & YmmReg2_m256 +:VCVTTPD2DQ XmmReg1, YmmReg2_m256 is $(VEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG); byte=0xE6; (XmmReg1 & ZmmReg1) ... & YmmReg2_m256 { - local tmp:16 = vcvttpd2dq_avx( YmmReg2_m256 ); - ZmmReg1 = zext(tmp); + local tmp:32 = YmmReg2_m256; + XmmReg1[0,32] = trunc(tmp[0,64]); + XmmReg1[32,32] = trunc(tmp[64,64]); + XmmReg1[64,32] = trunc(tmp[128,64]); + XmmReg1[96,32] = trunc(tmp[192,64]); + ZmmReg1 = zext(XmmReg1); } # CVTTPS2DQ 3-270 PAGE 840 LINE 45163 -define pcodeop vcvttps2dq_avx ; -:VCVTTPS2DQ XmmReg1, XmmReg2_m128 is $(VEX_NONE) & $(VEX_L128) & $(VEX_PRE_F3) & $(VEX_0F) & $(VEX_WIG); byte=0x5B; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128 +:VCVTTPS2DQ XmmReg1, XmmReg2_m128 is $(VEX_NONE) & $(VEX_L128) & $(VEX_PRE_F3) & $(VEX_0F) & $(VEX_WIG); byte=0x5B; (XmmReg1 & ZmmReg1) ... 
& XmmReg2_m128 { - local tmp:16 = vcvttps2dq_avx( XmmReg2_m128 ); - ZmmReg1 = zext(tmp); + local tmp:16 = XmmReg2_m128; + XmmReg1[0,32] = trunc(tmp[0,32]); + XmmReg1[32,32] = trunc(tmp[32,32]); + XmmReg1[64,32] = trunc(tmp[64,32]); + XmmReg1[96,32] = trunc(tmp[96,32]); + ZmmReg1 = zext(XmmReg1); } # CVTTPS2DQ 3-270 PAGE 840 LINE 45166 -:VCVTTPS2DQ YmmReg1, YmmReg2_m256 is $(VEX_NONE) & $(VEX_L256) & $(VEX_PRE_F3) & $(VEX_0F) & $(VEX_WIG); byte=0x5B; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256 +:VCVTTPS2DQ YmmReg1, YmmReg2_m256 is $(VEX_NONE) & $(VEX_L256) & $(VEX_PRE_F3) & $(VEX_0F) & $(VEX_WIG); byte=0x5B; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256 { - local tmp:32 = vcvttps2dq_avx( YmmReg2_m256 ); - ZmmReg1 = zext(tmp); + local tmp:32 = YmmReg2_m256; + YmmReg1[0,32] = trunc(tmp[0,32]); + YmmReg1[32,32] = trunc(tmp[32,32]); + YmmReg1[64,32] = trunc(tmp[64,32]); + YmmReg1[96,32] = trunc(tmp[96,32]); + YmmReg1[128,32] = trunc(tmp[128,32]); + YmmReg1[160,32] = trunc(tmp[160,32]); + YmmReg1[192,32] = trunc(tmp[192,32]); + YmmReg1[224,32] = trunc(tmp[224,32]); + ZmmReg1 = zext(YmmReg1); } # CVTTSD2SI 3-274 PAGE 844 LINE 45379 -define pcodeop vcvttsd2si_avx ; -:VCVTTSD2SI Reg32, XmmReg2_m64 is $(VEX_NONE) & $(VEX_L128) & $(VEX_PRE_F2) & $(VEX_0F) & $(VEX_W0); byte=0x2C; Reg32 ... & XmmReg2_m64 +:VCVTTSD2SI Reg32, XmmReg2_m64 is $(VEX_NONE) & $(VEX_L128) & $(VEX_PRE_F2) & $(VEX_0F) & $(VEX_W0); byte=0x2C; Reg32 ... & XmmReg2_m64 { - Reg32 = vcvttsd2si_avx( XmmReg2_m64 ); - # TODO Reg64 = zext(Reg32) + Reg32 = trunc(XmmReg2_m64[0,64]); } # CVTTSD2SI 3-274 PAGE 844 LINE 45382 @ifdef IA64 -:VCVTTSD2SI Reg64, XmmReg2_m64 is $(VEX_NONE) & $(VEX_L128) & $(VEX_PRE_F2) & $(VEX_0F) & $(VEX_W1); byte=0x2C; Reg64 ... & XmmReg2_m64 +:VCVTTSD2SI Reg64, XmmReg2_m64 is $(VEX_NONE) & $(VEX_L128) & $(VEX_PRE_F2) & $(VEX_0F) & $(VEX_W1); byte=0x2C; Reg64 ... 
& XmmReg2_m64 { - Reg64 = vcvttsd2si_avx( XmmReg2_m64 ); + Reg64 = trunc(XmmReg2_m64[0,64]); } + @endif # CVTTSS2SI 3-276 PAGE 846 LINE 45473 -define pcodeop vcvttss2si_avx ; -:VCVTTSS2SI Reg32, XmmReg2_m32 is $(VEX_NONE) & $(VEX_L128) & $(VEX_PRE_F3) & $(VEX_0F) & $(VEX_W0); byte=0x2C; Reg32 ... & XmmReg2_m32 +:VCVTTSS2SI Reg32, XmmReg2_m32 is $(VEX_NONE) & $(VEX_L128) & $(VEX_PRE_F3) & $(VEX_0F) & $(VEX_W0); byte=0x2C; Reg32 ... & XmmReg2_m32 { - Reg32 = vcvttss2si_avx( XmmReg2_m32 ); - # TODO Reg64 = zext(Reg32) + Reg32 = trunc(XmmReg2_m32[0,32]); } # CVTTSS2SI 3-276 PAGE 846 LINE 45476 @ifdef IA64 -:VCVTTSS2SI Reg64, XmmReg2_m32 is $(VEX_NONE) & $(VEX_L128) & $(VEX_PRE_F3) & $(VEX_0F) & $(VEX_W1); byte=0x2C; Reg64 ... & XmmReg2_m32 +:VCVTTSS2SI Reg64, XmmReg2_m32 is $(VEX_NONE) & $(VEX_L128) & $(VEX_PRE_F3) & $(VEX_0F) & $(VEX_W1); byte=0x2C; Reg64 ... & XmmReg2_m32 { - Reg64 = vcvttss2si_avx( XmmReg2_m32 ); + Reg64 = trunc(XmmReg2_m32[0,32]); } @endif @@ -802,19 +822,19 @@ define pcodeop vdivps_avx ; } # DIVSD 3-294 PAGE 864 LINE 46312 -define pcodeop vdivsd_avx ; -:VDIVSD XmmReg1, vexVVVV_XmmReg, XmmReg2_m64 is $(VEX_NDS) & $(VEX_L128) & $(VEX_PRE_F2) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_XmmReg; byte=0x5E; (XmmReg1 & ZmmReg1) ... & XmmReg2_m64 +:VDIVSD XmmReg1, vexVVVV_XmmReg, XmmReg2_m64 is $(VEX_NDS) & $(VEX_L128) & $(VEX_PRE_F2) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_XmmReg; byte=0x5E; (XmmReg1 & ZmmReg1) ... & XmmReg2_m64 { - local tmp:16 = vdivsd_avx( vexVVVV_XmmReg, XmmReg2_m64 ); - ZmmReg1 = zext(tmp); + XmmReg1[0,64] = vexVVVV_XmmReg[0,64] f/ XmmReg2_m64[0,64]; + XmmReg1[64,64] = vexVVVV_XmmReg[64,64]; + ZmmReg1 = zext(XmmReg1); } # DIVSS 3-296 PAGE 866 LINE 46410 -define pcodeop vdivss_avx ; -:VDIVSS XmmReg1, vexVVVV_XmmReg, XmmReg2_m32 is $(VEX_NDS) & $(VEX_L128) & $(VEX_PRE_F3) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_XmmReg; byte=0x5E; (XmmReg1 & ZmmReg1) ... 
& XmmReg2_m32 +:VDIVSS XmmReg1, vexVVVV_XmmReg, XmmReg2_m32 is $(VEX_NDS) & $(VEX_L128) & $(VEX_PRE_F3) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_XmmReg; byte=0x5E; (XmmReg1 & ZmmReg1) ... & XmmReg2_m32 { - local tmp:16 = vdivss_avx( vexVVVV_XmmReg, XmmReg2_m32 ); - ZmmReg1 = zext(tmp); + XmmReg1[0,32] = vexVVVV_XmmReg[0,32] f/ XmmReg2_m32[0,32]; + XmmReg1[32,96] = vexVVVV_XmmReg[32,96]; + ZmmReg1 = zext(XmmReg1); } # DPPD 3-298 PAGE 868 LINE 46509 @@ -848,18 +868,23 @@ define pcodeop vextractps_avx ; } # HADDPD 3-427 PAGE 997 LINE 52447 -define pcodeop vhaddpd_avx ; :VHADDPD XmmReg1, vexVVVV_XmmReg, XmmReg2_m128 is $(VEX_NDS) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_XmmReg; byte=0x7C; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128 { - local tmp:16 = vhaddpd_avx( vexVVVV_XmmReg, XmmReg2_m128 ); - ZmmReg1 = zext(tmp); + local m:16 = XmmReg2_m128; + XmmReg1[0,64] = vexVVVV_XmmReg[0,64] f+ vexVVVV_XmmReg[64,64]; + XmmReg1[64,64] = m[0,64] f+ m[64,64]; + ZmmReg1 = zext(XmmReg1); } # HADDPD 3-427 PAGE 997 LINE 52450 :VHADDPD YmmReg1, vexVVVV_YmmReg, YmmReg2_m256 is $(VEX_NDS) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_YmmReg; byte=0x7C; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256 { - local tmp:32 = vhaddpd_avx( vexVVVV_YmmReg, YmmReg2_m256 ); - ZmmReg1 = zext(tmp); + local m:32 = YmmReg2_m256; + YmmReg1[0,64] = vexVVVV_YmmReg[0,64] f+ vexVVVV_YmmReg[64,64]; + YmmReg1[64,64] = m[0,64] f+ m[64,64]; + YmmReg1[128,64] = vexVVVV_YmmReg[128,64] f+ vexVVVV_YmmReg[192,64]; + YmmReg1[192,64] = m[128,64] f+ m[192,64]; + ZmmReg1 = zext(YmmReg1); } # HADDPS 3-430 PAGE 1000 LINE 52586 @@ -1096,17 +1121,12 @@ define pcodeop vminss_avx ; } # MOVDQU,VMOVDQU8/16/32/64 4-67 PAGE 1187 LINE 61932 -:VMOVDQU m128, XmmReg1 is $(VEX_NONE) & $(VEX_L128) & $(VEX_PRE_F3) & $(VEX_0F) & $(VEX_WIG); byte=0x7F; XmmReg1 ... 
& m128 +:VMOVDQU XmmReg2_m128, XmmReg1 is $(VEX_NONE) & $(VEX_L128) & $(VEX_PRE_F3) & $(VEX_0F) & $(VEX_WIG); byte=0x7F; (XmmReg1 & XmmReg2_m128_extend) ... & XmmReg2_m128 { - m128 = XmmReg1; + XmmReg2_m128 = XmmReg1; + build XmmReg2_m128_extend; } -:VMOVDQU XmmReg2, XmmReg1 is $(VEX_NONE) & $(VEX_L128) & $(VEX_PRE_F3) & $(VEX_0F) & $(VEX_WIG); byte=0x7F; XmmReg1 & (mod=3 & XmmReg2 & ZmmReg2) -{ - ZmmReg2 = zext( XmmReg1 ); -} - - # MOVDQU,VMOVDQU8/16/32/64 4-67 PAGE 1187 LINE 61934 :VMOVDQU YmmReg1, YmmReg2_m256 is $(VEX_NONE) & $(VEX_L256) & $(VEX_PRE_F3) & $(VEX_0F) & $(VEX_WIG); byte=0x6F; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256 { @@ -1358,14 +1378,10 @@ define pcodeop vmovsldup_avx ; } # MOVUPD 4-126 PAGE 1246 LINE 64689 -:VMOVUPD m128, XmmReg1 is $(VEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG); byte=0x11; XmmReg1 ... & m128 +:VMOVUPD XmmReg2_m128, XmmReg1 is $(VEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG); byte=0x11; (XmmReg1 & XmmReg2_m128_extend) ... & XmmReg2_m128 { - m128 = XmmReg1; -} - -:VMOVUPD XmmReg2, XmmReg1 is $(VEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG); byte=0x11; XmmReg1 & ( mod=3 & XmmReg2 & ZmmReg2 ) -{ - ZmmReg2 = zext( XmmReg1 ); + XmmReg2_m128 = XmmReg1; + build XmmReg2_m128_extend; } # MOVUPD 4-126 PAGE 1246 LINE 64691 @@ -1396,39 +1412,54 @@ define pcodeop vmpsadbw_avx ; } # MULPD 4-146 PAGE 1266 LINE 65682 -define pcodeop vmulpd_avx ; :VMULPD XmmReg1, vexVVVV_XmmReg, XmmReg2_m128 is $(VEX_NDS) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_XmmReg; byte=0x59; (XmmReg1 & ZmmReg1) ... 
& XmmReg2_m128 { - local tmp:16 = vmulpd_avx( vexVVVV_XmmReg, XmmReg2_m128 ); - ZmmReg1 = zext(tmp); + local m:16 = XmmReg2_m128; + XmmReg1[0,64] = vexVVVV_XmmReg[0,64] f* m[0,64]; + XmmReg1[64,64] = vexVVVV_XmmReg[64,64] f* m[64,64]; + ZmmReg1 = zext(XmmReg1); } # MULPD 4-146 PAGE 1266 LINE 65684 :VMULPD YmmReg1, vexVVVV_YmmReg, YmmReg2_m256 is $(VEX_NDS) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_YmmReg; byte=0x59; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256 { - local tmp:32 = vmulpd_avx( vexVVVV_YmmReg, YmmReg2_m256 ); - ZmmReg1 = zext(tmp); + local m:32 = YmmReg2_m256; + YmmReg1[0,64] = vexVVVV_YmmReg[0,64] f* m[0,64]; + YmmReg1[64,64] = vexVVVV_YmmReg[64,64] f* m[64,64]; + YmmReg1[128,64] = vexVVVV_YmmReg[128,64] f* m[128,64]; + YmmReg1[192,64] = vexVVVV_YmmReg[192,64] f* m[192,64]; + ZmmReg1 = zext(YmmReg1); } # MULPS 4-149 PAGE 1269 LINE 65813 -define pcodeop vmulps_avx ; :VMULPS XmmReg1, vexVVVV_XmmReg, XmmReg2_m128 is $(VEX_NDS) & $(VEX_L128) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_XmmReg; byte=0x59; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128 { - local tmp:16 = vmulps_avx( vexVVVV_XmmReg, XmmReg2_m128 ); - ZmmReg1 = zext(tmp); + local m:16 = XmmReg2_m128; + XmmReg1[0,32] = vexVVVV_XmmReg[0,32] f* m[0,32]; + XmmReg1[32,32] = vexVVVV_XmmReg[32,32] f* m[32,32]; + XmmReg1[64,32] = vexVVVV_XmmReg[64,32] f* m[64,32]; + XmmReg1[96,32] = vexVVVV_XmmReg[96,32] f* m[96,32]; + ZmmReg1 = zext(XmmReg1); } # MULPS 4-149 PAGE 1269 LINE 65815 :VMULPS YmmReg1, vexVVVV_YmmReg, YmmReg2_m256 is $(VEX_NDS) & $(VEX_L256) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_YmmReg; byte=0x59; (YmmReg1 & ZmmReg1) ... 
& YmmReg2_m256 { - local tmp:32 = vmulps_avx( vexVVVV_YmmReg, YmmReg2_m256 ); - ZmmReg1 = zext(tmp); + local m:32 = YmmReg2_m256; + YmmReg1[0,32] = vexVVVV_YmmReg[0,32] f* m[0,32]; + YmmReg1[32,32] = vexVVVV_YmmReg[32,32] f* m[32,32]; + YmmReg1[64,32] = vexVVVV_YmmReg[64,32] f* m[64,32]; + YmmReg1[96,32] = vexVVVV_YmmReg[96,32] f* m[96,32]; + YmmReg1[128,32] = vexVVVV_YmmReg[128,32] f* m[128,32]; + YmmReg1[160,32] = vexVVVV_YmmReg[160,32] f* m[160,32]; + YmmReg1[192,32] = vexVVVV_YmmReg[192,32] f* m[192,32]; + ZmmReg1 = zext(YmmReg1); } # MULSD 4-152 PAGE 1272 LINE 65956 :VMULSD XmmReg1, vexVVVV_XmmReg, XmmReg2_m64 is $(VEX_NDS) & $(VEX_L128) & $(VEX_PRE_F2) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_XmmReg; byte=0x59; (XmmReg1 & ZmmReg1) ... & XmmReg2_m64 { - local tmp:8 = vexVVVV_XmmReg[0,64] f* XmmReg2_m64[0,64]; + local tmp:8 = vexVVVV_XmmReg[0,64] f* XmmReg2_m64[0,64]; ZmmReg1 = zext(tmp); } @@ -2770,49 +2801,88 @@ define pcodeop vunpcklps_avx ; } # VBROADCAST 5-12 PAGE 1836 LINE 94909 -define pcodeop vbroadcastss_avx ; -:VBROADCASTSS XmmReg1, m32 is $(VEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0); byte=0x18; (XmmReg1 & ZmmReg1) ... & m32 +:VBROADCASTSS XmmReg1, XmmReg2_m32 is $(VEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0); byte=0x18; (XmmReg1 & ZmmReg1) ... & XmmReg2_m32 { - local tmp:16 = vbroadcastss_avx( m32 ); - ZmmReg1 = zext(tmp); + local val:4 = XmmReg2_m32[0,32]; + XmmReg1[0,32] = val; + XmmReg1[32,32] = val; + XmmReg1[64,32] = val; + XmmReg1[96,32] = val; + ZmmReg1 = zext(XmmReg1); } # VBROADCAST 5-12 PAGE 1836 LINE 94911 -:VBROADCASTSS YmmReg1, m32 is $(VEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0); byte=0x18; (YmmReg1 & ZmmReg1) ... & m32 -{ - local tmp:32 = vbroadcastss_avx( m32 ); - ZmmReg1 = zext(tmp); +:VBROADCASTSS YmmReg1, XmmReg2_m32 is $(VEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0); byte=0x18; (YmmReg1 & ZmmReg1) ... 
& XmmReg2_m32 +{ + local val:4 = XmmReg2_m32[0,32]; + YmmReg1[0,32] = val; + YmmReg1[32,32] = val; + YmmReg1[64,32] = val; + YmmReg1[96,32] = val; + YmmReg1[128,32] = val; + YmmReg1[160,32] = val; + YmmReg1[192,32] = val; + YmmReg1[224,32] = val; + ZmmReg1 = zext(YmmReg1); } # VBROADCAST 5-12 PAGE 1836 LINE 94913 define pcodeop vbroadcastsd_avx ; -:VBROADCASTSD YmmReg1, m64 is $(VEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0); byte=0x19; (YmmReg1 & ZmmReg1) ... & m64 +:VBROADCASTSD YmmReg1, XmmReg2_m64 is $(VEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0); byte=0x19; (YmmReg1 & ZmmReg1) ... & XmmReg2_m64 { - local tmp:32 = vbroadcastsd_avx( m64 ); - ZmmReg1 = zext(tmp); + local val:8 = XmmReg2_m64[0,64]; + YmmReg1[0,64] = val; + YmmReg1[64,64] = val; + YmmReg1[128,64] = val; + YmmReg1[192,64] = val; + ZmmReg1 = zext(YmmReg1); } # VBROADCAST 5-12 PAGE 1836 LINE 94915 define pcodeop vbroadcastf128_avx ; -:VBROADCASTF128 YmmReg1, m128 is $(VEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0); byte=0x1A; (YmmReg1 & ZmmReg1) ... & m128 +:VBROADCASTF128 YmmReg1, XmmReg2_m128 is $(VEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0); byte=0x1A; (YmmReg1 & ZmmReg1) ... & XmmReg2_m128 { - local tmp:32 = vbroadcastf128_avx( m128 ); - ZmmReg1 = zext(tmp); + local val:16 = XmmReg2_m128; + YmmReg1[0,128] = val; + YmmReg1[128,128] = val; + ZmmReg1 = zext(YmmReg1); } # VEXTRACTF128/VEXTRACTF32x4/VEXTRACTF64x2/VEXTRACTF32x8/VEXTRACTF64x4 5-99 PAGE 1923 LINE 99102 -define pcodeop vextractf128_avx ; -:VEXTRACTF128 XmmReg2_m128, YmmReg1, imm8 is $(VEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W0); byte=0x19; YmmReg1 ... & XmmReg2_m128; imm8 +:VEXTRACTF128 XmmReg2_m128, YmmReg1, imm8 is $(VEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W0); byte=0x19; (YmmReg1 & XmmReg2_m128_extend) ... 
& XmmReg2_m128; imm8 { - XmmReg2_m128 = vextractf128_avx( YmmReg1, imm8:1 ); + local ext:1 = (imm8:1 & 1) == 1; + + local val:16 = YmmReg1[0,128]; + + if (ext == 0) goto ; + + val = YmmReg1[128,128]; + + + XmmReg2_m128 = val; + build XmmReg2_m128_extend; } # VINSERTF128/VINSERTF32x4/VINSERTF64x2/VINSERTF32x8/VINSERTF64x4 5-310 PAGE 2134 LINE 109703 -define pcodeop vinsertf128_avx ; -:VINSERTF128 YmmReg1, vexVVVV_YmmReg, XmmReg2_m128, imm8 is $(VEX_NDS) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W0) & vexVVVV_YmmReg; byte=0x18; (YmmReg1 & ZmmReg1) ... & XmmReg2_m128; imm8 +:VINSERTF128 YmmReg1, vexVVVV_YmmReg, XmmReg2_m128, imm8 is $(VEX_NDS) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W0) & vexVVVV_YmmReg; byte=0x18; (YmmReg1 & ZmmReg1) ... & XmmReg2_m128; imm8 { - local tmp:32 = vinsertf128_avx( vexVVVV_YmmReg, XmmReg2_m128, imm8:1 ); - ZmmReg1 = zext(tmp); + local ext:1 = (imm8:1 & 1) == 1; + local src1_0 = vexVVVV_YmmReg[0, 128]; + local src1_1 = vexVVVV_YmmReg[128, 128]; + + src2:16 = XmmReg2_m128; + + YmmReg1[0,128] = src2; + YmmReg1[128,128] = src1_1; + + if (ext == 0) goto ; + + YmmReg1[0,128] = src1_0; + YmmReg1[128,128] = src2; + + + ZmmReg1 = zext(YmmReg1); } # VMASKMOV 5-318 PAGE 2142 LINE 110151 @@ -3038,9 +3108,10 @@ define pcodeop vcvtps2ph_f16c ; } # VCVTPS2PH 5-37 PAGE 1861 LINE 96113 -:VCVTPS2PH XmmReg2_m128, YmmReg1, imm8 is $(VEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W0); byte=0x1D; YmmReg1 ... & XmmReg2_m128; imm8 +:VCVTPS2PH XmmReg2_m128, YmmReg1, imm8 is $(VEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W0); byte=0x1D; (YmmReg1 & XmmReg2_m128_extend) ... 
& XmmReg2_m128; imm8 { XmmReg2_m128 = vcvtps2ph_f16c( YmmReg1, imm8:1 ); + build XmmReg2_m128_extend; } diff --git a/pypcode/processors/x86/data/languages/avx2.sinc b/pypcode/processors/x86/data/languages/avx2.sinc index 416a1761..5bba08c7 100644 --- a/pypcode/processors/x86/data/languages/avx2.sinc +++ b/pypcode/processors/x86/data/languages/avx2.sinc @@ -896,10 +896,19 @@ define pcodeop vpunpcklqdq_avx2 ; } # VEXTRACTI128/VEXTRACTI32x4/VEXTRACTI64x2/VEXTRACTI32x8/VEXTRACTI64x4 5-106 PAGE 1930 LINE 99432 -:VEXTRACTI128 XmmReg2_m128, YmmReg1, imm8 is $(VEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W0); byte=0x39; YmmReg1 ... & XmmReg2_m128; imm8 +:VEXTRACTI128 XmmReg2_m128, YmmReg1, imm8 is $(VEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W0); byte=0x39; (YmmReg1 & XmmReg2_m128_extend) ... & XmmReg2_m128; imm8 { - local ext:1 = imm8:1 == 1; - XmmReg2_m128 = (YmmReg1[0,128] * zext(ext==0)) | (YmmReg1[128,128] * zext(ext==1)); + local ext:1 = (imm8:1 & 1) == 1; + + local val:16 = YmmReg1[0,128]; + + if (ext == 0) goto ; + + val = YmmReg1[128,128]; + + + XmmReg2_m128 = val; + build XmmReg2_m128_extend; } # VPBLENDD 5-321 PAGE 2145 LINE 110309 @@ -918,71 +927,163 @@ define pcodeop vpblendd_avx2 ; } # VPBROADCAST 5-331 PAGE 2155 LINE 110776 -define pcodeop vpbroadcastb_avx2 ; :VPBROADCASTB XmmReg1, XmmReg2_m8 is $(VEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0); byte=0x78; (XmmReg1 & ZmmReg1) ... 
& XmmReg2_m8 { - local tmp:16 = vpbroadcastb_avx2( XmmReg2_m8 ); - ZmmReg1 = zext(tmp); + local val:1 = XmmReg2_m8[0,8]; + XmmReg1[0,8] = val; + XmmReg1[8,8] = val; + XmmReg1[16,8] = val; + XmmReg1[24,8] = val; + XmmReg1[32,8] = val; + XmmReg1[40,8] = val; + XmmReg1[48,8] = val; + XmmReg1[56,8] = val; + XmmReg1[64,8] = val; + XmmReg1[72,8] = val; + XmmReg1[80,8] = val; + XmmReg1[88,8] = val; + XmmReg1[96,8] = val; + XmmReg1[104,8] = val; + XmmReg1[112,8] = val; + XmmReg1[120,8] = val; + ZmmReg1 = zext(XmmReg1); } # VPBROADCAST 5-331 PAGE 2155 LINE 110778 :VPBROADCASTB YmmReg1, XmmReg2_m8 is $(VEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0); byte=0x78; (YmmReg1 & ZmmReg1) ... & XmmReg2_m8 { - local tmp:32 = vpbroadcastb_avx2( XmmReg2_m8 ); - ZmmReg1 = zext(tmp); + local val:1 = XmmReg2_m8[0,8]; + YmmReg1[0,8] = val; + YmmReg1[8,8] = val; + YmmReg1[16,8] = val; + YmmReg1[24,8] = val; + YmmReg1[32,8] = val; + YmmReg1[40,8] = val; + YmmReg1[48,8] = val; + YmmReg1[56,8] = val; + YmmReg1[64,8] = val; + YmmReg1[72,8] = val; + YmmReg1[80,8] = val; + YmmReg1[88,8] = val; + YmmReg1[96,8] = val; + YmmReg1[104,8] = val; + YmmReg1[112,8] = val; + YmmReg1[120,8] = val; + + YmmReg1[128,8] = val; + YmmReg1[136,8] = val; + YmmReg1[144,8] = val; + YmmReg1[152,8] = val; + YmmReg1[160,8] = val; + YmmReg1[168,8] = val; + YmmReg1[176,8] = val; + YmmReg1[184,8] = val; + YmmReg1[192,8] = val; + YmmReg1[200,8] = val; + YmmReg1[208,8] = val; + YmmReg1[216,8] = val; + YmmReg1[224,8] = val; + YmmReg1[232,8] = val; + YmmReg1[240,8] = val; + YmmReg1[248,8] = val; + + ZmmReg1 = zext(YmmReg1); } # VPBROADCAST 5-331 PAGE 2155 LINE 110787 -define pcodeop vpbroadcastw_avx2 ; :VPBROADCASTW XmmReg1, XmmReg2_m16 is $(VEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0); byte=0x79; (XmmReg1 & ZmmReg1) ... 
& XmmReg2_m16 { - local tmp:16 = vpbroadcastw_avx2( XmmReg2_m16 ); - ZmmReg1 = zext(tmp); + local val:2 = XmmReg2_m16[0,16]; + XmmReg1[0,16] = val; + XmmReg1[16,16] = val; + XmmReg1[32,16] = val; + XmmReg1[48,16] = val; + XmmReg1[64,16] = val; + XmmReg1[80,16] = val; + XmmReg1[96,16] = val; + XmmReg1[112,16] = val; + + ZmmReg1 = zext(XmmReg1); } # VPBROADCAST 5-331 PAGE 2155 LINE 110789 :VPBROADCASTW YmmReg1, XmmReg2_m16 is $(VEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0); byte=0x79; (YmmReg1 & ZmmReg1) ... & XmmReg2_m16 { - local tmp:32 = vpbroadcastw_avx2( XmmReg2_m16 ); - ZmmReg1 = zext(tmp); + local val:2 = XmmReg2_m16[0,16]; + YmmReg1[0,16] = val; + YmmReg1[16,16] = val; + YmmReg1[32,16] = val; + YmmReg1[48,16] = val; + YmmReg1[64,16] = val; + YmmReg1[80,16] = val; + YmmReg1[96,16] = val; + YmmReg1[112,16] = val; + + YmmReg1[128,16] = val; + YmmReg1[144,16] = val; + YmmReg1[160,16] = val; + YmmReg1[176,16] = val; + YmmReg1[192,16] = val; + YmmReg1[208,16] = val; + YmmReg1[224,16] = val; + YmmReg1[240,16] = val; + + ZmmReg1 = zext(YmmReg1); } # VPBROADCAST 5-331 PAGE 2155 LINE 110800 -define pcodeop vpbroadcastd_avx2 ; :VPBROADCASTD XmmReg1, XmmReg2_m32 is $(VEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0); byte=0x58; (XmmReg1 & ZmmReg1) ... & XmmReg2_m32 { - local tmp:16 = vpbroadcastd_avx2( XmmReg2_m32 ); - ZmmReg1 = zext(tmp); + local val:4 = XmmReg2_m32[0,32]; + XmmReg1[0,32] = val; + XmmReg1[32,32] = val; + XmmReg1[64,32] = val; + XmmReg1[96,32] = val; + ZmmReg1 = zext(XmmReg1); } # VPBROADCAST 5-331 PAGE 2155 LINE 110802 :VPBROADCASTD YmmReg1, XmmReg2_m32 is $(VEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0); byte=0x58; (YmmReg1 & ZmmReg1) ... 
& XmmReg2_m32 { - local tmp:32 = vpbroadcastd_avx2( XmmReg2_m32 ); - ZmmReg1 = zext(tmp); + local val:4 = XmmReg2_m32[0,32]; + YmmReg1[0,32] = val; + YmmReg1[32,32] = val; + YmmReg1[64,32] = val; + YmmReg1[96,32] = val; + YmmReg1[128,32] = val; + YmmReg1[160,32] = val; + YmmReg1[192,32] = val; + YmmReg1[224,32] = val; + ZmmReg1 = zext(YmmReg1); } # VPBROADCAST 5-331 PAGE 2155 LINE 110813 -define pcodeop vpbroadcastq_avx2 ; :VPBROADCASTQ XmmReg1, XmmReg2_m64 is $(VEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0); byte=0x59; (XmmReg1 & ZmmReg1) ... & XmmReg2_m64 { - local tmp:16 = vpbroadcastq_avx2( XmmReg2_m64 ); - ZmmReg1 = zext(tmp); + local val:8 = XmmReg2_m64[0,64]; + XmmReg1[0,64] = val; + XmmReg1[64,64] = val; + ZmmReg1 = zext(XmmReg1); } # VPBROADCAST 5-331 PAGE 2155 LINE 110815 :VPBROADCASTQ YmmReg1, XmmReg2_m64 is $(VEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0); byte=0x59; (YmmReg1 & ZmmReg1) ... & XmmReg2_m64 { - local tmp:32 = vpbroadcastq_avx2( XmmReg2_m64 ); - ZmmReg1 = zext(tmp); + local val:8 = XmmReg2_m64[0,64]; + YmmReg1[0,64] = val; + YmmReg1[64,64] = val; + YmmReg1[128,64] = val; + YmmReg1[192,64] = val; + ZmmReg1 = zext(YmmReg1); } # VPBROADCAST 5-332 PAGE 2156 LINE 110843 -define pcodeop vbroadcasti128_avx2 ; -:VBROADCASTI128 YmmReg1, m128 is $(VEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0); byte=0x5A; (YmmReg1 & ZmmReg1) ... & m128 +:VBROADCASTI128 YmmReg1, XmmReg2_m128 is $(VEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0); byte=0x5A; (YmmReg1 & ZmmReg1) ... 
& XmmReg2_m128 { - local tmp:32 = vbroadcasti128_avx2( m128 ); - ZmmReg1 = zext(tmp); + local val:16 = XmmReg2_m128; + YmmReg1[0,128] = val; + YmmReg1[128,128] = val; + ZmmReg1 = zext(YmmReg1); } # VPERM2I128 5-360 PAGE 2184 LINE 112312 diff --git a/pypcode/processors/x86/data/languages/ia.sinc b/pypcode/processors/x86/data/languages/ia.sinc index b5ec7a8a..f7a29889 100644 --- a/pypcode/processors/x86/data/languages/ia.sinc +++ b/pypcode/processors/x86/data/languages/ia.sinc @@ -1212,6 +1212,10 @@ YmmReg2_m256: m256 is m256 { export m256; } ZmmReg2_m512: ZmmReg2 is mod=3 & ZmmReg2 { export ZmmReg2; } ZmmReg2_m512: m512 is m512 { export m512; } +# used to extend ZmmReg2 if not assigning to m128 +XmmReg2_m128_extend: XmmReg2 is mod=3 & XmmReg2 & ZmmReg2 { ZmmReg2 = zext(XmmReg2); } +XmmReg2_m128_extend: XmmReg2 is mod & XmmReg2 { } + m32bcst64: m32 is m32 { local tmp:4 = m32; BCST8[0,32] = tmp; BCST8[32,32] = tmp; export BCST8; } m32bcst128: m32 is m32 { local tmp:4 = m32; BCST16[0,32] = tmp; BCST16[32,32] = tmp; BCST16[64,32] = tmp; BCST16[96,32] = tmp; export BCST16; } m32bcst256: m32 is m32 { @@ -2586,10 +2590,10 @@ define pcodeop clzero; :CMC is vexMode=0 & byte=0xf5 { CF = CF==0; } -:CMOV^cc Reg16,rm16 is vexMode=0 & opsize=0 & byte=0xf; row=4 & cc; rm16 & Reg16 ... { if (!cc) goto inst_next; Reg16 = rm16; } -:CMOV^cc Reg32,rm32 is vexMode=0 & opsize=1 & byte=0xf; row=4 & cc; rm32 & Reg32 ... & check_Reg32_dest ... { build check_Reg32_dest; if (!cc) goto inst_next; Reg32 = rm32;} +:CMOV^cc Reg16,rm16 is vexMode=0 & opsize=0 & byte=0xf; row=4 & cc; rm16 & Reg16 ... { local tmp = rm16; if (!cc) goto inst_next; Reg16 = tmp; } +:CMOV^cc Reg32,rm32 is vexMode=0 & opsize=1 & byte=0xf; row=4 & cc; rm32 & Reg32 ... & check_Reg32_dest ... { local tmp = rm32; build check_Reg32_dest; if (!cc) goto inst_next; Reg32 = tmp; } @ifdef IA64 -:CMOV^cc Reg64,rm64 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & byte=0xf; row=4 & cc; rm64 & Reg64 ... 
{ if (!cc) goto inst_next; Reg64 = rm64; } +:CMOV^cc Reg64,rm64 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & byte=0xf; row=4 & cc; rm64 & Reg64 ... { local tmp = rm64; if (!cc) goto inst_next; Reg64 = tmp; } @endif :CMP AL,imm8 is vexMode=0 & byte=0x3c; AL & imm8 { subflags( AL,imm8 ); local tmp = AL - imm8; resultflags(tmp); } @@ -4073,13 +4077,13 @@ define pcodeop smm_restore_state; :SBB AX,imm16 is vexMode=0 & opsize=0 & byte=0x1d; AX & imm16 { subCarryFlags( AX, imm16 ); resultflags(AX); } :SBB EAX,imm32 is vexMode=0 & opsize=1 & byte=0x1d; EAX & check_EAX_dest & imm32 { subCarryFlags( EAX, imm32 ); build check_EAX_dest; resultflags(EAX); } @ifdef IA64 -:SBB RAX,imm32 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & byte=0x1d; RAX & imm32 { subCarryFlags( RAX, imm32 ); resultflags(RAX); } +:SBB RAX,simm32 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & byte=0x1d; RAX & simm32 { subCarryFlags( RAX, simm32 ); resultflags(RAX); } @endif :SBB Rmr8,imm8 is vexMode=0 & $(BYTE_80_82); mod=3 & Rmr8 & reg_opcode=3; imm8 { subCarryFlags( Rmr8, imm8 ); resultflags(Rmr8); } :SBB Rmr16,imm16 is vexMode=0 & opsize=0 & byte=0x81; mod=3 & Rmr16 & reg_opcode=3; imm16 { subCarryFlags( Rmr16, imm16 ); resultflags(Rmr16); } :SBB Rmr32,imm32 is vexMode=0 & opsize=1 & byte=0x81; mod=3 & Rmr32 & check_Rmr32_dest & reg_opcode=3; imm32 { subCarryFlags( Rmr32, imm32 ); build check_Rmr32_dest; resultflags(Rmr32); } @ifdef IA64 -:SBB Rmr64,imm32 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & byte=0x81; mod=3 & Rmr64 & reg_opcode=3; imm32 { subCarryFlags( Rmr64, imm32 ); resultflags(Rmr64); } +:SBB Rmr64,simm32 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & byte=0x81; mod=3 & Rmr64 & reg_opcode=3; simm32 { subCarryFlags( Rmr64, simm32 ); resultflags(Rmr64); } @endif :SBB Rmr16,simm8_16 is vexMode=0 & opsize=0 & byte=0x83; mod=3 & Rmr16 & reg_opcode=3; simm8_16 { subCarryFlags( Rmr16, simm8_16 ); resultflags(Rmr16); } @@ -5301,7 +5305,7 @@ CMPPS_OPERAND: ", "^imm8 is imm8 { } xmmTmp2_Da = XmmReg2[0,32]; 
xmmTmp2_Db = XmmReg2[32,32]; xmmTmp2_Dc = XmmReg2[64,32]; - xmmTmp2_Dc = XmmReg2[96,32]; + xmmTmp2_Dd = XmmReg2[96,32]; build XmmCondPS; @@ -5463,8 +5467,9 @@ CMPSS_OPERAND: ", "^imm8 is imm8 { } :CVTDQ2PD XmmReg1, XmmReg2 is vexMode=0 & $(PRE_F3) & byte=0x0F; byte=0xE6; xmmmod=3 & XmmReg1 & XmmReg2 { - XmmReg1[0,64] = int2float( XmmReg2[0,32] ); - XmmReg1[64,64] = int2float( XmmReg2[32,32] ); + local tmp:8 = XmmReg2[0,64]; + XmmReg1[0,64] = int2float( tmp[0,32] ); + XmmReg1[64,64] = int2float( tmp[32,32] ); } :CVTDQ2PS XmmReg, m128 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x5B; m128 & XmmReg ... @@ -5583,8 +5588,9 @@ CMPSS_OPERAND: ", "^imm8 is imm8 { } :CVTPS2PD XmmReg1, XmmReg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x5A; xmmmod=3 & XmmReg1 & XmmReg2 { - XmmReg1[0,64] = float2float( XmmReg2[0,32] ); - XmmReg1[64,64] = float2float( XmmReg2[32,32] ); + local tmp:8 = XmmReg2[0,64]; + XmmReg1[0,64] = float2float( tmp[0,32] ); + XmmReg1[64,64] = float2float( tmp[32,32] ); } :CVTPS2PI mmxreg, m64 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x2D; mmxreg ... & m64 @@ -5615,12 +5621,12 @@ CMPSS_OPERAND: ", "^imm8 is imm8 { } @ifdef IA64 :CVTSD2SI Reg64, m64 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & $(PRE_F2) & byte=0x0F; byte=0x2D; Reg64 ... & m64 { - Reg64 = round(m64); + Reg64 = trunc(round(m64)); } :CVTSD2SI Reg64, XmmReg2 is vexMode=0 & opsize=2 & $(PRE_F2) & byte=0x0F; byte=0x2D; xmmmod=3 & Reg64 & XmmReg2 { - Reg64 = round(XmmReg2[0,64]); + Reg64 = trunc(round(XmmReg2[0,64])); } @endif @@ -5670,12 +5676,12 @@ CMPSS_OPERAND: ", "^imm8 is imm8 { } :CVTSS2SI Reg32, m32 is vexMode=0 & $(PRE_F3) & byte=0x0F; byte=0x2D; Reg32 ... 
& m32 { - Reg32 = round(m32); + Reg32 = trunc(round(m32)); } :CVTSS2SI Reg32, XmmReg2 is vexMode=0 & $(PRE_F3) & byte=0x0F; byte=0x2D; xmmmod=3 & Reg32 & XmmReg2 { - Reg32 = round(XmmReg2[0,32]); + Reg32 = trunc(round(XmmReg2[0,32])); } @ifdef IA64 @@ -5837,8 +5843,9 @@ define pcodeop divps; :HADDPD XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x7C; xmmmod=3 & XmmReg1 & XmmReg2 { + local tmp:16 = XmmReg2; XmmReg1[0,64] = XmmReg1[0,64] f+ XmmReg1[64,64]; - XmmReg1[64,64] = XmmReg2[0,64] f+ XmmReg2[64,64]; + XmmReg1[64,64] = tmp[0,64] f+ tmp[64,64]; } :HADDPS XmmReg, m128 is vexMode=0 & $(PRE_F2) & byte=0x0F; byte=0x7C; m128 & XmmReg ... @@ -5852,10 +5859,11 @@ define pcodeop divps; :HADDPS XmmReg1, XmmReg2 is vexMode=0 & $(PRE_F2) & byte=0x0F; byte=0x7C; xmmmod=3 & XmmReg1 & XmmReg2 { + local tmp:16 = XmmReg2; XmmReg1[0,32] = XmmReg1[0,32] f+ XmmReg1[32,32]; XmmReg1[32,32] = XmmReg1[64,32] f+ XmmReg1[96,32]; - XmmReg1[64,32] = XmmReg2[0,32] f+ XmmReg2[32,32]; - XmmReg1[96,32] = XmmReg2[64,32] f+ XmmReg2[96,32]; + XmmReg1[64,32] = tmp[0,32] f+ tmp[32,32]; + XmmReg1[96,32] = tmp[64,32] f+ tmp[96,32]; } :HSUBPD XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x7D; m128 & XmmReg ... @@ -5867,8 +5875,9 @@ define pcodeop divps; :HSUBPD XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x7D; xmmmod=3 & XmmReg1 & XmmReg2 { + local tmp:16 = XmmReg2; XmmReg1[0,64] = XmmReg1[0,64] f- XmmReg1[64,64]; - XmmReg1[64,64] = XmmReg2[0,64] f- XmmReg2[64,64]; + XmmReg1[64,64] = tmp[0,64] f- tmp[64,64]; } :HSUBPS XmmReg, m128 is vexMode=0 & $(PRE_F2) & byte=0x0F; byte=0x7D; m128 & XmmReg ... 
@@ -5882,10 +5891,11 @@ define pcodeop divps; :HSUBPS XmmReg1, XmmReg2 is vexMode=0 & $(PRE_F2) & byte=0x0F; byte=0x7D; xmmmod=3 & XmmReg1 & XmmReg2 { + local tmp:16 = XmmReg2; XmmReg1[0,32] = XmmReg1[0,32] f- XmmReg1[32,32]; XmmReg1[32,32] = XmmReg1[64,32] f- XmmReg1[96,32]; - XmmReg1[64,32] = XmmReg2[0,32] f- XmmReg2[32,32]; - XmmReg1[96,32] = XmmReg2[64,32] f- XmmReg2[96,32]; + XmmReg1[64,32] = tmp[0,32] f- tmp[32,32]; + XmmReg1[96,32] = tmp[64,32] f- tmp[96,32]; } #-------------------- @@ -6364,7 +6374,7 @@ define pcodeop packssdw; #otherwise ubyte = sword macro sswub(sword, ubyte) { ubyte = (sword s> 0xff:2) * 0xff:1; - ubyte = ubyte + (sword s> 0:2) * (sword s< 0xff:2) * sword:1; + ubyte = ubyte + (sword s> 0:2) * (sword s<= 0xff:2) * sword:1; } :PACKUSWB mmxreg, m64 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x67; mmxreg ... & m64 @@ -8855,7 +8865,7 @@ define pcodeop extractps; local low:1 = shift < 64:1; local temp:8; conditionalAssign(temp,low,XmmReg[0,64] >> shift,XmmReg[64,64] >> (shift - 64)); - Mem = temp:1; + *Mem = temp:1; } :PEXTRD Rmr32, XmmReg, imm8 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x3A; byte=0x16; mod=3 & XmmReg & Rmr32 & check_Rmr32_dest; imm8 @@ -8874,7 +8884,7 @@ define pcodeop extractps; local low:1 = shift < 64:1; local temp:8; conditionalAssign(temp,low,XmmReg[0,64] >> shift,XmmReg[64,64] >> (shift - 64)); - Mem = temp:4; + *Mem = temp:4; } @ifdef IA64 @@ -8887,7 +8897,9 @@ define pcodeop extractps; :PEXTRQ Mem, XmmReg, imm8 is $(LONGMODE_ON) & vexMode=0 & bit64=1 & $(PRE_66) & $(REX_W) & byte=0x0F; byte=0x3A; byte=0x16; XmmReg ... 
& Mem; imm8 { local high:1 = imm8 & 0x1; - conditionalAssign(Mem,high,XmmReg[64,64],XmmReg[0,64]); + local temp:8; + conditionalAssign(temp,high,XmmReg[64,64],XmmReg[0,64]); + *Mem = temp; } @endif diff --git a/pypcode/processors/x86/data/languages/lockable.sinc b/pypcode/processors/x86/data/languages/lockable.sinc index f9aacd67..746170bf 100644 --- a/pypcode/processors/x86/data/languages/lockable.sinc +++ b/pypcode/processors/x86/data/languages/lockable.sinc @@ -979,11 +979,11 @@ } @ifdef IA64 -:SBB^lockx m64,imm32 is $(LONGMODE_ON) & vexMode=0 & lockx & unlock & opsize=2 & byte=0x81; m64 & reg_opcode=3 ...; imm32 +:SBB^lockx m64,simm32 is $(LONGMODE_ON) & vexMode=0 & lockx & unlock & opsize=2 & byte=0x81; m64 & reg_opcode=3 ...; simm32 { build lockx; build m64; - subCarryFlags( m64, imm32 ); + subCarryFlags( m64, simm32 ); resultflags(m64); build unlock; } diff --git a/pypcode/processors/x86/data/languages/x86-32-golang.register.info b/pypcode/processors/x86/data/languages/x86-32-golang.register.info index 28c42ff0..98b573e0 100644 --- a/pypcode/processors/x86/data/languages/x86-32-golang.register.info +++ b/pypcode/processors/x86/data/languages/x86-32-golang.register.info @@ -5,5 +5,6 @@ + \ No newline at end of file diff --git a/pypcode/processors/x86/data/languages/x86-64-golang.cspec b/pypcode/processors/x86/data/languages/x86-64-golang.cspec index 8196a9a6..ed1d68d3 100644 --- a/pypcode/processors/x86/data/languages/x86-64-golang.cspec +++ b/pypcode/processors/x86/data/languages/x86-64-golang.cspec @@ -26,16 +26,16 @@ - + - + - + - + @@ -63,7 +63,7 @@ - + @@ -195,6 +195,14 @@ + + + + + + + + @@ -228,8 +236,6 @@ - - diff --git a/pypcode/processors/x86/data/languages/x86-64-golang.register.info b/pypcode/processors/x86/data/languages/x86-64-golang.register.info index 49222ece..802caffb 100644 --- a/pypcode/processors/x86/data/languages/x86-64-golang.register.info +++ b/pypcode/processors/x86/data/languages/x86-64-golang.register.info @@ -6,5 +6,14 @@ + + + + 
+ + + + + \ No newline at end of file diff --git a/pypcode/processors/x86/data/languages/x86-64.pspec b/pypcode/processors/x86/data/languages/x86-64.pspec index 08cefc96..45df40cd 100644 --- a/pypcode/processors/x86/data/languages/x86-64.pspec +++ b/pypcode/processors/x86/data/languages/x86-64.pspec @@ -76,6 +76,38 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + @@ -92,22 +124,54 @@ - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/pypcode/processors/x86/data/languages/x86.pspec b/pypcode/processors/x86/data/languages/x86.pspec index 595d1e2d..b473a93b 100644 --- a/pypcode/processors/x86/data/languages/x86.pspec +++ b/pypcode/processors/x86/data/languages/x86.pspec @@ -60,38 +60,102 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/pypcode/sleigh/Makefile b/pypcode/sleigh/Makefile index 814e1daa..69b8dcf1 100644 --- a/pypcode/sleigh/Makefile +++ b/pypcode/sleigh/Makefile @@ -38,12 +38,12 @@ endif CXX=g++ -std=c++11 # Debug flags -DBG_CXXFLAGS=-g -Wall -Wno-sign-compare $(CFLAGS) +DBG_CXXFLAGS=-g -Wall -Wno-sign-compare #DBG_CXXFLAGS=-g -pg -Wall -Wno-sign-compare #DBG_CXXFLAGS=-g -fprofile-arcs -ftest-coverage -Wall -Wno-sign-compare # Optimization flags -OPT_CXXFLAGS=-O2 -Wall -Wno-sign-compare $(CFLAGS) +OPT_CXXFLAGS=-O2 -Wall -Wno-sign-compare YACC=bison @@ -83,7 +83,7 @@ DECCORE=capability architecture options graph cover block cast typeop database c type variable varmap jumptable emulate emulateutil flow userop multiprecision \ funcdata funcdata_block funcdata_op funcdata_varnode unionresolve pcodeinject \ heritage prefersplit rangeutil ruleaction subflow blockaction merge double \ - transform coreaction condexe override dynamic crc32 
prettyprint \ + transform constseq coreaction condexe override dynamic crc32 prettyprint \ printlanguage printc printjava memstate opbehavior paramid signature $(COREEXT_NAMES) # Files used for any project that use the sleigh decoder SLEIGH= sleigh pcodeparse pcodecompile sleighbase slghsymbol \ @@ -411,6 +411,6 @@ reallyclean: clean rm -rf com_dbg com_opt test_dbg ghi_dbg ghi_opt sla_dbg sla_opt rm -f $(EXECS) TAGS *~ -sleigh_src: - mkdir -p sleigh_src - cp $(LIBSLA_SOURCE) Makefile sleigh_src +sleigh: + mkdir -p sleigh + cp $(LIBSLA_SOURCE) Makefile sleigh diff --git a/pypcode/sleigh/filemanage.cc b/pypcode/sleigh/filemanage.cc index aa1d06ca..d6ead436 100644 --- a/pypcode/sleigh/filemanage.cc +++ b/pypcode/sleigh/filemanage.cc @@ -34,8 +34,10 @@ namespace ghidra { // Path name separator #ifdef _WINDOWS char FileManage::separator = '\\'; +char FileManage::separatorClass[] = "/\\"; #else char FileManage::separator = '/'; +char FileManage::separatorClass[] = "/"; #endif void FileManage::addDir2Path(const string &path) @@ -43,7 +45,7 @@ void FileManage::addDir2Path(const string &path) { if (path.size()>0) { pathlist.push_back(path); - if (path[path.size()-1] != separator) + if (!isSeparator(path[path.size()-1])) pathlist.back() += separator; } } @@ -53,7 +55,7 @@ void FileManage::findFile(string &res,const string &name) const { // Search through paths to find file with given name vector::const_iterator iter; - if (name[0] == separator) { + if (isSeparator(name[0])) { res = name; ifstream s(res.c_str()); if (s) { @@ -122,6 +124,22 @@ bool FileManage::isDirectory(const string &path) #endif +#ifdef _WINDOWS +bool FileManage::isSeparator(char c) + +{ + return (c == '/' || c == '\\'); +} + +#else +bool FileManage::isSeparator(char c) + +{ + return c == separator; +} + +#endif + #ifdef _WINDOWS void FileManage::matchListDir(vector &res,const string &match,bool isSuffix,const string &dirname,bool allowdot) @@ -131,7 +149,7 @@ void FileManage::matchListDir(vector &res,const 
string &match,bool isSuf string dirfinal; dirfinal = dirname; - if (dirfinal[dirfinal.size()-1] != separator) + if (!isSeparator(dirfinal[dirfinal.size()-1])) dirfinal += separator; string regex = dirfinal + '*'; @@ -162,7 +180,7 @@ void FileManage::matchListDir(vector &res,const string &match,bool isSuf DIR *dir; struct dirent *entry; string dirfinal = dirname; - if (dirfinal[dirfinal.size()-1] != separator) + if (!isSeparator(dirfinal[dirfinal.size()-1])) dirfinal += separator; dir = opendir(dirfinal.c_str()); @@ -205,7 +223,7 @@ void FileManage::directoryList(vector &res,const string &dirname,bool al WIN32_FIND_DATAA FindFileData; HANDLE hFind; string dirfinal = dirname; - if (dirfinal[dirfinal.size()-1] != separator) + if (!isSeparator(dirfinal[dirfinal.size()-1])) dirfinal += separator; string regex = dirfinal + "*"; const char *s = regex.c_str(); @@ -232,7 +250,7 @@ void FileManage::directoryList(vector &res,const string &dirname,bool al string dirfinal; dirfinal = dirname; - if (dirfinal[dirfinal.size()-1] != separator) + if (!isSeparator(dirfinal[dirfinal.size()-1])) dirfinal += separator; dir = opendir(dirfinal.c_str()); @@ -262,7 +280,7 @@ void FileManage::scanDirectoryRecursive(vector &res,const string &matchn vector::const_iterator iter; for(iter = subdir.begin();iter!=subdir.end();++iter) { const string &curpath( *iter ); - string::size_type pos = curpath.rfind(separator); + string::size_type pos = curpath.find_last_of(separatorClass); if (pos == string::npos) pos = 0; else @@ -280,9 +298,9 @@ void FileManage::splitPath(const string &full,string &path,string &base) // If there is no path, i.e. 
only a basename in full, then -path- will return as an empty string // otherwise -path- will be non-empty and end in a separator character string::size_type end = full.size()-1; - if (full[full.size()-1] == separator) // Take into account terminating separator + if (isSeparator(full[full.size()-1])) // Take into account terminating separator end = full.size()-2; - string::size_type pos = full.rfind(separator,end); + string::size_type pos = full.find_last_of(separatorClass,end); if (pos == string::npos) { // Didn't find any separator base = full; path.clear(); diff --git a/pypcode/sleigh/filemanage.hh b/pypcode/sleigh/filemanage.hh index defa54e7..9c2ab1b6 100644 --- a/pypcode/sleigh/filemanage.hh +++ b/pypcode/sleigh/filemanage.hh @@ -34,6 +34,7 @@ using std::ostringstream; class FileManage { vector pathlist; // List of paths to search for files static char separator; + static char separatorClass[]; // Characters that can be accepted as a separator static string buildPath(const vector &pathels,int level); static bool testDevelopmentPath(const vector &pathels,int level,string &root); static bool testInstallPath(const vector &pathels,int level,string &root); @@ -42,6 +43,7 @@ public: void addCurrentDir(void); void findFile(string &res,const string &name) const; // Resolve full pathname void matchList(vector &res,const string &match,bool isSuffix) const; // List of files with suffix + static bool isSeparator(char c); static bool isDirectory(const string &path); static void matchListDir(vector &res,const string &match,bool isSuffix,const string &dir,bool allowdot); static void directoryList(vector &res,const string &dirname,bool allowdot=false); diff --git a/pypcode/sleigh/float.cc b/pypcode/sleigh/float.cc index 913af357..81df78ca 100644 --- a/pypcode/sleigh/float.cc +++ b/pypcode/sleigh/float.cc @@ -5,9 +5,9 @@ * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at - * + * * http://www.apache.org/licenses/LICENSE-2.0 - * + * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. @@ -99,11 +99,11 @@ FloatFormat::floatclass FloatFormat::extractExpSig(double x,bool *sgn,uintb *sig x = -x; double norm = frexp(x,&e); // norm is between 1/2 and 1 norm = ldexp(norm,8*sizeof(uintb)-1); // norm between 2^62 and 2^63 - + *signif = (uintb)norm; // Convert to normalized integer *signif <<= 1; - e -= 1; // Consider normalization between 1 and 2 + e -= 1; // Consider normalization between 1 and 2 *exp = e; return normalized; } @@ -217,8 +217,9 @@ uintb FloatFormat::getNaNEncoding(bool sgn) const void FloatFormat::calcPrecision(void) { - float val = frac_size * 0.30103; - decimal_precision = (int4)floor(val + 0.5); + decimalMinPrecision = (int4)floor(frac_size * 0.30103); + // Precision needed to guarantee IEEE 754 binary -> decimal -> binary round trip conversion + decimalMaxPrecision = (int4)ceil((frac_size + 1) * 0.30103) + 1; } /// \param encoding is the encoding value @@ -417,6 +418,47 @@ uintb FloatFormat::convertEncoding(uintb encoding, return setSign(res, sgn); } +/// The string should be printed with the minimum number of digits to uniquely specify the underlying +/// binary value. This currently only works for the 32-bit and 64-bit IEEE 754 formats. +/// If the \b forcesci parameter is \b true, the string will always be printed using scientific notation. +/// \param host is the given value already converted to the host's \b double format. +/// \param forcesci is \b true if the value should be printed in scientific notation. 
+/// \return the decimal representation as a string +string FloatFormat::printDecimal(double host,bool forcesci) const + +{ + string res; + for(int4 prec=decimalMinPrecision;;++prec) { + ostringstream s; + if (forcesci) { + s.setf( ios::scientific ); // Set to scientific notation + s.precision(prec-1); // scientific doesn't include first digit in precision count + } + else { + s.unsetf( ios::floatfield ); // Use "default" notation to allow fewer digits to be printed if possible + s.precision(prec); + } + s << host; + if (prec == decimalMaxPrecision) { + return s.str(); + } + res = s.str(); + double roundtrip = 0.0; + istringstream t(res); + if (size <= 4) { + float tmp = 0.0; + t >> tmp; + roundtrip = tmp; + } + else { + t >> roundtrip; + } + if (roundtrip == host) + break; + } + return res; +} + // Currently we emulate floating point operations on the target // By converting the encoding to the host's encoding and then // performing the operation using the host's floating point unit diff --git a/pypcode/sleigh/float.hh b/pypcode/sleigh/float.hh index 376cc5a3..06429dc7 100644 --- a/pypcode/sleigh/float.hh +++ b/pypcode/sleigh/float.hh @@ -4,9 +4,9 @@ * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at - * + * * http://www.apache.org/licenses/LICENSE-2.0 - * + * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
@@ -48,7 +48,8 @@ private: int4 exp_size; ///< Number of bits in exponent int4 bias; ///< What to add to real exponent to get encoding int4 maxexponent; ///< Maximum possible exponent - int4 decimal_precision; ///< Number of decimal digits of precision + int4 decimalMinPrecision; ///< Minimum decimal digits of precision guaranteed by the format + int4 decimalMaxPrecision; ///< Maximum decimal digits of precision needed to uniquely represent value bool jbitimplied; ///< Set to \b true if integer bit of 1 is assumed static double createFloat(bool sign,uintb signif,int4 exp); ///< Create a double given sign, fractional, and exponent static floatclass extractExpSig(double x,bool *sgn,uintb *signif,int4 *exp); @@ -65,13 +66,14 @@ public: int4 getSize(void) const { return size; } ///< Get the size of the encoding in bytes double getHostFloat(uintb encoding,floatclass *type) const; ///< Convert an encoding into host's double uintb getEncoding(double host) const; ///< Convert host's double into \b this encoding - int4 getDecimalPrecision(void) const { return decimal_precision; } ///< Get number of digits of precision uintb convertEncoding(uintb encoding,const FloatFormat *formin) const; ///< Convert between two different formats uintb extractFractionalCode(uintb x) const; ///< Extract the fractional part of the encoding bool extractSign(uintb x) const; ///< Extract the sign bit from the encoding int4 extractExponentCode(uintb x) const; ///< Extract the exponent from the encoding + string printDecimal(double host,bool forcesci) const; ///< Print given value as a decimal string + // Operations on floating point values uintb opEqual(uintb a,uintb b) const; ///< Equality comparison (==) diff --git a/pypcode/sleigh/sleighbase.cc b/pypcode/sleigh/sleighbase.cc index 7eb86597..72ea14e3 100644 --- a/pypcode/sleigh/sleighbase.cc +++ b/pypcode/sleigh/sleighbase.cc @@ -199,7 +199,7 @@ void SleighBase::encodeSlaSpace(Encoder &encoder,AddrSpace *spc) const // 
encoder.writeSignedInteger(sla::ATTRIB_DEADCODEDELAY, spc->getDeadcodeDelay()); encoder.writeSignedInteger(sla::ATTRIB_SIZE, spc->getAddrSize()); if (spc->getWordSize() > 1) - encoder.writeUnsignedInteger(sla::ATTRIB_WORDSIZE, spc->getWordSize()); + encoder.writeSignedInteger(sla::ATTRIB_WORDSIZE, spc->getWordSize()); encoder.writeBool(sla::ATTRIB_PHYSICAL, spc->hasPhysical()); if (spc->getType() == IPTR_INTERNAL) encoder.closeElement(sla::ELEM_SPACE_UNIQUE); @@ -272,7 +272,7 @@ AddrSpace *SleighBase::decodeSlaSpace(Decoder &decoder,const Translate *trans) else if (attribId == sla::ATTRIB_SIZE) addressSize = decoder.readSignedInteger(); else if (attribId == sla::ATTRIB_WORDSIZE) - wordsize = decoder.readUnsignedInteger(); + wordsize = decoder.readSignedInteger(); else if (attribId == sla::ATTRIB_BIGENDIAN) { bigEnd = decoder.readBool(); } diff --git a/pypcode/sleigh/space.cc b/pypcode/sleigh/space.cc index bda09fc9..dbaa2e77 100644 --- a/pypcode/sleigh/space.cc +++ b/pypcode/sleigh/space.cc @@ -4,9 +4,9 @@ * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at - * + * * http://www.apache.org/licenses/LICENSE-2.0 - * + * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. @@ -34,10 +34,13 @@ AttributeId ATTRIB_PIECE = AttributeId("piece",94); // Open slots 94-102 void AddrSpace::calcScaleMask(void) { - pointerLowerBound = (addressSize < 3) ? 0x100: 0x1000; highest = calc_mask(addressSize); // Maximum address highest = highest * wordsize + (wordsize-1); // Maximum byte address + pointerLowerBound = 0; pointerUpperBound = highest; + uintb bufferSize = (addressSize < 3) ? 
0x100 : 0x1000; + pointerLowerBound += bufferSize; + pointerUpperBound -= bufferSize; } /// Initialize an address space with its basic attributes diff --git a/scripts/sleigh_download.sh b/scripts/sleigh_download.sh index bbc16b75..d272807a 100755 --- a/scripts/sleigh_download.sh +++ b/scripts/sleigh_download.sh @@ -2,7 +2,7 @@ set -e set -x -git clone --depth=1 -b Ghidra_11.1.2_build https://github.com/NationalSecurityAgency/ghidra.git ghidra_src +git clone --depth=1 -b Ghidra_11.2.1_build https://github.com/NationalSecurityAgency/ghidra.git ghidra_src # We just need Makefile and $(LIBSLA_SOURCE) defined inside Makefile. Do it this # way to make sure we stay up to date with the list of required files.